From 30e39692f2df20c251e3c7263a1f29f3b4969883 Mon Sep 17 00:00:00 2001
From: Jessica Gadling
Date: Wed, 28 Aug 2024 14:40:31 -0400
Subject: [PATCH] Lint fixes.

---
 .pre-commit-config.yaml | 1 -
 apiv2/database/migrations/env.py | 5 +-
 apiv2/database/models/alignment.py | 18 +-
 apiv2/database/models/annotation.py | 17 +-
 apiv2/database/models/annotation_author.py | 11 +-
 apiv2/database/models/annotation_file.py | 24 +-
 apiv2/database/models/annotation_shape.py | 14 +-
 apiv2/database/models/dataset.py | 14 +-
 apiv2/database/models/dataset_author.py | 11 +-
 apiv2/database/models/dataset_funding.py | 11 +-
 apiv2/database/models/deposition.py | 14 +-
 apiv2/database/models/deposition_author.py | 11 +-
 apiv2/database/models/deposition_type.py | 11 +-
 apiv2/database/models/frame.py | 11 +-
 .../per_section_alignment_parameters.py | 11 +-
 .../database/models/per_section_parameters.py | 11 +-
 apiv2/database/models/run.py | 13 +-
 apiv2/database/models/tiltseries.py | 16 +-
 apiv2/database/models/tomogram.py | 32 +--
 apiv2/database/models/tomogram_author.py | 11 +-
 .../database/models/tomogram_voxel_spacing.py | 11 +-
 apiv2/graphql_api/helpers/alignment.py | 9 +-
 apiv2/graphql_api/helpers/annotation.py | 10 +-
 .../graphql_api/helpers/annotation_author.py | 5 +-
 apiv2/graphql_api/helpers/annotation_file.py | 7 +-
 apiv2/graphql_api/helpers/annotation_shape.py | 7 +-
 apiv2/graphql_api/helpers/dataset.py | 8 +-
 apiv2/graphql_api/helpers/dataset_author.py | 5 +-
 apiv2/graphql_api/helpers/dataset_funding.py | 5 +-
 apiv2/graphql_api/helpers/deposition.py | 5 +-
 .../graphql_api/helpers/deposition_author.py | 5 +-
 apiv2/graphql_api/helpers/deposition_type.py | 7 +-
 apiv2/graphql_api/helpers/frame.py | 5 +-
 .../per_section_alignment_parameters.py | 5 +-
 .../helpers/per_section_parameters.py | 5 +-
 apiv2/graphql_api/helpers/run.py | 5 +-
 apiv2/graphql_api/helpers/tiltseries.py | 9 +-
 apiv2/graphql_api/helpers/tomogram.py | 17 +-
 apiv2/graphql_api/helpers/tomogram_author.py | 5 +-
 .../helpers/tomogram_voxel_spacing.py | 5 +-
 apiv2/graphql_api/mutations.py | 51 ++--
 apiv2/graphql_api/queries.py | 76 +++---
 apiv2/graphql_api/types/alignment.py | 158 +++++-----
 apiv2/graphql_api/types/annotation.py | 174 +++++------
 apiv2/graphql_api/types/annotation_author.py | 106 ++++----
 apiv2/graphql_api/types/annotation_file.py | 86 ++++---
 apiv2/graphql_api/types/annotation_shape.py | 59 +++--
 apiv2/graphql_api/types/dataset.py | 180 ++++++++------
 apiv2/graphql_api/types/dataset_author.py | 116 +++++----
 apiv2/graphql_api/types/dataset_funding.py | 73 +++---
 apiv2/graphql_api/types/deposition.py | 122 +++++-----
 apiv2/graphql_api/types/deposition_author.py | 100 ++++----
 apiv2/graphql_api/types/deposition_type.py | 50 ++--
 apiv2/graphql_api/types/frame.py | 102 ++++----
 .../types/per_section_alignment_parameters.py | 94 ++++---
 .../types/per_section_parameters.py | 85 ++++---
 apiv2/graphql_api/types/run.py | 93 +++----
 apiv2/graphql_api/types/tiltseries.py | 230 +++++++++++-------
 apiv2/graphql_api/types/tomogram.py | 216 +++++++++-------
 apiv2/graphql_api/types/tomogram_author.py | 116 +++++----
 .../types/tomogram_voxel_spacing.py | 99 +++++---
 apiv2/main.py | 3 +-
 apiv2/platformics/graphql_api/core/deps.py | 4 +-
 apiv2/platformics/graphql_api/files.py | 4 +-
 apiv2/platformics/security/authorization.py | 15 +-
 apiv2/scrape.py | 1 -
 apiv2/support/enums.py | 3 +-
 apiv2/support/limit_offset.py | 3 +-
 .../graphql_api/queries.py.j2 | 38 +++
 apiv2/test_infra/factories/alignment.py | 11 +-
 apiv2/test_infra/factories/annotation.py | 9 +-
 .../test_infra/factories/annotation_author.py | 7 +-
 apiv2/test_infra/factories/annotation_file.py | 11 +-
 .../test_infra/factories/annotation_shape.py | 7 +-
 apiv2/test_infra/factories/dataset.py | 9 +-
 apiv2/test_infra/factories/dataset_author.py | 7 +-
 apiv2/test_infra/factories/dataset_funding.py | 7 +-
 apiv2/test_infra/factories/deposition.py | 4 +-
 .../test_infra/factories/deposition_author.py | 7 +-
 apiv2/test_infra/factories/deposition_type.py | 7 +-
 apiv2/test_infra/factories/frame.py | 9 +-
 .../per_section_alignment_parameters.py | 7 +-
 .../factories/per_section_parameters.py | 9 +-
 apiv2/test_infra/factories/run.py | 7 +-
 apiv2/test_infra/factories/tiltseries.py | 9 +-
 apiv2/test_infra/factories/tomogram.py | 13 +-
 apiv2/test_infra/factories/tomogram_author.py | 7 +-
 .../factories/tomogram_voxel_spacing.py | 7 +-
 apiv2/validators/alignment.py | 5 +-
 apiv2/validators/annotation.py | 4 +-
 apiv2/validators/annotation_author.py | 2 -
 apiv2/validators/annotation_file.py | 5 +-
 apiv2/validators/annotation_shape.py | 7 +-
 apiv2/validators/dataset.py | 4 +-
 apiv2/validators/dataset_author.py | 2 -
 apiv2/validators/dataset_funding.py | 2 -
 apiv2/validators/deposition.py | 2 -
 apiv2/validators/deposition_author.py | 2 -
 apiv2/validators/deposition_type.py | 7 +-
 apiv2/validators/frame.py | 2 -
 .../per_section_alignment_parameters.py | 4 +-
 apiv2/validators/per_section_parameters.py | 4 +-
 apiv2/validators/run.py | 2 -
 apiv2/validators/tiltseries.py | 5 +-
 apiv2/validators/tomogram.py | 11 +-
 apiv2/validators/tomogram_author.py | 2 -
 apiv2/validators/tomogram_voxel_spacing.py | 2 -
 107 files changed, 1655 insertions(+), 1437 deletions(-)
 create mode 100644 apiv2/template_overrides/graphql_api/queries.py.j2

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 5130a0fb1..dbf8ae1a2 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -36,7 +36,6 @@ repos:
 # pydantic-settings,
 # click,
 # ]
-repos:
   - repo: https://github.com/mpalmer/action-validator
     rev: v0.6.0
     hooks:
diff --git a/apiv2/database/migrations/env.py b/apiv2/database/migrations/env.py
index d06a0ca27..1067199ff 100644
--- a/apiv2/database/migrations/env.py
+++ b/apiv2/database/migrations/env.py
@@ -1,11 +1,10 @@
 from logging.config import fileConfig

-from alembic import context
-from sqlalchemy import create_engine
 import sqlalchemy as sa
-
+from alembic import context
 from database.models import meta
 from platformics.settings import CLISettings
+from sqlalchemy import create_engine

 # this is the Alembic Config object, which provides
 # access to the values within the .ini file in use.
diff --git a/apiv2/database/models/alignment.py b/apiv2/database/models/alignment.py
index 09f2e59ae..d84b6622b 100644
--- a/apiv2/database/models/alignment.py
+++ b/apiv2/database/models/alignment.py
@@ -5,27 +5,22 @@
 Make changes to the template codegen/templates/database/models/class_name.py.j2 instead.
""" -import uuid -import uuid6 -import datetime from typing import TYPE_CHECKING from platformics.database.models.base import Base -from sqlalchemy import ForeignKey, String, Float, Integer, Enum, Boolean, DateTime -from sqlalchemy.dialects.postgresql import UUID, JSONB +from platformics.database.models.file import File +from sqlalchemy import Enum, Float, ForeignKey, Integer, String from sqlalchemy.orm import Mapped, mapped_column, relationship -from sqlalchemy.sql import func from support.enums import alignment_type_enum -from platformics.database.models.file import File if TYPE_CHECKING: - from platformics.database.models.file import File from database.models.annotation_file import AnnotationFile - from database.models.per_section_alignment_parameters import PerSectionAlignmentParameters from database.models.deposition import Deposition + from database.models.per_section_alignment_parameters import PerSectionAlignmentParameters + from database.models.run import Run from database.models.tiltseries import Tiltseries from database.models.tomogram import Tomogram - from database.models.run import Run + from platformics.database.models.file import File ... else: @@ -81,7 +76,8 @@ class Alignment(Base): ) alignment: Mapped[str] = mapped_column(String, nullable=False) alignment_type: Mapped[alignment_type_enum] = mapped_column( - Enum(alignment_type_enum, native_enum=False), nullable=True + Enum(alignment_type_enum, native_enum=False), + nullable=True, ) volume_x_dimension: Mapped[float] = mapped_column(Float, nullable=True) volume_y_dimension: Mapped[float] = mapped_column(Float, nullable=True) diff --git a/apiv2/database/models/annotation.py b/apiv2/database/models/annotation.py index c8abb5949..840a89f45 100644 --- a/apiv2/database/models/annotation.py +++ b/apiv2/database/models/annotation.py @@ -5,25 +5,21 @@ Make changes to the template codegen/templates/database/models/class_name.py.j2 instead. """ -import uuid -import uuid6 import datetime from typing import TYPE_CHECKING from platformics.database.models.base import Base -from sqlalchemy import ForeignKey, String, Float, Integer, Enum, Boolean, DateTime -from sqlalchemy.dialects.postgresql import UUID, JSONB +from platformics.database.models.file import File +from sqlalchemy import Boolean, DateTime, Enum, Float, ForeignKey, Integer, String from sqlalchemy.orm import Mapped, mapped_column, relationship -from sqlalchemy.sql import func from support.enums import annotation_method_type_enum -from platformics.database.models.file import File if TYPE_CHECKING: - from platformics.database.models.file import File - from database.models.run import Run - from database.models.annotation_shape import AnnotationShape from database.models.annotation_author import AnnotationAuthor + from database.models.annotation_shape import AnnotationShape from database.models.deposition import Deposition + from database.models.run import Run + from platformics.database.models.file import File ... 

 else:
@@ -79,7 +75,8 @@ class Annotation(Base):
     annotation_software: Mapped[str] = mapped_column(String, nullable=True)
     is_curator_recommended: Mapped[bool] = mapped_column(Boolean, nullable=True)
     method_type: Mapped[annotation_method_type_enum] = mapped_column(
-        Enum(annotation_method_type_enum, native_enum=False), nullable=False
+        Enum(annotation_method_type_enum, native_enum=False),
+        nullable=False,
     )
     deposition_date: Mapped[datetime.datetime] = mapped_column(DateTime(timezone=True), nullable=False)
     release_date: Mapped[datetime.datetime] = mapped_column(DateTime(timezone=True), nullable=False)
diff --git a/apiv2/database/models/annotation_author.py b/apiv2/database/models/annotation_author.py
index 71918e559..ac582bf78 100644
--- a/apiv2/database/models/annotation_author.py
+++ b/apiv2/database/models/annotation_author.py
@@ -5,21 +5,16 @@
 Make changes to the template codegen/templates/database/models/class_name.py.j2 instead.
 """

-import uuid
-import uuid6
-import datetime
 from typing import TYPE_CHECKING

 from platformics.database.models.base import Base
-from sqlalchemy import ForeignKey, String, Float, Integer, Enum, Boolean, DateTime
-from sqlalchemy.dialects.postgresql import UUID, JSONB
-from sqlalchemy.orm import Mapped, mapped_column, relationship
-from sqlalchemy.sql import func
 from platformics.database.models.file import File
+from sqlalchemy import Boolean, ForeignKey, Integer, String
+from sqlalchemy.orm import Mapped, mapped_column, relationship

 if TYPE_CHECKING:
-    from platformics.database.models.file import File
     from database.models.annotation import Annotation
+    from platformics.database.models.file import File

     ...

 else:
diff --git a/apiv2/database/models/annotation_file.py b/apiv2/database/models/annotation_file.py
index c0b553552..03751acef 100644
--- a/apiv2/database/models/annotation_file.py
+++ b/apiv2/database/models/annotation_file.py
@@ -5,24 +5,19 @@
 Make changes to the template codegen/templates/database/models/class_name.py.j2 instead.
 """

-import uuid
-import uuid6
-import datetime
 from typing import TYPE_CHECKING

 from platformics.database.models.base import Base
-from sqlalchemy import ForeignKey, String, Float, Integer, Enum, Boolean, DateTime
-from sqlalchemy.dialects.postgresql import UUID, JSONB
+from platformics.database.models.file import File
+from sqlalchemy import Boolean, Enum, ForeignKey, Integer, String
 from sqlalchemy.orm import Mapped, mapped_column, relationship
-from sqlalchemy.sql import func
 from support.enums import annotation_file_source_enum
-from platformics.database.models.file import File

 if TYPE_CHECKING:
-    from platformics.database.models.file import File
     from database.models.alignment import Alignment
     from database.models.annotation_shape import AnnotationShape
     from database.models.tomogram_voxel_spacing import TomogramVoxelSpacing
+    from platformics.database.models.file import File

     ...

 else:
@@ -44,7 +39,10 @@ class AnnotationFile(Base):
         back_populates="annotation_files",
     )
     annotation_shape_id: Mapped[int] = mapped_column(
-        Integer, ForeignKey("annotation_shape.id"), nullable=True, index=True
+        Integer,
+        ForeignKey("annotation_shape.id"),
+        nullable=True,
+        index=True,
     )
     annotation_shape: Mapped["AnnotationShape"] = relationship(
         "AnnotationShape",
@@ -52,7 +50,10 @@
         back_populates="annotation_files",
     )
     tomogram_voxel_spacing_id: Mapped[int] = mapped_column(
-        Integer, ForeignKey("tomogram_voxel_spacing.id"), nullable=True, index=True
+        Integer,
+        ForeignKey("tomogram_voxel_spacing.id"),
+        nullable=True,
+        index=True,
     )
     tomogram_voxel_spacing: Mapped["TomogramVoxelSpacing"] = relationship(
         "TomogramVoxelSpacing",
@@ -64,6 +65,7 @@
     https_path: Mapped[str] = mapped_column(String, nullable=False)
     is_visualization_default: Mapped[bool] = mapped_column(Boolean, nullable=True)
     source: Mapped[annotation_file_source_enum] = mapped_column(
-        Enum(annotation_file_source_enum, native_enum=False), nullable=True
+        Enum(annotation_file_source_enum, native_enum=False),
+        nullable=True,
     )
     id: Mapped[int] = mapped_column(Integer, nullable=False, index=True, autoincrement=True, primary_key=True)
diff --git a/apiv2/database/models/annotation_shape.py b/apiv2/database/models/annotation_shape.py
index 91da3c03e..fa46513ec 100644
--- a/apiv2/database/models/annotation_shape.py
+++ b/apiv2/database/models/annotation_shape.py
@@ -5,23 +5,18 @@
 Make changes to the template codegen/templates/database/models/class_name.py.j2 instead.
 """

-import uuid
-import uuid6
-import datetime
 from typing import TYPE_CHECKING

 from platformics.database.models.base import Base
-from sqlalchemy import ForeignKey, String, Float, Integer, Enum, Boolean, DateTime
-from sqlalchemy.dialects.postgresql import UUID, JSONB
+from platformics.database.models.file import File
+from sqlalchemy import Enum, ForeignKey, Integer
 from sqlalchemy.orm import Mapped, mapped_column, relationship
-from sqlalchemy.sql import func
 from support.enums import annotation_file_shape_type_enum
-from platformics.database.models.file import File

 if TYPE_CHECKING:
-    from platformics.database.models.file import File
     from database.models.annotation import Annotation
     from database.models.annotation_file import AnnotationFile
+    from platformics.database.models.file import File

     ...

 else:
@@ -48,6 +43,7 @@ class AnnotationShape(Base):
         foreign_keys="AnnotationFile.annotation_shape_id",
     )
     shape_type: Mapped[annotation_file_shape_type_enum] = mapped_column(
-        Enum(annotation_file_shape_type_enum, native_enum=False), nullable=True
+        Enum(annotation_file_shape_type_enum, native_enum=False),
+        nullable=True,
     )
     id: Mapped[int] = mapped_column(Integer, nullable=False, index=True, autoincrement=True, primary_key=True)
diff --git a/apiv2/database/models/dataset.py b/apiv2/database/models/dataset.py
index 2fb6506b0..c78cdec5f 100644
--- a/apiv2/database/models/dataset.py
+++ b/apiv2/database/models/dataset.py
@@ -5,25 +5,21 @@
 Make changes to the template codegen/templates/database/models/class_name.py.j2 instead.
""" -import uuid -import uuid6 import datetime from typing import TYPE_CHECKING from platformics.database.models.base import Base -from sqlalchemy import ForeignKey, String, Float, Integer, Enum, Boolean, DateTime -from sqlalchemy.dialects.postgresql import UUID, JSONB +from platformics.database.models.file import File +from sqlalchemy import DateTime, Enum, ForeignKey, Integer, String from sqlalchemy.orm import Mapped, mapped_column, relationship -from sqlalchemy.sql import func from support.enums import sample_type_enum -from platformics.database.models.file import File if TYPE_CHECKING: - from platformics.database.models.file import File - from database.models.deposition import Deposition - from database.models.dataset_funding import DatasetFunding from database.models.dataset_author import DatasetAuthor + from database.models.dataset_funding import DatasetFunding + from database.models.deposition import Deposition from database.models.run import Run + from platformics.database.models.file import File ... else: diff --git a/apiv2/database/models/dataset_author.py b/apiv2/database/models/dataset_author.py index c708a3625..853073a79 100644 --- a/apiv2/database/models/dataset_author.py +++ b/apiv2/database/models/dataset_author.py @@ -5,21 +5,16 @@ Make changes to the template codegen/templates/database/models/class_name.py.j2 instead. """ -import uuid -import uuid6 -import datetime from typing import TYPE_CHECKING from platformics.database.models.base import Base -from sqlalchemy import ForeignKey, String, Float, Integer, Enum, Boolean, DateTime -from sqlalchemy.dialects.postgresql import UUID, JSONB -from sqlalchemy.orm import Mapped, mapped_column, relationship -from sqlalchemy.sql import func from platformics.database.models.file import File +from sqlalchemy import Boolean, ForeignKey, Integer, String +from sqlalchemy.orm import Mapped, mapped_column, relationship if TYPE_CHECKING: - from platformics.database.models.file import File from database.models.dataset import Dataset + from platformics.database.models.file import File ... else: diff --git a/apiv2/database/models/dataset_funding.py b/apiv2/database/models/dataset_funding.py index 3534a0e7a..4dfe41ba5 100644 --- a/apiv2/database/models/dataset_funding.py +++ b/apiv2/database/models/dataset_funding.py @@ -5,21 +5,16 @@ Make changes to the template codegen/templates/database/models/class_name.py.j2 instead. """ -import uuid -import uuid6 -import datetime from typing import TYPE_CHECKING from platformics.database.models.base import Base -from sqlalchemy import ForeignKey, String, Float, Integer, Enum, Boolean, DateTime -from sqlalchemy.dialects.postgresql import UUID, JSONB -from sqlalchemy.orm import Mapped, mapped_column, relationship -from sqlalchemy.sql import func from platformics.database.models.file import File +from sqlalchemy import ForeignKey, Integer, String +from sqlalchemy.orm import Mapped, mapped_column, relationship if TYPE_CHECKING: - from platformics.database.models.file import File from database.models.dataset import Dataset + from platformics.database.models.file import File ... else: diff --git a/apiv2/database/models/deposition.py b/apiv2/database/models/deposition.py index d9485b90c..764484696 100644 --- a/apiv2/database/models/deposition.py +++ b/apiv2/database/models/deposition.py @@ -5,28 +5,24 @@ Make changes to the template codegen/templates/database/models/class_name.py.j2 instead. 
""" -import uuid -import uuid6 import datetime from typing import TYPE_CHECKING from platformics.database.models.base import Base -from sqlalchemy import ForeignKey, String, Float, Integer, Enum, Boolean, DateTime -from sqlalchemy.dialects.postgresql import UUID, JSONB -from sqlalchemy.orm import Mapped, mapped_column, relationship -from sqlalchemy.sql import func from platformics.database.models.file import File +from sqlalchemy import DateTime, Integer, String +from sqlalchemy.orm import Mapped, mapped_column, relationship if TYPE_CHECKING: - from platformics.database.models.file import File - from database.models.deposition_author import DepositionAuthor from database.models.alignment import Alignment from database.models.annotation import Annotation from database.models.dataset import Dataset + from database.models.deposition_author import DepositionAuthor + from database.models.deposition_type import DepositionType from database.models.frame import Frame from database.models.tiltseries import Tiltseries from database.models.tomogram import Tomogram - from database.models.deposition_type import DepositionType + from platformics.database.models.file import File ... else: diff --git a/apiv2/database/models/deposition_author.py b/apiv2/database/models/deposition_author.py index 026a0843d..d593f3c9b 100644 --- a/apiv2/database/models/deposition_author.py +++ b/apiv2/database/models/deposition_author.py @@ -5,21 +5,16 @@ Make changes to the template codegen/templates/database/models/class_name.py.j2 instead. """ -import uuid -import uuid6 -import datetime from typing import TYPE_CHECKING from platformics.database.models.base import Base -from sqlalchemy import ForeignKey, String, Float, Integer, Enum, Boolean, DateTime -from sqlalchemy.dialects.postgresql import UUID, JSONB -from sqlalchemy.orm import Mapped, mapped_column, relationship -from sqlalchemy.sql import func from platformics.database.models.file import File +from sqlalchemy import Boolean, ForeignKey, Integer, String +from sqlalchemy.orm import Mapped, mapped_column, relationship if TYPE_CHECKING: - from platformics.database.models.file import File from database.models.deposition import Deposition + from platformics.database.models.file import File ... else: diff --git a/apiv2/database/models/deposition_type.py b/apiv2/database/models/deposition_type.py index b799032e4..341e7e063 100644 --- a/apiv2/database/models/deposition_type.py +++ b/apiv2/database/models/deposition_type.py @@ -5,22 +5,17 @@ Make changes to the template codegen/templates/database/models/class_name.py.j2 instead. """ -import uuid -import uuid6 -import datetime from typing import TYPE_CHECKING from platformics.database.models.base import Base -from sqlalchemy import ForeignKey, String, Float, Integer, Enum, Boolean, DateTime -from sqlalchemy.dialects.postgresql import UUID, JSONB +from platformics.database.models.file import File +from sqlalchemy import Enum, ForeignKey, Integer from sqlalchemy.orm import Mapped, mapped_column, relationship -from sqlalchemy.sql import func from support.enums import deposition_types_enum -from platformics.database.models.file import File if TYPE_CHECKING: - from platformics.database.models.file import File from database.models.deposition import Deposition + from platformics.database.models.file import File ... 

 else:
diff --git a/apiv2/database/models/frame.py b/apiv2/database/models/frame.py
index 06ffbfeaa..a0f196ddf 100644
--- a/apiv2/database/models/frame.py
+++ b/apiv2/database/models/frame.py
@@ -5,23 +5,18 @@
 Make changes to the template codegen/templates/database/models/class_name.py.j2 instead.
 """

-import uuid
-import uuid6
-import datetime
 from typing import TYPE_CHECKING

 from platformics.database.models.base import Base
-from sqlalchemy import ForeignKey, String, Float, Integer, Enum, Boolean, DateTime
-from sqlalchemy.dialects.postgresql import UUID, JSONB
-from sqlalchemy.orm import Mapped, mapped_column, relationship
-from sqlalchemy.sql import func
 from platformics.database.models.file import File
+from sqlalchemy import Boolean, Float, ForeignKey, Integer, String
+from sqlalchemy.orm import Mapped, mapped_column, relationship

 if TYPE_CHECKING:
-    from platformics.database.models.file import File
     from database.models.deposition import Deposition
     from database.models.per_section_parameters import PerSectionParameters
     from database.models.run import Run
+    from platformics.database.models.file import File

     ...

 else:
diff --git a/apiv2/database/models/per_section_alignment_parameters.py b/apiv2/database/models/per_section_alignment_parameters.py
index 91c6942af..0edeed060 100644
--- a/apiv2/database/models/per_section_alignment_parameters.py
+++ b/apiv2/database/models/per_section_alignment_parameters.py
@@ -5,21 +5,16 @@
 Make changes to the template codegen/templates/database/models/class_name.py.j2 instead.
 """

-import uuid
-import uuid6
-import datetime
 from typing import TYPE_CHECKING

 from platformics.database.models.base import Base
-from sqlalchemy import ForeignKey, String, Float, Integer, Enum, Boolean, DateTime
-from sqlalchemy.dialects.postgresql import UUID, JSONB
-from sqlalchemy.orm import Mapped, mapped_column, relationship
-from sqlalchemy.sql import func
 from platformics.database.models.file import File
+from sqlalchemy import Float, ForeignKey, Integer
+from sqlalchemy.orm import Mapped, mapped_column, relationship

 if TYPE_CHECKING:
-    from platformics.database.models.file import File
     from database.models.alignment import Alignment
+    from platformics.database.models.file import File

     ...

 else:
diff --git a/apiv2/database/models/per_section_parameters.py b/apiv2/database/models/per_section_parameters.py
index 224a55225..b8ad54ea1 100644
--- a/apiv2/database/models/per_section_parameters.py
+++ b/apiv2/database/models/per_section_parameters.py
@@ -5,22 +5,17 @@
 Make changes to the template codegen/templates/database/models/class_name.py.j2 instead.
 """

-import uuid
-import uuid6
-import datetime
 from typing import TYPE_CHECKING

 from platformics.database.models.base import Base
-from sqlalchemy import ForeignKey, String, Float, Integer, Enum, Boolean, DateTime
-from sqlalchemy.dialects.postgresql import UUID, JSONB
-from sqlalchemy.orm import Mapped, mapped_column, relationship
-from sqlalchemy.sql import func
 from platformics.database.models.file import File
+from sqlalchemy import Float, ForeignKey, Integer
+from sqlalchemy.orm import Mapped, mapped_column, relationship

 if TYPE_CHECKING:
-    from platformics.database.models.file import File
     from database.models.frame import Frame
     from database.models.tiltseries import Tiltseries
+    from platformics.database.models.file import File

     ...

 else:
diff --git a/apiv2/database/models/run.py b/apiv2/database/models/run.py
index 5d66e4bd3..0cbddd866 100644
--- a/apiv2/database/models/run.py
+++ b/apiv2/database/models/run.py
@@ -5,27 +5,22 @@
 Make changes to the template codegen/templates/database/models/class_name.py.j2 instead.
 """

-import uuid
-import uuid6
-import datetime
 from typing import TYPE_CHECKING

 from platformics.database.models.base import Base
-from sqlalchemy import ForeignKey, String, Float, Integer, Enum, Boolean, DateTime
-from sqlalchemy.dialects.postgresql import UUID, JSONB
-from sqlalchemy.orm import Mapped, mapped_column, relationship
-from sqlalchemy.sql import func
 from platformics.database.models.file import File
+from sqlalchemy import ForeignKey, Integer, String
+from sqlalchemy.orm import Mapped, mapped_column, relationship

 if TYPE_CHECKING:
-    from platformics.database.models.file import File
     from database.models.alignment import Alignment
     from database.models.annotation import Annotation
     from database.models.dataset import Dataset
     from database.models.frame import Frame
     from database.models.tiltseries import Tiltseries
-    from database.models.tomogram_voxel_spacing import TomogramVoxelSpacing
     from database.models.tomogram import Tomogram
+    from database.models.tomogram_voxel_spacing import TomogramVoxelSpacing
+    from platformics.database.models.file import File

     ...

 else:
diff --git a/apiv2/database/models/tiltseries.py b/apiv2/database/models/tiltseries.py
index 3adbdacd2..b5782fcf8 100644
--- a/apiv2/database/models/tiltseries.py
+++ b/apiv2/database/models/tiltseries.py
@@ -5,25 +5,20 @@
 Make changes to the template codegen/templates/database/models/class_name.py.j2 instead.
 """

-import uuid
-import uuid6
-import datetime
 from typing import TYPE_CHECKING

 from platformics.database.models.base import Base
-from sqlalchemy import ForeignKey, String, Float, Integer, Enum, Boolean, DateTime
-from sqlalchemy.dialects.postgresql import UUID, JSONB
+from platformics.database.models.file import File
+from sqlalchemy import Boolean, Enum, Float, ForeignKey, Integer, String
 from sqlalchemy.orm import Mapped, mapped_column, relationship
-from sqlalchemy.sql import func
 from support.enums import tiltseries_microscope_manufacturer_enum
-from platformics.database.models.file import File

 if TYPE_CHECKING:
-    from platformics.database.models.file import File
     from database.models.alignment import Alignment
+    from database.models.deposition import Deposition
     from database.models.per_section_parameters import PerSectionParameters
     from database.models.run import Run
-    from database.models.deposition import Deposition
+    from platformics.database.models.file import File

     ...

 else:
@@ -76,7 +71,8 @@ class Tiltseries(Base):
     acceleration_voltage: Mapped[float] = mapped_column(Float, nullable=False)
     spherical_aberration_constant: Mapped[float] = mapped_column(Float, nullable=False)
     microscope_manufacturer: Mapped[tiltseries_microscope_manufacturer_enum] = mapped_column(
-        Enum(tiltseries_microscope_manufacturer_enum, native_enum=False), nullable=False
+        Enum(tiltseries_microscope_manufacturer_enum, native_enum=False),
+        nullable=False,
     )
     microscope_model: Mapped[str] = mapped_column(String, nullable=False)
     microscope_energy_filter: Mapped[str] = mapped_column(String, nullable=False)
diff --git a/apiv2/database/models/tomogram.py b/apiv2/database/models/tomogram.py
index 2a46fb113..cac931e27 100644
--- a/apiv2/database/models/tomogram.py
+++ b/apiv2/database/models/tomogram.py
@@ -5,31 +5,26 @@
 Make changes to the template codegen/templates/database/models/class_name.py.j2 instead.
 """

-import uuid
-import uuid6
-import datetime
 from typing import TYPE_CHECKING

 from platformics.database.models.base import Base
-from sqlalchemy import ForeignKey, String, Float, Integer, Enum, Boolean, DateTime
-from sqlalchemy.dialects.postgresql import UUID, JSONB
+from platformics.database.models.file import File
+from sqlalchemy import Boolean, Enum, Float, ForeignKey, Integer, String
 from sqlalchemy.orm import Mapped, mapped_column, relationship
-from sqlalchemy.sql import func
 from support.enums import (
     fiducial_alignment_status_enum,
-    tomogram_reconstruction_method_enum,
     tomogram_processing_enum,
+    tomogram_reconstruction_method_enum,
     tomogram_type_enum,
 )
-from platformics.database.models.file import File

 if TYPE_CHECKING:
-    from platformics.database.models.file import File
     from database.models.alignment import Alignment
-    from database.models.tomogram_author import TomogramAuthor
     from database.models.deposition import Deposition
     from database.models.run import Run
+    from database.models.tomogram_author import TomogramAuthor
     from database.models.tomogram_voxel_spacing import TomogramVoxelSpacing
+    from platformics.database.models.file import File

     ...

 else:
@@ -71,7 +66,10 @@ class Tomogram(Base):
         back_populates="tomograms",
     )
     tomogram_voxel_spacing_id: Mapped[int] = mapped_column(
-        Integer, ForeignKey("tomogram_voxel_spacing.id"), nullable=True, index=True
+        Integer,
+        ForeignKey("tomogram_voxel_spacing.id"),
+        nullable=True,
+        index=True,
     )
     tomogram_voxel_spacing: Mapped["TomogramVoxelSpacing"] = relationship(
         "TomogramVoxelSpacing",
@@ -84,13 +82,16 @@
     size_z: Mapped[float] = mapped_column(Float, nullable=False)
     voxel_spacing: Mapped[float] = mapped_column(Float, nullable=False)
     fiducial_alignment_status: Mapped[fiducial_alignment_status_enum] = mapped_column(
-        Enum(fiducial_alignment_status_enum, native_enum=False), nullable=False
+        Enum(fiducial_alignment_status_enum, native_enum=False),
+        nullable=False,
     )
     reconstruction_method: Mapped[tomogram_reconstruction_method_enum] = mapped_column(
-        Enum(tomogram_reconstruction_method_enum, native_enum=False), nullable=False
+        Enum(tomogram_reconstruction_method_enum, native_enum=False),
+        nullable=False,
     )
     processing: Mapped[tomogram_processing_enum] = mapped_column(
-        Enum(tomogram_processing_enum, native_enum=False), nullable=False
+        Enum(tomogram_processing_enum, native_enum=False),
+        nullable=False,
     )
     tomogram_version: Mapped[float] = mapped_column(Float, nullable=True)
     processing_software: Mapped[str] = mapped_column(String, nullable=True)
@@ -112,7 +113,8 @@
     key_photo_thumbnail_url: Mapped[str] = mapped_column(String, nullable=True)
     neuroglancer_config: Mapped[str] = mapped_column(String, nullable=True)
     tomogram_type: Mapped[tomogram_type_enum] = mapped_column(
-        Enum(tomogram_type_enum, native_enum=False), nullable=True
+        Enum(tomogram_type_enum, native_enum=False),
+        nullable=True,
     )
     is_standardized: Mapped[bool] = mapped_column(Boolean, nullable=False)
     id: Mapped[int] = mapped_column(Integer, nullable=False, index=True, autoincrement=True, primary_key=True)
diff --git a/apiv2/database/models/tomogram_author.py b/apiv2/database/models/tomogram_author.py
index 35745931f..8f3a0d520 100644
--- a/apiv2/database/models/tomogram_author.py
+++ b/apiv2/database/models/tomogram_author.py
@@ -5,21 +5,16 @@
 Make changes to the template codegen/templates/database/models/class_name.py.j2 instead.
 """

-import uuid
-import uuid6
-import datetime
 from typing import TYPE_CHECKING

 from platformics.database.models.base import Base
-from sqlalchemy import ForeignKey, String, Float, Integer, Enum, Boolean, DateTime
-from sqlalchemy.dialects.postgresql import UUID, JSONB
-from sqlalchemy.orm import Mapped, mapped_column, relationship
-from sqlalchemy.sql import func
 from platformics.database.models.file import File
+from sqlalchemy import Boolean, ForeignKey, Integer, String
+from sqlalchemy.orm import Mapped, mapped_column, relationship

 if TYPE_CHECKING:
-    from platformics.database.models.file import File
     from database.models.tomogram import Tomogram
+    from platformics.database.models.file import File

     ...

 else:
diff --git a/apiv2/database/models/tomogram_voxel_spacing.py b/apiv2/database/models/tomogram_voxel_spacing.py
index 29fbebd67..20bc248d0 100644
--- a/apiv2/database/models/tomogram_voxel_spacing.py
+++ b/apiv2/database/models/tomogram_voxel_spacing.py
@@ -5,23 +5,18 @@
 Make changes to the template codegen/templates/database/models/class_name.py.j2 instead.
""" -import uuid -import uuid6 -import datetime from typing import TYPE_CHECKING from platformics.database.models.base import Base -from sqlalchemy import ForeignKey, String, Float, Integer, Enum, Boolean, DateTime -from sqlalchemy.dialects.postgresql import UUID, JSONB -from sqlalchemy.orm import Mapped, mapped_column, relationship -from sqlalchemy.sql import func from platformics.database.models.file import File +from sqlalchemy import Float, ForeignKey, Integer, String +from sqlalchemy.orm import Mapped, mapped_column, relationship if TYPE_CHECKING: - from platformics.database.models.file import File from database.models.annotation_file import AnnotationFile from database.models.run import Run from database.models.tomogram import Tomogram + from platformics.database.models.file import File ... else: diff --git a/apiv2/graphql_api/helpers/alignment.py b/apiv2/graphql_api/helpers/alignment.py index d06352deb..91ba12e38 100644 --- a/apiv2/graphql_api/helpers/alignment.py +++ b/apiv2/graphql_api/helpers/alignment.py @@ -5,14 +5,13 @@ Make changes to the template codegen/templates/graphql_api/groupby_helpers.py.j2 instead. """ -from typing import Any, Optional, TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Optional + import strawberry -import datetime -import uuid -from support.enums import alignment_type_enum from graphql_api.helpers.deposition import DepositionGroupByOptions, build_deposition_groupby_output -from graphql_api.helpers.tiltseries import TiltseriesGroupByOptions, build_tiltseries_groupby_output from graphql_api.helpers.run import RunGroupByOptions, build_run_groupby_output +from graphql_api.helpers.tiltseries import TiltseriesGroupByOptions, build_tiltseries_groupby_output +from support.enums import alignment_type_enum if TYPE_CHECKING: from api.types.deposition import Deposition diff --git a/apiv2/graphql_api/helpers/annotation.py b/apiv2/graphql_api/helpers/annotation.py index dd58b6048..0252ec446 100644 --- a/apiv2/graphql_api/helpers/annotation.py +++ b/apiv2/graphql_api/helpers/annotation.py @@ -5,13 +5,13 @@ Make changes to the template codegen/templates/graphql_api/groupby_helpers.py.j2 instead. """ -from typing import Any, Optional, TYPE_CHECKING -import strawberry import datetime -import uuid -from support.enums import annotation_method_type_enum -from graphql_api.helpers.run import RunGroupByOptions, build_run_groupby_output +from typing import TYPE_CHECKING, Any, Optional + +import strawberry from graphql_api.helpers.deposition import DepositionGroupByOptions, build_deposition_groupby_output +from graphql_api.helpers.run import RunGroupByOptions, build_run_groupby_output +from support.enums import annotation_method_type_enum if TYPE_CHECKING: from api.types.run import Run diff --git a/apiv2/graphql_api/helpers/annotation_author.py b/apiv2/graphql_api/helpers/annotation_author.py index e2e8bf8ba..d1959a049 100644 --- a/apiv2/graphql_api/helpers/annotation_author.py +++ b/apiv2/graphql_api/helpers/annotation_author.py @@ -5,10 +5,9 @@ Make changes to the template codegen/templates/graphql_api/groupby_helpers.py.j2 instead. 
""" -from typing import Any, Optional, TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Optional + import strawberry -import datetime -import uuid from graphql_api.helpers.annotation import AnnotationGroupByOptions, build_annotation_groupby_output if TYPE_CHECKING: diff --git a/apiv2/graphql_api/helpers/annotation_file.py b/apiv2/graphql_api/helpers/annotation_file.py index ad9eb6938..e7ce50fcb 100644 --- a/apiv2/graphql_api/helpers/annotation_file.py +++ b/apiv2/graphql_api/helpers/annotation_file.py @@ -5,17 +5,16 @@ Make changes to the template codegen/templates/graphql_api/groupby_helpers.py.j2 instead. """ -from typing import Any, Optional, TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Optional + import strawberry -import datetime -import uuid -from support.enums import annotation_file_source_enum from graphql_api.helpers.alignment import AlignmentGroupByOptions, build_alignment_groupby_output from graphql_api.helpers.annotation_shape import AnnotationShapeGroupByOptions, build_annotation_shape_groupby_output from graphql_api.helpers.tomogram_voxel_spacing import ( TomogramVoxelSpacingGroupByOptions, build_tomogram_voxel_spacing_groupby_output, ) +from support.enums import annotation_file_source_enum if TYPE_CHECKING: from api.types.alignment import Alignment diff --git a/apiv2/graphql_api/helpers/annotation_shape.py b/apiv2/graphql_api/helpers/annotation_shape.py index 8cd58a432..5222433a2 100644 --- a/apiv2/graphql_api/helpers/annotation_shape.py +++ b/apiv2/graphql_api/helpers/annotation_shape.py @@ -5,12 +5,11 @@ Make changes to the template codegen/templates/graphql_api/groupby_helpers.py.j2 instead. """ -from typing import Any, Optional, TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Optional + import strawberry -import datetime -import uuid -from support.enums import annotation_file_shape_type_enum from graphql_api.helpers.annotation import AnnotationGroupByOptions, build_annotation_groupby_output +from support.enums import annotation_file_shape_type_enum if TYPE_CHECKING: from api.types.annotation import Annotation diff --git a/apiv2/graphql_api/helpers/dataset.py b/apiv2/graphql_api/helpers/dataset.py index 60008403c..07dd5ab13 100644 --- a/apiv2/graphql_api/helpers/dataset.py +++ b/apiv2/graphql_api/helpers/dataset.py @@ -5,12 +5,12 @@ Make changes to the template codegen/templates/graphql_api/groupby_helpers.py.j2 instead. """ -from typing import Any, Optional, TYPE_CHECKING -import strawberry import datetime -import uuid -from support.enums import sample_type_enum +from typing import TYPE_CHECKING, Any, Optional + +import strawberry from graphql_api.helpers.deposition import DepositionGroupByOptions, build_deposition_groupby_output +from support.enums import sample_type_enum if TYPE_CHECKING: from api.types.deposition import Deposition diff --git a/apiv2/graphql_api/helpers/dataset_author.py b/apiv2/graphql_api/helpers/dataset_author.py index 163ccb1b4..ece3dc054 100644 --- a/apiv2/graphql_api/helpers/dataset_author.py +++ b/apiv2/graphql_api/helpers/dataset_author.py @@ -5,10 +5,9 @@ Make changes to the template codegen/templates/graphql_api/groupby_helpers.py.j2 instead. 
""" -from typing import Any, Optional, TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Optional + import strawberry -import datetime -import uuid from graphql_api.helpers.dataset import DatasetGroupByOptions, build_dataset_groupby_output if TYPE_CHECKING: diff --git a/apiv2/graphql_api/helpers/dataset_funding.py b/apiv2/graphql_api/helpers/dataset_funding.py index 5655dc55c..d22b369bd 100644 --- a/apiv2/graphql_api/helpers/dataset_funding.py +++ b/apiv2/graphql_api/helpers/dataset_funding.py @@ -5,10 +5,9 @@ Make changes to the template codegen/templates/graphql_api/groupby_helpers.py.j2 instead. """ -from typing import Any, Optional, TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Optional + import strawberry -import datetime -import uuid from graphql_api.helpers.dataset import DatasetGroupByOptions, build_dataset_groupby_output if TYPE_CHECKING: diff --git a/apiv2/graphql_api/helpers/deposition.py b/apiv2/graphql_api/helpers/deposition.py index 54ec0d751..af53074bb 100644 --- a/apiv2/graphql_api/helpers/deposition.py +++ b/apiv2/graphql_api/helpers/deposition.py @@ -5,11 +5,10 @@ Make changes to the template codegen/templates/graphql_api/groupby_helpers.py.j2 instead. """ -from typing import Any, Optional, TYPE_CHECKING -import strawberry import datetime -import uuid +from typing import Any, Optional +import strawberry """ Define groupby options for Deposition type. diff --git a/apiv2/graphql_api/helpers/deposition_author.py b/apiv2/graphql_api/helpers/deposition_author.py index 831dc4298..56b2fb96d 100644 --- a/apiv2/graphql_api/helpers/deposition_author.py +++ b/apiv2/graphql_api/helpers/deposition_author.py @@ -5,10 +5,9 @@ Make changes to the template codegen/templates/graphql_api/groupby_helpers.py.j2 instead. """ -from typing import Any, Optional, TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Optional + import strawberry -import datetime -import uuid from graphql_api.helpers.deposition import DepositionGroupByOptions, build_deposition_groupby_output if TYPE_CHECKING: diff --git a/apiv2/graphql_api/helpers/deposition_type.py b/apiv2/graphql_api/helpers/deposition_type.py index 24a8956c4..3a3e7ea30 100644 --- a/apiv2/graphql_api/helpers/deposition_type.py +++ b/apiv2/graphql_api/helpers/deposition_type.py @@ -5,12 +5,11 @@ Make changes to the template codegen/templates/graphql_api/groupby_helpers.py.j2 instead. """ -from typing import Any, Optional, TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Optional + import strawberry -import datetime -import uuid -from support.enums import deposition_types_enum from graphql_api.helpers.deposition import DepositionGroupByOptions, build_deposition_groupby_output +from support.enums import deposition_types_enum if TYPE_CHECKING: from api.types.deposition import Deposition diff --git a/apiv2/graphql_api/helpers/frame.py b/apiv2/graphql_api/helpers/frame.py index 6fa9e94da..fbb14d451 100644 --- a/apiv2/graphql_api/helpers/frame.py +++ b/apiv2/graphql_api/helpers/frame.py @@ -5,10 +5,9 @@ Make changes to the template codegen/templates/graphql_api/groupby_helpers.py.j2 instead. 
""" -from typing import Any, Optional, TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Optional + import strawberry -import datetime -import uuid from graphql_api.helpers.deposition import DepositionGroupByOptions, build_deposition_groupby_output from graphql_api.helpers.run import RunGroupByOptions, build_run_groupby_output diff --git a/apiv2/graphql_api/helpers/per_section_alignment_parameters.py b/apiv2/graphql_api/helpers/per_section_alignment_parameters.py index eab038851..f93b52d1d 100644 --- a/apiv2/graphql_api/helpers/per_section_alignment_parameters.py +++ b/apiv2/graphql_api/helpers/per_section_alignment_parameters.py @@ -5,10 +5,9 @@ Make changes to the template codegen/templates/graphql_api/groupby_helpers.py.j2 instead. """ -from typing import Any, Optional, TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Optional + import strawberry -import datetime -import uuid from graphql_api.helpers.alignment import AlignmentGroupByOptions, build_alignment_groupby_output if TYPE_CHECKING: diff --git a/apiv2/graphql_api/helpers/per_section_parameters.py b/apiv2/graphql_api/helpers/per_section_parameters.py index 4851bd7d1..77dbcf5c5 100644 --- a/apiv2/graphql_api/helpers/per_section_parameters.py +++ b/apiv2/graphql_api/helpers/per_section_parameters.py @@ -5,10 +5,9 @@ Make changes to the template codegen/templates/graphql_api/groupby_helpers.py.j2 instead. """ -from typing import Any, Optional, TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Optional + import strawberry -import datetime -import uuid from graphql_api.helpers.frame import FrameGroupByOptions, build_frame_groupby_output from graphql_api.helpers.tiltseries import TiltseriesGroupByOptions, build_tiltseries_groupby_output diff --git a/apiv2/graphql_api/helpers/run.py b/apiv2/graphql_api/helpers/run.py index b79f7dacd..a49da369e 100644 --- a/apiv2/graphql_api/helpers/run.py +++ b/apiv2/graphql_api/helpers/run.py @@ -5,10 +5,9 @@ Make changes to the template codegen/templates/graphql_api/groupby_helpers.py.j2 instead. """ -from typing import Any, Optional, TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Optional + import strawberry -import datetime -import uuid from graphql_api.helpers.dataset import DatasetGroupByOptions, build_dataset_groupby_output if TYPE_CHECKING: diff --git a/apiv2/graphql_api/helpers/tiltseries.py b/apiv2/graphql_api/helpers/tiltseries.py index 09698aeb6..2636056ed 100644 --- a/apiv2/graphql_api/helpers/tiltseries.py +++ b/apiv2/graphql_api/helpers/tiltseries.py @@ -5,13 +5,12 @@ Make changes to the template codegen/templates/graphql_api/groupby_helpers.py.j2 instead. """ -from typing import Any, Optional, TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Optional + import strawberry -import datetime -import uuid -from support.enums import tiltseries_microscope_manufacturer_enum -from graphql_api.helpers.run import RunGroupByOptions, build_run_groupby_output from graphql_api.helpers.deposition import DepositionGroupByOptions, build_deposition_groupby_output +from graphql_api.helpers.run import RunGroupByOptions, build_run_groupby_output +from support.enums import tiltseries_microscope_manufacturer_enum if TYPE_CHECKING: from api.types.run import Run diff --git a/apiv2/graphql_api/helpers/tomogram.py b/apiv2/graphql_api/helpers/tomogram.py index 191e3fc7b..41cfef3df 100644 --- a/apiv2/graphql_api/helpers/tomogram.py +++ b/apiv2/graphql_api/helpers/tomogram.py @@ -5,16 +5,9 @@ Make changes to the template codegen/templates/graphql_api/groupby_helpers.py.j2 instead. 
""" -from typing import Any, Optional, TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Optional + import strawberry -import datetime -import uuid -from support.enums import ( - fiducial_alignment_status_enum, - tomogram_reconstruction_method_enum, - tomogram_processing_enum, - tomogram_type_enum, -) from graphql_api.helpers.alignment import AlignmentGroupByOptions, build_alignment_groupby_output from graphql_api.helpers.deposition import DepositionGroupByOptions, build_deposition_groupby_output from graphql_api.helpers.run import RunGroupByOptions, build_run_groupby_output @@ -22,6 +15,12 @@ TomogramVoxelSpacingGroupByOptions, build_tomogram_voxel_spacing_groupby_output, ) +from support.enums import ( + fiducial_alignment_status_enum, + tomogram_processing_enum, + tomogram_reconstruction_method_enum, + tomogram_type_enum, +) if TYPE_CHECKING: from api.types.alignment import Alignment diff --git a/apiv2/graphql_api/helpers/tomogram_author.py b/apiv2/graphql_api/helpers/tomogram_author.py index 89dac199a..d4688b936 100644 --- a/apiv2/graphql_api/helpers/tomogram_author.py +++ b/apiv2/graphql_api/helpers/tomogram_author.py @@ -5,10 +5,9 @@ Make changes to the template codegen/templates/graphql_api/groupby_helpers.py.j2 instead. """ -from typing import Any, Optional, TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Optional + import strawberry -import datetime -import uuid from graphql_api.helpers.tomogram import TomogramGroupByOptions, build_tomogram_groupby_output if TYPE_CHECKING: diff --git a/apiv2/graphql_api/helpers/tomogram_voxel_spacing.py b/apiv2/graphql_api/helpers/tomogram_voxel_spacing.py index 377a114cc..7079e2551 100644 --- a/apiv2/graphql_api/helpers/tomogram_voxel_spacing.py +++ b/apiv2/graphql_api/helpers/tomogram_voxel_spacing.py @@ -5,10 +5,9 @@ Make changes to the template codegen/templates/graphql_api/groupby_helpers.py.j2 instead. """ -from typing import Any, Optional, TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Optional + import strawberry -import datetime -import uuid from graphql_api.helpers.run import RunGroupByOptions, build_run_groupby_output if TYPE_CHECKING: diff --git a/apiv2/graphql_api/mutations.py b/apiv2/graphql_api/mutations.py index 4ef3a6e1e..6dbb174ee 100644 --- a/apiv2/graphql_api/mutations.py +++ b/apiv2/graphql_api/mutations.py @@ -5,82 +5,83 @@ Make changes to the template codegen/templates/graphql_api/mutations.py.j2 instead. 
""" -import strawberry from typing import Sequence -from graphql_api.types.alignment import Alignment, create_alignment, update_alignment, delete_alignment + +import strawberry +from graphql_api.types.alignment import Alignment, create_alignment, delete_alignment, update_alignment +from graphql_api.types.annotation import Annotation, create_annotation, delete_annotation, update_annotation from graphql_api.types.annotation_author import ( AnnotationAuthor, create_annotation_author, - update_annotation_author, delete_annotation_author, + update_annotation_author, ) from graphql_api.types.annotation_file import ( AnnotationFile, create_annotation_file, - update_annotation_file, delete_annotation_file, + update_annotation_file, ) from graphql_api.types.annotation_shape import ( AnnotationShape, create_annotation_shape, - update_annotation_shape, delete_annotation_shape, + update_annotation_shape, ) -from graphql_api.types.annotation import Annotation, create_annotation, update_annotation, delete_annotation +from graphql_api.types.dataset import Dataset, create_dataset, delete_dataset, update_dataset from graphql_api.types.dataset_author import ( DatasetAuthor, create_dataset_author, - update_dataset_author, delete_dataset_author, + update_dataset_author, ) from graphql_api.types.dataset_funding import ( DatasetFunding, create_dataset_funding, - update_dataset_funding, delete_dataset_funding, + update_dataset_funding, ) -from graphql_api.types.dataset import Dataset, create_dataset, update_dataset, delete_dataset +from graphql_api.types.deposition import Deposition, create_deposition, delete_deposition, update_deposition from graphql_api.types.deposition_author import ( DepositionAuthor, create_deposition_author, - update_deposition_author, delete_deposition_author, + update_deposition_author, ) -from graphql_api.types.deposition import Deposition, create_deposition, update_deposition, delete_deposition from graphql_api.types.deposition_type import ( DepositionType, create_deposition_type, - update_deposition_type, delete_deposition_type, + update_deposition_type, ) -from graphql_api.types.frame import Frame, create_frame, update_frame, delete_frame -from graphql_api.types.per_section_parameters import ( - PerSectionParameters, - create_per_section_parameters, - update_per_section_parameters, - delete_per_section_parameters, -) +from graphql_api.types.frame import Frame, create_frame, delete_frame, update_frame from graphql_api.types.per_section_alignment_parameters import ( PerSectionAlignmentParameters, create_per_section_alignment_parameters, - update_per_section_alignment_parameters, delete_per_section_alignment_parameters, + update_per_section_alignment_parameters, ) -from graphql_api.types.run import Run, create_run, update_run, delete_run -from graphql_api.types.tiltseries import Tiltseries, create_tiltseries, update_tiltseries, delete_tiltseries +from graphql_api.types.per_section_parameters import ( + PerSectionParameters, + create_per_section_parameters, + delete_per_section_parameters, + update_per_section_parameters, +) +from graphql_api.types.run import Run, create_run, delete_run, update_run +from graphql_api.types.tiltseries import Tiltseries, create_tiltseries, delete_tiltseries, update_tiltseries +from graphql_api.types.tomogram import Tomogram, create_tomogram, delete_tomogram, update_tomogram from graphql_api.types.tomogram_author import ( TomogramAuthor, create_tomogram_author, - update_tomogram_author, delete_tomogram_author, + update_tomogram_author, ) from 
graphql_api.types.tomogram_voxel_spacing import ( TomogramVoxelSpacing, create_tomogram_voxel_spacing, - update_tomogram_voxel_spacing, delete_tomogram_voxel_spacing, + update_tomogram_voxel_spacing, ) -from graphql_api.types.tomogram import Tomogram, create_tomogram, update_tomogram, delete_tomogram @strawberry.type diff --git a/apiv2/graphql_api/queries.py b/apiv2/graphql_api/queries.py index 812abca27..dadb022c7 100644 --- a/apiv2/graphql_api/queries.py +++ b/apiv2/graphql_api/queries.py @@ -5,105 +5,105 @@ Make changes to the template codegen/templates/graphql_api/queries.py.j2 instead. """ +from typing import Sequence + import strawberry -from strawberry import relay -from typing import Sequence, List -from graphql_api.types.alignment import Alignment, resolve_alignments, AlignmentAggregate, resolve_alignments_aggregate +from graphql_api.types.alignment import Alignment, AlignmentAggregate, resolve_alignments, resolve_alignments_aggregate +from graphql_api.types.annotation import ( + Annotation, + AnnotationAggregate, + resolve_annotations, + resolve_annotations_aggregate, +) from graphql_api.types.annotation_author import ( AnnotationAuthor, - resolve_annotation_authors, AnnotationAuthorAggregate, + resolve_annotation_authors, resolve_annotation_authors_aggregate, ) from graphql_api.types.annotation_file import ( AnnotationFile, - resolve_annotation_files, AnnotationFileAggregate, + resolve_annotation_files, resolve_annotation_files_aggregate, ) from graphql_api.types.annotation_shape import ( AnnotationShape, - resolve_annotation_shapes, AnnotationShapeAggregate, + resolve_annotation_shapes, resolve_annotation_shapes_aggregate, ) -from graphql_api.types.annotation import ( - Annotation, - resolve_annotations, - AnnotationAggregate, - resolve_annotations_aggregate, -) +from graphql_api.types.dataset import Dataset, DatasetAggregate, resolve_datasets, resolve_datasets_aggregate from graphql_api.types.dataset_author import ( DatasetAuthor, - resolve_dataset_authors, DatasetAuthorAggregate, + resolve_dataset_authors, resolve_dataset_authors_aggregate, ) from graphql_api.types.dataset_funding import ( DatasetFunding, - resolve_dataset_funding, DatasetFundingAggregate, + resolve_dataset_funding, resolve_dataset_funding_aggregate, ) -from graphql_api.types.dataset import Dataset, resolve_datasets, DatasetAggregate, resolve_datasets_aggregate -from graphql_api.types.deposition_author import ( - DepositionAuthor, - resolve_deposition_authors, - DepositionAuthorAggregate, - resolve_deposition_authors_aggregate, -) from graphql_api.types.deposition import ( Deposition, - resolve_depositions, DepositionAggregate, + resolve_depositions, resolve_depositions_aggregate, ) +from graphql_api.types.deposition_author import ( + DepositionAuthor, + DepositionAuthorAggregate, + resolve_deposition_authors, + resolve_deposition_authors_aggregate, +) from graphql_api.types.deposition_type import ( DepositionType, - resolve_deposition_types, DepositionTypeAggregate, + resolve_deposition_types, resolve_deposition_types_aggregate, ) -from graphql_api.types.frame import Frame, resolve_frames, FrameAggregate, resolve_frames_aggregate -from graphql_api.types.per_section_parameters import ( - PerSectionParameters, - resolve_per_section_parameters, - PerSectionParametersAggregate, - resolve_per_section_parameters_aggregate, -) +from graphql_api.types.frame import Frame, FrameAggregate, resolve_frames, resolve_frames_aggregate from graphql_api.types.per_section_alignment_parameters import ( 
     PerSectionAlignmentParameters,
-    resolve_per_section_alignment_parameters,
     PerSectionAlignmentParametersAggregate,
+    resolve_per_section_alignment_parameters,
     resolve_per_section_alignment_parameters_aggregate,
 )
-from graphql_api.types.run import Run, resolve_runs, RunAggregate, resolve_runs_aggregate
+from graphql_api.types.per_section_parameters import (
+    PerSectionParameters,
+    PerSectionParametersAggregate,
+    resolve_per_section_parameters,
+    resolve_per_section_parameters_aggregate,
+)
+from graphql_api.types.run import Run, RunAggregate, resolve_runs, resolve_runs_aggregate
 from graphql_api.types.tiltseries import (
     Tiltseries,
-    resolve_tiltseries,
     TiltseriesAggregate,
+    resolve_tiltseries,
     resolve_tiltseries_aggregate,
 )
+from graphql_api.types.tomogram import Tomogram, TomogramAggregate, resolve_tomograms, resolve_tomograms_aggregate
 from graphql_api.types.tomogram_author import (
     TomogramAuthor,
-    resolve_tomogram_authors,
     TomogramAuthorAggregate,
+    resolve_tomogram_authors,
     resolve_tomogram_authors_aggregate,
 )
 from graphql_api.types.tomogram_voxel_spacing import (
     TomogramVoxelSpacing,
-    resolve_tomogram_voxel_spacings,
     TomogramVoxelSpacingAggregate,
+    resolve_tomogram_voxel_spacings,
     resolve_tomogram_voxel_spacings_aggregate,
 )
-from graphql_api.types.tomogram import Tomogram, resolve_tomograms, TomogramAggregate, resolve_tomograms_aggregate


 @strawberry.type
 class Query:
     # Allow relay-style queries by node ID
-    node: relay.Node = relay.node()
-    nodes: List[relay.Node] = relay.node()
+    # node: relay.Node = relay.node()
+    # nodes: List[relay.Node] = relay.node()

     #
diff --git a/apiv2/graphql_api/types/alignment.py b/apiv2/graphql_api/types/alignment.py
index 2d3a2d104..7ef809f36 100644
--- a/apiv2/graphql_api/types/alignment.py
+++ b/apiv2/graphql_api/types/alignment.py
@@ -8,38 +8,33 @@
 # ruff: noqa: E501 Line too long

+import datetime
+import enum
 import typing
-from typing import TYPE_CHECKING, Annotated, Any, Optional, Sequence, Callable, List
+from typing import TYPE_CHECKING, Annotated, Optional, Sequence

-import platformics.database.models as base_db
 import database.models as db
 import strawberry
-import datetime
-from platformics.graphql_api.core.query_builder import get_db_rows, get_aggregate_db_rows
-from validators.alignment import AlignmentCreateInputValidator
-from validators.alignment import AlignmentUpdateInputValidator
+from fastapi import Depends
 from graphql_api.helpers.alignment import AlignmentGroupByOptions, build_alignment_groupby_output
-from platformics.graphql_api.core.relay_interface import EntityInterface
 from graphql_api.types.annotation_file import AnnotationFileAggregate, format_annotation_file_aggregate_output
 from graphql_api.types.per_section_alignment_parameters import (
     PerSectionAlignmentParametersAggregate,
     format_per_section_alignment_parameters_aggregate_output,
 )
 from graphql_api.types.tomogram import TomogramAggregate, format_tomogram_aggregate_output
-from fastapi import Depends
+from platformics.graphql_api.core.deps import get_authz_client, get_db_session, is_system_user, require_auth_principal
 from platformics.graphql_api.core.errors import PlatformicsError
-from platformics.graphql_api.core.deps import get_authz_client, get_db_session, require_auth_principal, is_system_user
+from platformics.graphql_api.core.query_builder import get_aggregate_db_rows, get_db_rows
 from platformics.graphql_api.core.query_input_types import (
-    aggregator_map,
-    orderBy,
     EnumComparators,
-    DatetimeComparators,
-    IntComparators,
     FloatComparators,
diff --git a/apiv2/graphql_api/types/alignment.py b/apiv2/graphql_api/types/alignment.py
index 2d3a2d104..7ef809f36 100644
--- a/apiv2/graphql_api/types/alignment.py
+++ b/apiv2/graphql_api/types/alignment.py
@@ -8,38 +8,33 @@
 
 # ruff: noqa: E501 Line too long
 
+import datetime
+import enum
 import typing
-from typing import TYPE_CHECKING, Annotated, Any, Optional, Sequence, Callable, List
+from typing import TYPE_CHECKING, Annotated, Optional, Sequence
 
-import platformics.database.models as base_db
 import database.models as db
 import strawberry
-import datetime
-from platformics.graphql_api.core.query_builder import get_db_rows, get_aggregate_db_rows
-from validators.alignment import AlignmentCreateInputValidator
-from validators.alignment import AlignmentUpdateInputValidator
+from fastapi import Depends
 from graphql_api.helpers.alignment import AlignmentGroupByOptions, build_alignment_groupby_output
-from platformics.graphql_api.core.relay_interface import EntityInterface
 from graphql_api.types.annotation_file import AnnotationFileAggregate, format_annotation_file_aggregate_output
 from graphql_api.types.per_section_alignment_parameters import (
     PerSectionAlignmentParametersAggregate,
     format_per_section_alignment_parameters_aggregate_output,
 )
 from graphql_api.types.tomogram import TomogramAggregate, format_tomogram_aggregate_output
-from fastapi import Depends
+from platformics.graphql_api.core.deps import get_authz_client, get_db_session, is_system_user, require_auth_principal
 from platformics.graphql_api.core.errors import PlatformicsError
-from platformics.graphql_api.core.deps import get_authz_client, get_db_session, require_auth_principal, is_system_user
+from platformics.graphql_api.core.query_builder import get_aggregate_db_rows, get_db_rows
 from platformics.graphql_api.core.query_input_types import (
-    aggregator_map,
-    orderBy,
     EnumComparators,
-    DatetimeComparators,
-    IntComparators,
     FloatComparators,
+    IntComparators,
     StrComparators,
-    UUIDComparators,
-    BoolComparators,
+    aggregator_map,
+    orderBy,
 )
+from platformics.graphql_api.core.relay_interface import EntityInterface
 from platformics.graphql_api.core.strawberry_extensions import DependencyExtension
 from platformics.security.authorization import AuthzAction, AuthzClient, Principal
 from sqlalchemy import inspect
@@ -47,25 +42,25 @@
 from sqlalchemy.ext.asyncio import AsyncSession
 from strawberry import relay
 from strawberry.types import Info
+from support.enums import alignment_type_enum
 from support.limit_offset import LimitOffsetClause
 from typing_extensions import TypedDict
-import enum
-from support.enums import alignment_type_enum
+from validators.alignment import AlignmentCreateInputValidator, AlignmentUpdateInputValidator
 
 E = typing.TypeVar("E")
 T = typing.TypeVar("T")
 
 if TYPE_CHECKING:
-    from graphql_api.types.annotation_file import AnnotationFileOrderByClause, AnnotationFileWhereClause, AnnotationFile
+    from graphql_api.types.annotation_file import AnnotationFile, AnnotationFileOrderByClause, AnnotationFileWhereClause
+    from graphql_api.types.deposition import Deposition, DepositionOrderByClause, DepositionWhereClause
     from graphql_api.types.per_section_alignment_parameters import (
+        PerSectionAlignmentParameters,
         PerSectionAlignmentParametersOrderByClause,
         PerSectionAlignmentParametersWhereClause,
-        PerSectionAlignmentParameters,
     )
-    from graphql_api.types.deposition import DepositionOrderByClause, DepositionWhereClause, Deposition
-    from graphql_api.types.tiltseries import TiltseriesOrderByClause, TiltseriesWhereClause, Tiltseries
-    from graphql_api.types.tomogram import TomogramOrderByClause, TomogramWhereClause, Tomogram
-    from graphql_api.types.run import RunOrderByClause, RunWhereClause, Run
+    from graphql_api.types.run import Run, RunOrderByClause, RunWhereClause
+    from graphql_api.types.tiltseries import Tiltseries, TiltseriesOrderByClause, TiltseriesWhereClause
+    from graphql_api.types.tomogram import Tomogram, TomogramOrderByClause, TomogramWhereClause
 
     pass
 else:
@@ -101,7 +96,7 @@
 @relay.connection(
     relay.ListConnection[
         Annotated["AnnotationFile", strawberry.lazy("graphql_api.types.annotation_file")]
-    ]  # type:ignore
+    ],  # type:ignore
 )
 async def load_annotation_file_rows(
     root: "Alignment",
@@ -135,9 +130,10 @@ async def load_annotation_file_aggregate_rows(
 @relay.connection(
     relay.ListConnection[
         Annotated[
-            "PerSectionAlignmentParameters", strawberry.lazy("graphql_api.types.per_section_alignment_parameters")
+            "PerSectionAlignmentParameters",
+            strawberry.lazy("graphql_api.types.per_section_alignment_parameters"),
         ]
-    ]  # type:ignore
+    ],  # type:ignore
 )
 async def load_per_section_alignment_parameters_rows(
     root: "Alignment",
@@ -179,7 +175,8 @@ async def load_per_section_alignment_parameters_aggregate_rows(
     ) = None,
 ) -> Optional[
     Annotated[
-        "PerSectionAlignmentParametersAggregate", strawberry.lazy("graphql_api.types.per_section_alignment_parameters")
+        "PerSectionAlignmentParametersAggregate",
+        strawberry.lazy("graphql_api.types.per_section_alignment_parameters"),
     ]
 ]:
     selections = info.selected_fields[0].selections[0].selections
@@ -222,7 +219,7 @@ async def load_tiltseries_rows(
 @relay.connection(
-    relay.ListConnection[Annotated["Tomogram", strawberry.lazy("graphql_api.types.tomogram")]]  # type:ignore
+    relay.ListConnection[Annotated["Tomogram", strawberry.lazy("graphql_api.types.tomogram")]],  # type:ignore
 )
 async def load_tomogram_rows(
     root: "Alignment",
@@ -358,7 +355,8 @@ class Alignment(EntityInterface):
     ] = load_annotation_file_aggregate_rows  # type:ignore
     per_section_alignments: Sequence[
         Annotated[
-            "PerSectionAlignmentParameters", strawberry.lazy("graphql_api.types.per_section_alignment_parameters")
+            "PerSectionAlignmentParameters",
+            strawberry.lazy("graphql_api.types.per_section_alignment_parameters"),
         ]
     ] = load_per_section_alignment_parameters_rows  # type:ignore
     per_section_alignments_aggregate: Optional[
@@ -382,28 +380,36 @@ class Alignment(EntityInterface):
     run: Optional[Annotated["Run", strawberry.lazy("graphql_api.types.run")]] = load_run_rows  # type:ignore
     alignment: str = strawberry.field(description="Describe a tiltseries alignment")
     alignment_type: Optional[alignment_type_enum] = strawberry.field(
-        description="Type of alignment included, i.e. is a non-rigid alignment included?", default=None
+        description="Type of alignment included, i.e. is a non-rigid alignment included?",
+        default=None,
     )
     volume_x_dimension: Optional[float] = strawberry.field(
-        description="X dimension of the reconstruction volume in angstrom", default=None
+        description="X dimension of the reconstruction volume in angstrom",
+        default=None,
     )
     volume_y_dimension: Optional[float] = strawberry.field(
-        description="Y dimension of the reconstruction volume in angstrom", default=None
+        description="Y dimension of the reconstruction volume in angstrom",
+        default=None,
     )
     volume_z_dimension: Optional[float] = strawberry.field(
-        description="Z dimension of the reconstruction volume in angstrom", default=None
+        description="Z dimension of the reconstruction volume in angstrom",
+        default=None,
     )
     volume_x_offset: Optional[float] = strawberry.field(
-        description="X shift of the reconstruction volume in angstrom", default=None
+        description="X shift of the reconstruction volume in angstrom",
+        default=None,
     )
     volume_y_offset: Optional[float] = strawberry.field(
-        description="Y shift of the reconstruction volume in angstrom", default=None
+        description="Y shift of the reconstruction volume in angstrom",
+        default=None,
     )
     volume_z_offset: Optional[float] = strawberry.field(
-        description="Z shift of the reconstruction volume in angstrom", default=None
+        description="Z shift of the reconstruction volume in angstrom",
+        default=None,
     )
     volume_x_rotation: Optional[float] = strawberry.field(
-        description="Additional X rotation of the reconstruction volume in degrees", default=None
+        description="Additional X rotation of the reconstruction volume in degrees",
+        default=None,
     )
     tilt_offset: Optional[float] = strawberry.field(description="Additional tilt offset in degrees", default=None)
     local_alignment_file: Optional[str] = strawberry.field(description="Path to the local alignment file", default=None)
@@ -534,28 +540,36 @@ class AlignmentCreateInput:
     run_id: Optional[strawberry.ID] = strawberry.field(description=None, default=None)
     alignment: str = strawberry.field(description="Describe a tiltseries alignment")
     alignment_type: Optional[alignment_type_enum] = strawberry.field(
-        description="Type of alignment included, i.e. is a non-rigid alignment included?", default=None
+        description="Type of alignment included, i.e. is a non-rigid alignment included?",
+        default=None,
     )
     volume_x_dimension: Optional[float] = strawberry.field(
-        description="X dimension of the reconstruction volume in angstrom", default=None
+        description="X dimension of the reconstruction volume in angstrom",
+        default=None,
     )
     volume_y_dimension: Optional[float] = strawberry.field(
-        description="Y dimension of the reconstruction volume in angstrom", default=None
+        description="Y dimension of the reconstruction volume in angstrom",
+        default=None,
     )
     volume_z_dimension: Optional[float] = strawberry.field(
-        description="Z dimension of the reconstruction volume in angstrom", default=None
+        description="Z dimension of the reconstruction volume in angstrom",
+        default=None,
     )
     volume_x_offset: Optional[float] = strawberry.field(
-        description="X shift of the reconstruction volume in angstrom", default=None
+        description="X shift of the reconstruction volume in angstrom",
+        default=None,
     )
     volume_y_offset: Optional[float] = strawberry.field(
-        description="Y shift of the reconstruction volume in angstrom", default=None
+        description="Y shift of the reconstruction volume in angstrom",
+        default=None,
     )
     volume_z_offset: Optional[float] = strawberry.field(
-        description="Z shift of the reconstruction volume in angstrom", default=None
+        description="Z shift of the reconstruction volume in angstrom",
+        default=None,
     )
     volume_x_rotation: Optional[float] = strawberry.field(
-        description="Additional X rotation of the reconstruction volume in degrees", default=None
+        description="Additional X rotation of the reconstruction volume in degrees",
+        default=None,
     )
     tilt_offset: Optional[float] = strawberry.field(description="Additional tilt offset in degrees", default=None)
     local_alignment_file: Optional[str] = strawberry.field(description="Path to the local alignment file", default=None)
@@ -569,28 +583,36 @@ class AlignmentUpdateInput:
     run_id: Optional[strawberry.ID] = strawberry.field(description=None, default=None)
     alignment: Optional[str] = strawberry.field(description="Describe a tiltseries alignment")
     alignment_type: Optional[alignment_type_enum] = strawberry.field(
-        description="Type of alignment included, i.e. is a non-rigid alignment included?", default=None
+        description="Type of alignment included, i.e. is a non-rigid alignment included?",
+        default=None,
     )
     volume_x_dimension: Optional[float] = strawberry.field(
-        description="X dimension of the reconstruction volume in angstrom", default=None
+        description="X dimension of the reconstruction volume in angstrom",
+        default=None,
     )
     volume_y_dimension: Optional[float] = strawberry.field(
-        description="Y dimension of the reconstruction volume in angstrom", default=None
+        description="Y dimension of the reconstruction volume in angstrom",
+        default=None,
     )
     volume_z_dimension: Optional[float] = strawberry.field(
-        description="Z dimension of the reconstruction volume in angstrom", default=None
+        description="Z dimension of the reconstruction volume in angstrom",
+        default=None,
    )
     volume_x_offset: Optional[float] = strawberry.field(
-        description="X shift of the reconstruction volume in angstrom", default=None
+        description="X shift of the reconstruction volume in angstrom",
+        default=None,
     )
     volume_y_offset: Optional[float] = strawberry.field(
-        description="Y shift of the reconstruction volume in angstrom", default=None
+        description="Y shift of the reconstruction volume in angstrom",
+        default=None,
     )
     volume_z_offset: Optional[float] = strawberry.field(
-        description="Z shift of the reconstruction volume in angstrom", default=None
+        description="Z shift of the reconstruction volume in angstrom",
+        default=None,
     )
     volume_x_rotation: Optional[float] = strawberry.field(
-        description="Additional X rotation of the reconstruction volume in degrees", default=None
+        description="Additional X rotation of the reconstruction volume in degrees",
+        default=None,
     )
     tilt_offset: Optional[float] = strawberry.field(description="Additional tilt offset in degrees", default=None)
     local_alignment_file: Optional[str] = strawberry.field(description="Path to the local alignment file", default=None)
@@ -629,7 +651,7 @@ def format_alignment_aggregate_output(query_results: Sequence[RowMapping] | RowM
     format the results using the proper GraphQL types.
     """
     aggregate = []
-    if not type(query_results) is list:
+    if type(query_results) is not list:
         query_results = [query_results]  # type: ignore
     for row in query_results:
         aggregate.append(format_alignment_aggregate_row(row))
@@ -648,10 +670,10 @@ def format_alignment_aggregate_row(row: RowMapping) -> AlignmentAggregateFunctio
         aggregate = key.split("_", 1)
         if aggregate[0] not in aggregator_map.keys():
             # Turn list of groupby keys into nested objects
-            if not getattr(output, "groupBy"):
-                setattr(output, "groupBy", AlignmentGroupByOptions())
-            group = build_alignment_groupby_output(getattr(output, "groupBy"), group_keys, value)
-            setattr(output, "groupBy", group)
+            if not output.groupBy:
+                output.groupBy = AlignmentGroupByOptions()
+            group = build_alignment_groupby_output(output.groupBy, group_keys, value)
+            output.groupBy = group
         else:
             aggregate_name = aggregate[0]
             if aggregate_name == "count":
@@ -682,8 +704,8 @@ async def resolve_alignments_aggregate(
     # Get the selected aggregate functions and columns to operate on, and groupby options if any were provided.
     # TODO: not sure why selected_fields is a list
     selections = info.selected_fields[0].selections[0].selections
-    aggregate_selections = [selection for selection in selections if getattr(selection, "name") != "groupBy"]
-    groupby_selections = [selection for selection in selections if getattr(selection, "name") == "groupBy"]
+    aggregate_selections = [selection for selection in selections if selection.name != "groupBy"]
+    groupby_selections = [selection for selection in selections if selection.name == "groupBy"]
     groupby_selections = groupby_selections[0].selections if groupby_selections else []
 
     if not aggregate_selections:
@@ -740,7 +762,13 @@ async def create_alignment(
     # Check that run relationship is accessible.
     if validated.run_id:
         run = await get_db_rows(
-            db.Run, session, authz_client, principal, {"id": {"_eq": validated.run_id}}, [], AuthzAction.VIEW
+            db.Run,
+            session,
+            authz_client,
+            principal,
+            {"id": {"_eq": validated.run_id}},
+            [],
+            AuthzAction.VIEW,
         )
         if not run:
             raise PlatformicsError("Unauthorized: run does not exist")
@@ -812,7 +840,13 @@ async def update_alignment(
     # Check that run relationship is accessible.
     if validated.run_id:
         run = await get_db_rows(
-            db.Run, session, authz_client, principal, {"id": {"_eq": validated.run_id}}, [], AuthzAction.VIEW
+            db.Run,
+            session,
+            authz_client,
+            principal,
+            {"id": {"_eq": validated.run_id}},
+            [],
+            AuthzAction.VIEW,
         )
         if not run:
             raise PlatformicsError("Unauthorized: run does not exist")
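The create/update mutations above all repeat one authorization pattern: fetch the related row through `get_db_rows` with `AuthzAction.VIEW` and fail if nothing comes back. A minimal sketch of that pattern as a standalone helper; `check_relationship` is a hypothetical name, but the `get_db_rows` call mirrors the signature used in the diff:

from platformics.graphql_api.core.errors import PlatformicsError
from platformics.graphql_api.core.query_builder import get_db_rows
from platformics.security.authorization import AuthzAction

async def check_relationship(model, entity_id, session, authz_client, principal):
    # Visibility check: the row must exist AND be viewable by this principal.
    rows = await get_db_rows(
        model,
        session,
        authz_client,
        principal,
        {"id": {"_eq": entity_id}},  # filter on the foreign-key value
        [],                          # no order_by needed for an existence check
        AuthzAction.VIEW,
    )
    if not rows:
        raise PlatformicsError("Unauthorized: related row does not exist")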
diff --git a/apiv2/graphql_api/types/annotation.py b/apiv2/graphql_api/types/annotation.py
index 818d3a821..43adac0c2 100644
--- a/apiv2/graphql_api/types/annotation.py
+++ b/apiv2/graphql_api/types/annotation.py
@@ -8,34 +8,31 @@
 
 # ruff: noqa: E501 Line too long
 
+import datetime
+import enum
 import typing
-from typing import TYPE_CHECKING, Annotated, Any, Optional, Sequence, Callable, List
+from typing import TYPE_CHECKING, Annotated, Optional, Sequence
 
-import platformics.database.models as base_db
 import database.models as db
 import strawberry
-import datetime
-from platformics.graphql_api.core.query_builder import get_db_rows, get_aggregate_db_rows
-from validators.annotation import AnnotationCreateInputValidator
-from validators.annotation import AnnotationUpdateInputValidator
+from fastapi import Depends
 from graphql_api.helpers.annotation import AnnotationGroupByOptions, build_annotation_groupby_output
-from platformics.graphql_api.core.relay_interface import EntityInterface
-from graphql_api.types.annotation_shape import AnnotationShapeAggregate, format_annotation_shape_aggregate_output
 from graphql_api.types.annotation_author import AnnotationAuthorAggregate, format_annotation_author_aggregate_output
-from fastapi import Depends
+from graphql_api.types.annotation_shape import AnnotationShapeAggregate, format_annotation_shape_aggregate_output
+from platformics.graphql_api.core.deps import get_authz_client, get_db_session, is_system_user, require_auth_principal
 from platformics.graphql_api.core.errors import PlatformicsError
-from platformics.graphql_api.core.deps import get_authz_client, get_db_session, require_auth_principal, is_system_user
+from platformics.graphql_api.core.query_builder import get_aggregate_db_rows, get_db_rows
 from platformics.graphql_api.core.query_input_types import (
-    aggregator_map,
-    orderBy,
-    EnumComparators,
+    BoolComparators,
     DatetimeComparators,
-    IntComparators,
+    EnumComparators,
     FloatComparators,
+    IntComparators,
     StrComparators,
-    UUIDComparators,
-    BoolComparators,
+    aggregator_map,
+    orderBy,
 )
+from platformics.graphql_api.core.relay_interface import EntityInterface
 from platformics.graphql_api.core.strawberry_extensions import DependencyExtension
 from platformics.security.authorization import AuthzAction, AuthzClient, Principal
 from sqlalchemy import inspect
@@ -43,27 +40,27 @@
 from sqlalchemy.ext.asyncio import AsyncSession
 from strawberry import relay
 from strawberry.types import Info
+from support.enums import annotation_method_type_enum
 from support.limit_offset import LimitOffsetClause
 from typing_extensions import TypedDict
-import enum
-from support.enums import annotation_method_type_enum
+from validators.annotation import AnnotationCreateInputValidator, AnnotationUpdateInputValidator
 
 E = typing.TypeVar("E")
 T = typing.TypeVar("T")
 
 if TYPE_CHECKING:
-    from graphql_api.types.run import RunOrderByClause, RunWhereClause, Run
-    from graphql_api.types.annotation_shape import (
-        AnnotationShapeOrderByClause,
-        AnnotationShapeWhereClause,
-        AnnotationShape,
-    )
     from graphql_api.types.annotation_author import (
+        AnnotationAuthor,
         AnnotationAuthorOrderByClause,
         AnnotationAuthorWhereClause,
-        AnnotationAuthor,
     )
-    from graphql_api.types.deposition import DepositionOrderByClause, DepositionWhereClause, Deposition
+    from graphql_api.types.annotation_shape import (
+        AnnotationShape,
+        AnnotationShapeOrderByClause,
+        AnnotationShapeWhereClause,
+    )
+    from graphql_api.types.deposition import Deposition, DepositionOrderByClause, DepositionWhereClause
+    from graphql_api.types.run import Run, RunOrderByClause, RunWhereClause
 
     pass
 else:
@@ -106,7 +103,7 @@ async def load_run_rows(
 @relay.connection(
     relay.ListConnection[
         Annotated["AnnotationShape", strawberry.lazy("graphql_api.types.annotation_shape")]
-    ]  # type:ignore
+    ],  # type:ignore
 )
 async def load_annotation_shape_rows(
     root: "Annotation",
@@ -140,7 +137,7 @@ async def load_annotation_shape_aggregate_rows(
 @relay.connection(
     relay.ListConnection[
         Annotated["AnnotationAuthor", strawberry.lazy("graphql_api.types.annotation_author")]
-    ]  # type:ignore
+    ],  # type:ignore
 )
 async def load_annotation_author_rows(
     root: "Annotation",
@@ -307,23 +304,25 @@ class Annotation(EntityInterface):
         default=None,
     )
     annotation_method: str = strawberry.field(
-        description="Describe how the annotation is made (e.g. Manual, crYoLO, Positive Unlabeled Learning, template matching)"
+        description="Describe how the annotation is made (e.g. Manual, crYoLO, Positive Unlabeled Learning, template matching)",
     )
     ground_truth_status: Optional[bool] = strawberry.field(
-        description="Whether an annotation is considered ground truth, as determined by the annotator.", default=None
+        description="Whether an annotation is considered ground truth, as determined by the annotator.",
+        default=None,
     )
     object_id: str = strawberry.field(
-        description="Gene Ontology Cellular Component identifier for the annotation object"
+        description="Gene Ontology Cellular Component identifier for the annotation object",
     )
     object_name: str = strawberry.field(
-        description="Name of the object being annotated (e.g. ribosome, nuclear pore complex, actin filament, membrane)"
+        description="Name of the object being annotated (e.g. ribosome, nuclear pore complex, actin filament, membrane)",
     )
     object_description: Optional[str] = strawberry.field(
         description="A textual description of the annotation object, can be a longer description to include additional information not covered by the Annotation object name and state.",
         default=None,
     )
     object_state: Optional[str] = strawberry.field(
-        description="Molecule state annotated (e.g. open, closed)", default=None
+        description="Molecule state annotated (e.g. open, closed)",
+        default=None,
     )
     object_count: Optional[int] = strawberry.field(description="Number of objects identified", default=None)
     confidence_precision: Optional[float] = strawberry.field(
@@ -335,25 +334,28 @@ class Annotation(EntityInterface):
         default=None,
     )
     ground_truth_used: Optional[str] = strawberry.field(
-        description="Annotation filename used as ground truth for precision and recall", default=None
+        description="Annotation filename used as ground truth for precision and recall",
+        default=None,
     )
     annotation_software: Optional[str] = strawberry.field(
-        description="Software used for generating this annotation", default=None
+        description="Software used for generating this annotation",
+        default=None,
     )
     is_curator_recommended: Optional[bool] = strawberry.field(
-        description="This annotation is recommended by the curator to be preferred for this object type.", default=None
+        description="This annotation is recommended by the curator to be preferred for this object type.",
+        default=None,
     )
     method_type: annotation_method_type_enum = strawberry.field(
-        description="Classification of the annotation method based on supervision."
+        description="Classification of the annotation method based on supervision.",
     )
     deposition_date: datetime.datetime = strawberry.field(
-        description="The date a data item was received by the cryoET data portal."
+        description="The date a data item was received by the cryoET data portal.",
     )
     release_date: datetime.datetime = strawberry.field(
-        description="The date a data item was received by the cryoET data portal."
+        description="The date a data item was received by the cryoET data portal.",
     )
     last_modified_date: datetime.datetime = strawberry.field(
-        description="The date a piece of data was last modified on the cryoET data portal."
+        description="The date a piece of data was last modified on the cryoET data portal.",
     )
     id: int = strawberry.field(description="An identifier to refer to a specific instance of this type")
@@ -453,7 +455,9 @@ class AnnotationAggregateFunctions:
     # This is a hack to accept "distinct" and "columns" as arguments to "count"
     @strawberry.field
     def count(
-        self, distinct: Optional[bool] = False, columns: Optional[AnnotationCountColumns] = None
+        self,
+        distinct: Optional[bool] = False,
+        columns: Optional[AnnotationCountColumns] = None,
     ) -> Optional[int]:
         # Count gets set with the proper value in the resolver, so we just return it here
         return self.count  # type: ignore
@@ -495,23 +499,25 @@ class AnnotationCreateInput:
         default=None,
     )
     annotation_method: str = strawberry.field(
-        description="Describe how the annotation is made (e.g. Manual, crYoLO, Positive Unlabeled Learning, template matching)"
+        description="Describe how the annotation is made (e.g. Manual, crYoLO, Positive Unlabeled Learning, template matching)",
     )
     ground_truth_status: Optional[bool] = strawberry.field(
-        description="Whether an annotation is considered ground truth, as determined by the annotator.", default=None
+        description="Whether an annotation is considered ground truth, as determined by the annotator.",
+        default=None,
     )
     object_id: str = strawberry.field(
-        description="Gene Ontology Cellular Component identifier for the annotation object"
+        description="Gene Ontology Cellular Component identifier for the annotation object",
     )
     object_name: str = strawberry.field(
-        description="Name of the object being annotated (e.g. ribosome, nuclear pore complex, actin filament, membrane)"
+        description="Name of the object being annotated (e.g. ribosome, nuclear pore complex, actin filament, membrane)",
     )
     object_description: Optional[str] = strawberry.field(
         description="A textual description of the annotation object, can be a longer description to include additional information not covered by the Annotation object name and state.",
         default=None,
     )
     object_state: Optional[str] = strawberry.field(
-        description="Molecule state annotated (e.g. open, closed)", default=None
+        description="Molecule state annotated (e.g. open, closed)",
+        default=None,
     )
     object_count: Optional[int] = strawberry.field(description="Number of objects identified", default=None)
     confidence_precision: Optional[float] = strawberry.field(
@@ -523,25 +529,28 @@ class AnnotationCreateInput:
         default=None,
     )
     ground_truth_used: Optional[str] = strawberry.field(
-        description="Annotation filename used as ground truth for precision and recall", default=None
+        description="Annotation filename used as ground truth for precision and recall",
+        default=None,
     )
     annotation_software: Optional[str] = strawberry.field(
-        description="Software used for generating this annotation", default=None
+        description="Software used for generating this annotation",
+        default=None,
     )
     is_curator_recommended: Optional[bool] = strawberry.field(
-        description="This annotation is recommended by the curator to be preferred for this object type.", default=None
+        description="This annotation is recommended by the curator to be preferred for this object type.",
+        default=None,
     )
     method_type: annotation_method_type_enum = strawberry.field(
-        description="Classification of the annotation method based on supervision."
+        description="Classification of the annotation method based on supervision.",
     )
     deposition_date: datetime.datetime = strawberry.field(
-        description="The date a data item was received by the cryoET data portal."
+        description="The date a data item was received by the cryoET data portal.",
     )
     release_date: datetime.datetime = strawberry.field(
-        description="The date a data item was received by the cryoET data portal."
+        description="The date a data item was received by the cryoET data portal.",
     )
     last_modified_date: datetime.datetime = strawberry.field(
-        description="The date a piece of data was last modified on the cryoET data portal."
+        description="The date a piece of data was last modified on the cryoET data portal.",
     )
     id: int = strawberry.field(description="An identifier to refer to a specific instance of this type")
@@ -557,23 +566,25 @@ class AnnotationUpdateInput:
         default=None,
     )
     annotation_method: Optional[str] = strawberry.field(
-        description="Describe how the annotation is made (e.g. Manual, crYoLO, Positive Unlabeled Learning, template matching)"
+        description="Describe how the annotation is made (e.g. Manual, crYoLO, Positive Unlabeled Learning, template matching)",
     )
     ground_truth_status: Optional[bool] = strawberry.field(
-        description="Whether an annotation is considered ground truth, as determined by the annotator.", default=None
+        description="Whether an annotation is considered ground truth, as determined by the annotator.",
+        default=None,
     )
     object_id: Optional[str] = strawberry.field(
-        description="Gene Ontology Cellular Component identifier for the annotation object"
+        description="Gene Ontology Cellular Component identifier for the annotation object",
     )
     object_name: Optional[str] = strawberry.field(
-        description="Name of the object being annotated (e.g. ribosome, nuclear pore complex, actin filament, membrane)"
+        description="Name of the object being annotated (e.g. ribosome, nuclear pore complex, actin filament, membrane)",
     )
     object_description: Optional[str] = strawberry.field(
         description="A textual description of the annotation object, can be a longer description to include additional information not covered by the Annotation object name and state.",
         default=None,
     )
     object_state: Optional[str] = strawberry.field(
-        description="Molecule state annotated (e.g. open, closed)", default=None
+        description="Molecule state annotated (e.g. open, closed)",
+        default=None,
     )
     object_count: Optional[int] = strawberry.field(description="Number of objects identified", default=None)
     confidence_precision: Optional[float] = strawberry.field(
@@ -585,25 +596,28 @@ class AnnotationUpdateInput:
         default=None,
     )
     ground_truth_used: Optional[str] = strawberry.field(
-        description="Annotation filename used as ground truth for precision and recall", default=None
+        description="Annotation filename used as ground truth for precision and recall",
+        default=None,
     )
     annotation_software: Optional[str] = strawberry.field(
-        description="Software used for generating this annotation", default=None
+        description="Software used for generating this annotation",
+        default=None,
     )
     is_curator_recommended: Optional[bool] = strawberry.field(
-        description="This annotation is recommended by the curator to be preferred for this object type.", default=None
+        description="This annotation is recommended by the curator to be preferred for this object type.",
+        default=None,
     )
     method_type: Optional[annotation_method_type_enum] = strawberry.field(
-        description="Classification of the annotation method based on supervision."
+        description="Classification of the annotation method based on supervision.",
     )
     deposition_date: Optional[datetime.datetime] = strawberry.field(
-        description="The date a data item was received by the cryoET data portal."
+        description="The date a data item was received by the cryoET data portal.",
     )
     release_date: Optional[datetime.datetime] = strawberry.field(
-        description="The date a data item was received by the cryoET data portal."
+        description="The date a data item was received by the cryoET data portal.",
     )
     last_modified_date: Optional[datetime.datetime] = strawberry.field(
-        description="The date a piece of data was last modified on the cryoET data portal."
+        description="The date a piece of data was last modified on the cryoET data portal.",
     )
     id: Optional[int] = strawberry.field(description="An identifier to refer to a specific instance of this type")
@@ -640,7 +654,7 @@ def format_annotation_aggregate_output(query_results: Sequence[RowMapping] | Row
     format the results using the proper GraphQL types.
     """
     aggregate = []
-    if not type(query_results) is list:
+    if type(query_results) is not list:
         query_results = [query_results]  # type: ignore
     for row in query_results:
         aggregate.append(format_annotation_aggregate_row(row))
@@ -659,10 +673,10 @@ def format_annotation_aggregate_row(row: RowMapping) -> AnnotationAggregateFunct
         aggregate = key.split("_", 1)
         if aggregate[0] not in aggregator_map.keys():
             # Turn list of groupby keys into nested objects
-            if not getattr(output, "groupBy"):
-                setattr(output, "groupBy", AnnotationGroupByOptions())
-            group = build_annotation_groupby_output(getattr(output, "groupBy"), group_keys, value)
-            setattr(output, "groupBy", group)
+            if not output.groupBy:
+                output.groupBy = AnnotationGroupByOptions()
+            group = build_annotation_groupby_output(output.groupBy, group_keys, value)
+            output.groupBy = group
         else:
             aggregate_name = aggregate[0]
             if aggregate_name == "count":
@@ -693,8 +707,8 @@ async def resolve_annotations_aggregate(
     # Get the selected aggregate functions and columns to operate on, and groupby options if any were provided.
     # TODO: not sure why selected_fields is a list
     selections = info.selected_fields[0].selections[0].selections
-    aggregate_selections = [selection for selection in selections if getattr(selection, "name") != "groupBy"]
-    groupby_selections = [selection for selection in selections if getattr(selection, "name") == "groupBy"]
+    aggregate_selections = [selection for selection in selections if selection.name != "groupBy"]
+    groupby_selections = [selection for selection in selections if selection.name == "groupBy"]
     groupby_selections = groupby_selections[0].selections if groupby_selections else []
 
     if not aggregate_selections:
@@ -725,7 +739,13 @@ async def create_annotation(
     # Check that run relationship is accessible.
     if validated.run_id:
         run = await get_db_rows(
-            db.Run, session, authz_client, principal, {"id": {"_eq": validated.run_id}}, [], AuthzAction.VIEW
+            db.Run,
+            session,
+            authz_client,
+            principal,
+            {"id": {"_eq": validated.run_id}},
+            [],
+            AuthzAction.VIEW,
         )
         if not run:
             raise PlatformicsError("Unauthorized: run does not exist")
@@ -780,7 +800,13 @@ async def update_annotation(
     # Check that run relationship is accessible.
     if validated.run_id:
         run = await get_db_rows(
-            db.Run, session, authz_client, principal, {"id": {"_eq": validated.run_id}}, [], AuthzAction.VIEW
+            db.Run,
+            session,
+            authz_client,
+            principal,
+            {"id": {"_eq": validated.run_id}},
+            [],
+            AuthzAction.VIEW,
         )
         if not run:
             raise PlatformicsError("Unauthorized: run does not exist")
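The `format_*_aggregate_row` functions rewritten above all follow the same scheme: each key of the flat SQL result row is split once on "_"; if the prefix is a known aggregator it becomes an aggregate value, otherwise the key is treated as a group-by column and nested via the `build_*_groupby_output` helper. An illustrative sketch with hypothetical row data (the key names and the aggregator list are assumptions, not taken from the generated code):

# Illustrative only: how a flat aggregate row is dispatched, mirroring the logic above.
row = {"count_id": 12, "method_type": "manual"}
AGGREGATORS = ("count", "sum", "avg", "min", "max")  # rough stand-in for aggregator_map keys
for key, value in row.items():
    prefix, _, column = key.partition("_")
    if prefix in AGGREGATORS:
        print("aggregate:", prefix, "column:", column, "value:", value)
    else:
        print("groupBy column:", key, "value:", value)  # nested by build_*_groupby_output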
diff --git a/apiv2/graphql_api/types/annotation_author.py b/apiv2/graphql_api/types/annotation_author.py
index 66703ca7e..560fd3d02 100644
--- a/apiv2/graphql_api/types/annotation_author.py
+++ b/apiv2/graphql_api/types/annotation_author.py
@@ -8,48 +8,41 @@
 
 # ruff: noqa: E501 Line too long
 
+import datetime
+import enum
 import typing
-from typing import TYPE_CHECKING, Annotated, Any, Optional, Sequence, Callable, List
+from typing import TYPE_CHECKING, Annotated, Optional, Sequence
 
-import platformics.database.models as base_db
 import database.models as db
 import strawberry
-import datetime
-from platformics.graphql_api.core.query_builder import get_db_rows, get_aggregate_db_rows
-from validators.annotation_author import AnnotationAuthorCreateInputValidator
-from validators.annotation_author import AnnotationAuthorUpdateInputValidator
-from graphql_api.helpers.annotation_author import AnnotationAuthorGroupByOptions, build_annotation_author_groupby_output
-from platformics.graphql_api.core.relay_interface import EntityInterface
 from fastapi import Depends
+from graphql_api.helpers.annotation_author import AnnotationAuthorGroupByOptions, build_annotation_author_groupby_output
+from platformics.graphql_api.core.deps import get_authz_client, get_db_session, is_system_user, require_auth_principal
 from platformics.graphql_api.core.errors import PlatformicsError
-from platformics.graphql_api.core.deps import get_authz_client, get_db_session, require_auth_principal, is_system_user
+from platformics.graphql_api.core.query_builder import get_aggregate_db_rows, get_db_rows
 from platformics.graphql_api.core.query_input_types import (
-    aggregator_map,
-    orderBy,
-    EnumComparators,
-    DatetimeComparators,
+    BoolComparators,
     IntComparators,
-    FloatComparators,
     StrComparators,
-    UUIDComparators,
-    BoolComparators,
+    aggregator_map,
+    orderBy,
 )
+from platformics.graphql_api.core.relay_interface import EntityInterface
 from platformics.graphql_api.core.strawberry_extensions import DependencyExtension
 from platformics.security.authorization import AuthzAction, AuthzClient, Principal
 from sqlalchemy import inspect
 from sqlalchemy.engine.row import RowMapping
 from sqlalchemy.ext.asyncio import AsyncSession
-from strawberry import relay
 from strawberry.types import Info
 from support.limit_offset import LimitOffsetClause
 from typing_extensions import TypedDict
-import enum
+from validators.annotation_author import AnnotationAuthorCreateInputValidator, AnnotationAuthorUpdateInputValidator
 
 E = typing.TypeVar("E")
 T = typing.TypeVar("T")
 
 if TYPE_CHECKING:
-    from graphql_api.types.annotation import AnnotationOrderByClause, AnnotationWhereClause, Annotation
+    from graphql_api.types.annotation import Annotation, AnnotationOrderByClause, AnnotationWhereClause
 
     pass
 else:
@@ -151,25 +144,30 @@ class AnnotationAuthor(EntityInterface):
         load_annotation_rows
     )  # type:ignore
     author_list_order: int = strawberry.field(
-        description="The order that the author is listed as in the associated publication"
+        description="The order that the author is listed as in the associated publication",
    )
     orcid: Optional[str] = strawberry.field(description="The ORCID identifier for the author.", default=None)
     name: str = strawberry.field(description="The full name of the author.")
     email: Optional[str] = strawberry.field(description="The email address of the author.", default=None)
     affiliation_name: Optional[str] = strawberry.field(
-        description="The name of the author's affiliation.", default=None
+        description="The name of the author's affiliation.",
+        default=None,
     )
     affiliation_address: Optional[str] = strawberry.field(
-        description="The address of the author's affiliation.", default=None
+        description="The address of the author's affiliation.",
+        default=None,
     )
     affiliation_identifier: Optional[str] = strawberry.field(
-        description="A Research Organization Registry (ROR) identifier.", default=None
+        description="A Research Organization Registry (ROR) identifier.",
+        default=None,
     )
     corresponding_author_status: Optional[bool] = strawberry.field(
-        description="Whether the author is a corresponding author.", default=None
+        description="Whether the author is a corresponding author.",
+        default=None,
     )
     primary_author_status: Optional[bool] = strawberry.field(
-        description="Whether the author is a primary author.", default=None
+        description="Whether the author is a primary author.",
+        default=None,
     )
     id: int = strawberry.field(description="An identifier to refer to a specific instance of this type")
@@ -245,7 +243,9 @@ class AnnotationAuthorAggregateFunctions:
     # This is a hack to accept "distinct" and "columns" as arguments to "count"
     @strawberry.field
     def count(
-        self, distinct: Optional[bool] = False, columns: Optional[AnnotationAuthorCountColumns] = None
+        self,
+        distinct: Optional[bool] = False,
+        columns: Optional[AnnotationAuthorCountColumns] = None,
     ) -> Optional[int]:
         # Count gets set with the proper value in the resolver, so we just return it here
         return self.count  # type: ignore
@@ -279,28 +279,34 @@ class AnnotationAuthorAggregate:
 @strawberry.input()
 class AnnotationAuthorCreateInput:
     annotation_id: Optional[strawberry.ID] = strawberry.field(
-        description="Metadata about an annotation for a run", default=None
+        description="Metadata about an annotation for a run",
+        default=None,
     )
     author_list_order: int = strawberry.field(
-        description="The order that the author is listed as in the associated publication"
+        description="The order that the author is listed as in the associated publication",
     )
     orcid: Optional[str] = strawberry.field(description="The ORCID identifier for the author.", default=None)
     name: str = strawberry.field(description="The full name of the author.")
     email: Optional[str] = strawberry.field(description="The email address of the author.", default=None)
     affiliation_name: Optional[str] = strawberry.field(
-        description="The name of the author's affiliation.", default=None
+        description="The name of the author's affiliation.",
+        default=None,
     )
     affiliation_address: Optional[str] = strawberry.field(
-        description="The address of the author's affiliation.", default=None
+        description="The address of the author's affiliation.",
+        default=None,
     )
     affiliation_identifier: Optional[str] = strawberry.field(
-        description="A Research Organization Registry (ROR) identifier.", default=None
+        description="A Research Organization Registry (ROR) identifier.",
+        default=None,
     )
     corresponding_author_status: Optional[bool] = strawberry.field(
-        description="Whether the author is a corresponding author.", default=None
+        description="Whether the author is a corresponding author.",
+        default=None,
     )
     primary_author_status: Optional[bool] = strawberry.field(
-        description="Whether the author is a primary author.", default=None
+        description="Whether the author is a primary author.",
+        default=None,
     )
     id: int = strawberry.field(description="An identifier to refer to a specific instance of this type")
@@ -308,28 +314,34 @@ class AnnotationAuthorCreateInput:
 @strawberry.input()
 class AnnotationAuthorUpdateInput:
     annotation_id: Optional[strawberry.ID] = strawberry.field(
-        description="Metadata about an annotation for a run", default=None
+        description="Metadata about an annotation for a run",
+        default=None,
     )
     author_list_order: Optional[int] = strawberry.field(
-        description="The order that the author is listed as in the associated publication"
+        description="The order that the author is listed as in the associated publication",
     )
     orcid: Optional[str] = strawberry.field(description="The ORCID identifier for the author.", default=None)
     name: Optional[str] = strawberry.field(description="The full name of the author.")
     email: Optional[str] = strawberry.field(description="The email address of the author.", default=None)
     affiliation_name: Optional[str] = strawberry.field(
-        description="The name of the author's affiliation.", default=None
+        description="The name of the author's affiliation.",
+        default=None,
     )
     affiliation_address: Optional[str] = strawberry.field(
-        description="The address of the author's affiliation.", default=None
+        description="The address of the author's affiliation.",
+        default=None,
     )
     affiliation_identifier: Optional[str] = strawberry.field(
-        description="A Research Organization Registry (ROR) identifier.", default=None
+        description="A Research Organization Registry (ROR) identifier.",
+        default=None,
     )
     corresponding_author_status: Optional[bool] = strawberry.field(
-        description="Whether the author is a corresponding author.", default=None
+        description="Whether the author is a corresponding author.",
+        default=None,
     )
     primary_author_status: Optional[bool] = strawberry.field(
-        description="Whether the author is a primary author.", default=None
+        description="Whether the author is a primary author.",
+        default=None,
     )
     id: Optional[int] = strawberry.field(description="An identifier to refer to a specific instance of this type")
@@ -368,7 +380,7 @@ def format_annotation_author_aggregate_output(
     format the results using the proper GraphQL types.
     """
     aggregate = []
-    if not type(query_results) is list:
+    if type(query_results) is not list:
         query_results = [query_results]  # type: ignore
     for row in query_results:
         aggregate.append(format_annotation_author_aggregate_row(row))
@@ -387,10 +399,10 @@ def format_annotation_author_aggregate_row(row: RowMapping) -> AnnotationAuthorA
         aggregate = key.split("_", 1)
         if aggregate[0] not in aggregator_map.keys():
             # Turn list of groupby keys into nested objects
-            if not getattr(output, "groupBy"):
-                setattr(output, "groupBy", AnnotationAuthorGroupByOptions())
-            group = build_annotation_author_groupby_output(getattr(output, "groupBy"), group_keys, value)
-            setattr(output, "groupBy", group)
+            if not output.groupBy:
+                output.groupBy = AnnotationAuthorGroupByOptions()
+            group = build_annotation_author_groupby_output(output.groupBy, group_keys, value)
+            output.groupBy = group
         else:
             aggregate_name = aggregate[0]
             if aggregate_name == "count":
@@ -421,8 +433,8 @@ async def resolve_annotation_authors_aggregate(
     # Get the selected aggregate functions and columns to operate on, and groupby options if any were provided.
     # TODO: not sure why selected_fields is a list
     selections = info.selected_fields[0].selections[0].selections
-    aggregate_selections = [selection for selection in selections if getattr(selection, "name") != "groupBy"]
-    groupby_selections = [selection for selection in selections if getattr(selection, "name") == "groupBy"]
+    aggregate_selections = [selection for selection in selections if selection.name != "groupBy"]
+    groupby_selections = [selection for selection in selections if selection.name == "groupBy"]
     groupby_selections = groupby_selections[0].selections if groupby_selections else []
 
     if not aggregate_selections:
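A recurring fix in this patch replaces `if not type(x) is list` with `if type(x) is not list` (ruff's E714 rule); the behavior is identical, only the spelling changes. A minimal self-contained sketch of the normalization idiom these `format_*_aggregate_output` functions use, with hypothetical inputs:

# Single rows and lists of rows are funneled into one code path.
def normalize(query_results):
    if type(query_results) is not list:  # E714-clean spelling of "not ... is"
        query_results = [query_results]
    return query_results

assert normalize({"count": 1}) == [{"count": 1}]
assert normalize([{"count": 1}]) == [{"count": 1}]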
diff --git a/apiv2/graphql_api/types/annotation_file.py b/apiv2/graphql_api/types/annotation_file.py
index ed12521b0..93794a102 100644
--- a/apiv2/graphql_api/types/annotation_file.py
+++ b/apiv2/graphql_api/types/annotation_file.py
@@ -8,58 +8,52 @@
 
 # ruff: noqa: E501 Line too long
 
+import datetime
+import enum
 import typing
-from typing import TYPE_CHECKING, Annotated, Any, Optional, Sequence, Callable, List
+from typing import TYPE_CHECKING, Annotated, Optional, Sequence
 
-import platformics.database.models as base_db
 import database.models as db
 import strawberry
-import datetime
-from platformics.graphql_api.core.query_builder import get_db_rows, get_aggregate_db_rows
-from validators.annotation_file import AnnotationFileCreateInputValidator
-from validators.annotation_file import AnnotationFileUpdateInputValidator
-from graphql_api.helpers.annotation_file import AnnotationFileGroupByOptions, build_annotation_file_groupby_output
-from platformics.graphql_api.core.relay_interface import EntityInterface
 from fastapi import Depends
+from graphql_api.helpers.annotation_file import AnnotationFileGroupByOptions, build_annotation_file_groupby_output
+from platformics.graphql_api.core.deps import get_authz_client, get_db_session, is_system_user, require_auth_principal
 from platformics.graphql_api.core.errors import PlatformicsError
-from platformics.graphql_api.core.deps import get_authz_client, get_db_session, require_auth_principal, is_system_user
+from platformics.graphql_api.core.query_builder import get_aggregate_db_rows, get_db_rows
 from platformics.graphql_api.core.query_input_types import (
-    aggregator_map,
-    orderBy,
+    BoolComparators,
     EnumComparators,
-    DatetimeComparators,
     IntComparators,
-    FloatComparators,
     StrComparators,
-    UUIDComparators,
-    BoolComparators,
+    aggregator_map,
+    orderBy,
 )
+from platformics.graphql_api.core.relay_interface import EntityInterface
 from platformics.graphql_api.core.strawberry_extensions import DependencyExtension
 from platformics.security.authorization import AuthzAction, AuthzClient, Principal
 from sqlalchemy import inspect
 from sqlalchemy.engine.row import RowMapping
 from sqlalchemy.ext.asyncio import AsyncSession
-from strawberry import relay
 from strawberry.types import Info
+from support.enums import annotation_file_source_enum
 from support.limit_offset import LimitOffsetClause
 from typing_extensions import TypedDict
-import enum
-from support.enums import annotation_file_source_enum
+from validators.annotation_file import AnnotationFileCreateInputValidator, AnnotationFileUpdateInputValidator
 
 E = typing.TypeVar("E")
 T = typing.TypeVar("T")
 
 if TYPE_CHECKING:
-    from graphql_api.types.alignment import AlignmentOrderByClause, AlignmentWhereClause, Alignment
+    from graphql_api.types.alignment import Alignment, AlignmentOrderByClause, AlignmentWhereClause
     from graphql_api.types.annotation_shape import (
+        AnnotationShape,
         AnnotationShapeOrderByClause,
         AnnotationShapeWhereClause,
-        AnnotationShape,
     )
     from graphql_api.types.tomogram_voxel_spacing import (
+        TomogramVoxelSpacing,
         TomogramVoxelSpacingOrderByClause,
         TomogramVoxelSpacingWhereClause,
-        TomogramVoxelSpacing,
     )
 
     pass
@@ -129,7 +123,7 @@ async def load_tomogram_voxel_spacing_rows(
     mapper = inspect(db.AnnotationFile)
     relationship = mapper.relationships["tomogram_voxel_spacing"]
     return await dataloader.loader_for(relationship, where, order_by).load(
-        root.tomogram_voxel_spacing_id
+        root.tomogram_voxel_spacing_id,
     )  # type:ignore
@@ -222,10 +216,12 @@ class AnnotationFile(EntityInterface):
     s3_path: str = strawberry.field(description="Path to the file in s3")
     https_path: str = strawberry.field(description="Path to the file as an https url")
     is_visualization_default: Optional[bool] = strawberry.field(
-        description="This annotation will be rendered in neuroglancer by default.", default=None
+        description="This annotation will be rendered in neuroglancer by default.",
+        default=None,
     )
     source: Optional[annotation_file_source_enum] = strawberry.field(
-        description="The source type for the annotation file", default=None
+        description="The source type for the annotation file",
+        default=None,
     )
     id: int = strawberry.field(description="An identifier to refer to a specific instance of this type")
@@ -294,7 +290,9 @@ class AnnotationFileAggregateFunctions:
     # This is a hack to accept "distinct" and "columns" as arguments to "count"
     @strawberry.field
     def count(
-        self, distinct: Optional[bool] = False, columns: Optional[AnnotationFileCountColumns] = None
+        self,
+        distinct: Optional[bool] = False,
+        columns: Optional[AnnotationFileCountColumns] = None,
     ) -> Optional[int]:
         # Count gets set with the proper value in the resolver, so we just return it here
         return self.count  # type: ignore
@@ -329,19 +327,23 @@ class AnnotationFileAggregate:
 class AnnotationFileCreateInput:
     alignment_id: Optional[strawberry.ID] = strawberry.field(description="Tiltseries Alignment", default=None)
     annotation_shape_id: Optional[strawberry.ID] = strawberry.field(
-        description="Shapes associated with an annotation", default=None
+        description="Shapes associated with an annotation",
+        default=None,
     )
     tomogram_voxel_spacing_id: Optional[strawberry.ID] = strawberry.field(
-        description="Voxel spacings for a run", default=None
+        description="Voxel spacings for a run",
+        default=None,
     )
     format: str = strawberry.field(description="File format label")
     s3_path: str = strawberry.field(description="Path to the file in s3")
     https_path: str = strawberry.field(description="Path to the file as an https url")
     is_visualization_default: Optional[bool] = strawberry.field(
-        description="This annotation will be rendered in neuroglancer by default.", default=None
+        description="This annotation will be rendered in neuroglancer by default.",
+        default=None,
     )
     source: Optional[annotation_file_source_enum] = strawberry.field(
-        description="The source type for the annotation file", default=None
+        description="The source type for the annotation file",
+        default=None,
     )
     id: int = strawberry.field(description="An identifier to refer to a specific instance of this type")
@@ -350,19 +352,23 @@ class AnnotationFileCreateInput:
 class AnnotationFileUpdateInput:
     alignment_id: Optional[strawberry.ID] = strawberry.field(description="Tiltseries Alignment", default=None)
     annotation_shape_id: Optional[strawberry.ID] = strawberry.field(
-        description="Shapes associated with an annotation", default=None
+        description="Shapes associated with an annotation",
+        default=None,
     )
     tomogram_voxel_spacing_id: Optional[strawberry.ID] = strawberry.field(
-        description="Voxel spacings for a run", default=None
+        description="Voxel spacings for a run",
+        default=None,
     )
     format: Optional[str] = strawberry.field(description="File format label")
     s3_path: Optional[str] = strawberry.field(description="Path to the file in s3")
     https_path: Optional[str] = strawberry.field(description="Path to the file as an https url")
     is_visualization_default: Optional[bool] = strawberry.field(
-        description="This annotation will be rendered in neuroglancer by default.", default=None
+        description="This annotation will be rendered in neuroglancer by default.",
+        default=None,
     )
     source: Optional[annotation_file_source_enum] = strawberry.field(
-        description="The source type for the annotation file", default=None
+        description="The source type for the annotation file",
+        default=None,
     )
     id: Optional[int] = strawberry.field(description="An identifier to refer to a specific instance of this type")
@@ -401,7 +407,7 @@ def format_annotation_file_aggregate_output(
     format the results using the proper GraphQL types.
     """
     aggregate = []
-    if not type(query_results) is list:
+    if type(query_results) is not list:
         query_results = [query_results]  # type: ignore
     for row in query_results:
         aggregate.append(format_annotation_file_aggregate_row(row))
@@ -420,10 +426,10 @@ def format_annotation_file_aggregate_row(row: RowMapping) -> AnnotationFileAggre
         aggregate = key.split("_", 1)
         if aggregate[0] not in aggregator_map.keys():
             # Turn list of groupby keys into nested objects
-            if not getattr(output, "groupBy"):
-                setattr(output, "groupBy", AnnotationFileGroupByOptions())
-            group = build_annotation_file_groupby_output(getattr(output, "groupBy"), group_keys, value)
-            setattr(output, "groupBy", group)
+            if not output.groupBy:
+                output.groupBy = AnnotationFileGroupByOptions()
+            group = build_annotation_file_groupby_output(output.groupBy, group_keys, value)
+            output.groupBy = group
         else:
             aggregate_name = aggregate[0]
             if aggregate_name == "count":
@@ -454,8 +460,8 @@ async def resolve_annotation_files_aggregate(
     # Get the selected aggregate functions and columns to operate on, and groupby options if any were provided.
     # TODO: not sure why selected_fields is a list
     selections = info.selected_fields[0].selections[0].selections
-    aggregate_selections = [selection for selection in selections if getattr(selection, "name") != "groupBy"]
-    groupby_selections = [selection for selection in selections if getattr(selection, "name") == "groupBy"]
+    aggregate_selections = [selection for selection in selections if selection.name != "groupBy"]
+    groupby_selections = [selection for selection in selections if selection.name == "groupBy"]
    groupby_selections = groupby_selections[0].selections if groupby_selections else []
 
     if not aggregate_selections:
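The "count hack" noted in each `*AggregateFunctions` class exists so that `count` can take `distinct` and `columns` arguments even though the actual counting happens in the resolver. A hypothetical client query showing what those arguments look like; the root field and column names are assumptions about the generated schema, not taken from the patch:

# Illustrative aggregate query (field names assumed).
AGGREGATE_COUNT = """
query {
  annotationFilesAggregate {
    aggregate {
      count(distinct: true, columns: format)
    }
  }
}
"""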
AnnotationOrderByClause, AnnotationWhereClause + from graphql_api.types.annotation_file import AnnotationFile, AnnotationFileOrderByClause, AnnotationFileWhereClause pass else: @@ -91,7 +84,7 @@ async def load_annotation_rows( @relay.connection( relay.ListConnection[ Annotated["AnnotationFile", strawberry.lazy("graphql_api.types.annotation_file")] - ] # type:ignore + ], # type:ignore ) async def load_annotation_file_rows( root: "AnnotationShape", @@ -243,7 +236,9 @@ class AnnotationShapeAggregateFunctions: # This is a hack to accept "distinct" and "columns" as arguments to "count" @strawberry.field def count( - self, distinct: Optional[bool] = False, columns: Optional[AnnotationShapeCountColumns] = None + self, + distinct: Optional[bool] = False, + columns: Optional[AnnotationShapeCountColumns] = None, ) -> Optional[int]: # Count gets set with the proper value in the resolver, so we just return it here return self.count # type: ignore @@ -277,7 +272,8 @@ class AnnotationShapeAggregate: @strawberry.input() class AnnotationShapeCreateInput: annotation_id: Optional[strawberry.ID] = strawberry.field( - description="Metadata about an annotation for a run", default=None + description="Metadata about an annotation for a run", + default=None, ) shape_type: Optional[annotation_file_shape_type_enum] = strawberry.field(description=None, default=None) id: int = strawberry.field(description="An identifier to refer to a specific instance of this type") @@ -286,7 +282,8 @@ class AnnotationShapeCreateInput: @strawberry.input() class AnnotationShapeUpdateInput: annotation_id: Optional[strawberry.ID] = strawberry.field( - description="Metadata about an annotation for a run", default=None + description="Metadata about an annotation for a run", + default=None, ) shape_type: Optional[annotation_file_shape_type_enum] = strawberry.field(description=None, default=None) id: Optional[int] = strawberry.field(description="An identifier to refer to a specific instance of this type") @@ -326,7 +323,7 @@ def format_annotation_shape_aggregate_output( format the results using the proper GraphQL types. """ aggregate = [] - if not type(query_results) is list: + if type(query_results) is not list: query_results = [query_results] # type: ignore for row in query_results: aggregate.append(format_annotation_shape_aggregate_row(row)) @@ -345,10 +342,10 @@ def format_annotation_shape_aggregate_row(row: RowMapping) -> AnnotationShapeAgg aggregate = key.split("_", 1) if aggregate[0] not in aggregator_map.keys(): # Turn list of groupby keys into nested objects - if not getattr(output, "groupBy"): - setattr(output, "groupBy", AnnotationShapeGroupByOptions()) - group = build_annotation_shape_groupby_output(getattr(output, "groupBy"), group_keys, value) - setattr(output, "groupBy", group) + if not output.groupBy: + output.groupBy = AnnotationShapeGroupByOptions() + group = build_annotation_shape_groupby_output(output.groupBy, group_keys, value) + output.groupBy = group else: aggregate_name = aggregate[0] if aggregate_name == "count": @@ -379,8 +376,8 @@ async def resolve_annotation_shapes_aggregate( # Get the selected aggregate functions and columns to operate on, and groupby options if any were provided. 
     # TODO: not sure why selected_fields is a list
     selections = info.selected_fields[0].selections[0].selections
-    aggregate_selections = [selection for selection in selections if getattr(selection, "name") != "groupBy"]
-    groupby_selections = [selection for selection in selections if getattr(selection, "name") == "groupBy"]
+    aggregate_selections = [selection for selection in selections if selection.name != "groupBy"]
+    groupby_selections = [selection for selection in selections if selection.name == "groupBy"]
     groupby_selections = groupby_selections[0].selections if groupby_selections else []

     if not aggregate_selections:
diff --git a/apiv2/graphql_api/types/dataset.py b/apiv2/graphql_api/types/dataset.py
index 189fb605b..05be02579 100644
--- a/apiv2/graphql_api/types/dataset.py
+++ b/apiv2/graphql_api/types/dataset.py
@@ -8,35 +8,30 @@
 # ruff: noqa: E501 Line too long

+import datetime
+import enum
 import typing
-from typing import TYPE_CHECKING, Annotated, Any, Optional, Sequence, Callable, List
+from typing import TYPE_CHECKING, Annotated, Optional, Sequence

-import platformics.database.models as base_db
 import database.models as db
 import strawberry
-import datetime
-from platformics.graphql_api.core.query_builder import get_db_rows, get_aggregate_db_rows
-from validators.dataset import DatasetCreateInputValidator
-from validators.dataset import DatasetUpdateInputValidator
+from fastapi import Depends
 from graphql_api.helpers.dataset import DatasetGroupByOptions, build_dataset_groupby_output
-from platformics.graphql_api.core.relay_interface import EntityInterface
-from graphql_api.types.dataset_funding import DatasetFundingAggregate, format_dataset_funding_aggregate_output
 from graphql_api.types.dataset_author import DatasetAuthorAggregate, format_dataset_author_aggregate_output
+from graphql_api.types.dataset_funding import DatasetFundingAggregate, format_dataset_funding_aggregate_output
 from graphql_api.types.run import RunAggregate, format_run_aggregate_output
-from fastapi import Depends
+from platformics.graphql_api.core.deps import get_authz_client, get_db_session, is_system_user, require_auth_principal
 from platformics.graphql_api.core.errors import PlatformicsError
-from platformics.graphql_api.core.deps import get_authz_client, get_db_session, require_auth_principal, is_system_user
+from platformics.graphql_api.core.query_builder import get_aggregate_db_rows, get_db_rows
 from platformics.graphql_api.core.query_input_types import (
-    aggregator_map,
-    orderBy,
-    EnumComparators,
     DatetimeComparators,
+    EnumComparators,
     IntComparators,
-    FloatComparators,
     StrComparators,
-    UUIDComparators,
-    BoolComparators,
+    aggregator_map,
+    orderBy,
 )
+from platformics.graphql_api.core.relay_interface import EntityInterface
 from platformics.graphql_api.core.strawberry_extensions import DependencyExtension
 from platformics.security.authorization import AuthzAction, AuthzClient, Principal
 from sqlalchemy import inspect
@@ -44,19 +39,19 @@ from sqlalchemy.ext.asyncio import AsyncSession
 from strawberry import relay
 from strawberry.types import Info
+from support.enums import sample_type_enum
 from support.limit_offset import LimitOffsetClause
 from typing_extensions import TypedDict
-import enum
-from support.enums import sample_type_enum
+from validators.dataset import DatasetCreateInputValidator, DatasetUpdateInputValidator

 E = typing.TypeVar("E")
 T = typing.TypeVar("T")

 if TYPE_CHECKING:
-    from graphql_api.types.deposition import DepositionOrderByClause, DepositionWhereClause, Deposition
-    from graphql_api.types.dataset_funding import DatasetFundingOrderByClause, DatasetFundingWhereClause, DatasetFunding
-    from graphql_api.types.dataset_author import DatasetAuthorOrderByClause, DatasetAuthorWhereClause, DatasetAuthor
-    from graphql_api.types.run import RunOrderByClause, RunWhereClause, Run
+    from graphql_api.types.dataset_author import DatasetAuthor, DatasetAuthorOrderByClause, DatasetAuthorWhereClause
+    from graphql_api.types.dataset_funding import DatasetFunding, DatasetFundingOrderByClause, DatasetFundingWhereClause
+    from graphql_api.types.deposition import Deposition, DepositionOrderByClause, DepositionWhereClause
+    from graphql_api.types.run import Run, RunOrderByClause, RunWhereClause

     pass
 else:
@@ -101,7 +96,7 @@ async def load_deposition_rows(
     @relay.connection(
         relay.ListConnection[
             Annotated["DatasetFunding", strawberry.lazy("graphql_api.types.dataset_funding")]
-        ]  # type:ignore
+        ],  # type:ignore
     )
     async def load_dataset_funding_rows(
         root: "Dataset",
@@ -133,7 +128,9 @@ async def load_dataset_funding_aggregate_rows(
     @relay.connection(
-        relay.ListConnection[Annotated["DatasetAuthor", strawberry.lazy("graphql_api.types.dataset_author")]]  # type:ignore
+        relay.ListConnection[
+            Annotated["DatasetAuthor", strawberry.lazy("graphql_api.types.dataset_author")]
+        ],  # type:ignore
     )
     async def load_dataset_author_rows(
         root: "Dataset",
@@ -165,7 +162,7 @@ async def load_dataset_author_aggregate_rows(
     @relay.connection(
-        relay.ListConnection[Annotated["Run", strawberry.lazy("graphql_api.types.run")]]  # type:ignore
+        relay.ListConnection[Annotated["Run", strawberry.lazy("graphql_api.types.run")]],  # type:ignore
     )
     async def load_run_rows(
         root: "Dataset",
@@ -317,13 +314,14 @@ class Dataset(EntityInterface):
     )  # type:ignore
     title: str = strawberry.field(description="Title of a CryoET dataset.")
     description: str = strawberry.field(
-        description="A short description of a CryoET dataset, similar to an abstract for a journal article or dataset."
+        description="A short description of a CryoET dataset, similar to an abstract for a journal article or dataset.",
     )
     organism_name: str = strawberry.field(
-        description="Name of the organism from which a biological sample used in a CryoET study is derived from, e.g. homo sapiens."
+        description="Name of the organism from which a biological sample used in a CryoET study is derived from, e.g. homo sapiens.",
     )
     organism_taxid: Optional[int] = strawberry.field(
-        description="NCBI taxonomy identifier for the organism, e.g. 9606", default=None
+        description="NCBI taxonomy identifier for the organism, e.g. 9606",
+        default=None,
     )
     tissue_name: Optional[str] = strawberry.field(
         description="Name of the tissue from which a biological sample used in a CryoET study is derived from.",
@@ -335,17 +333,21 @@ class Dataset(EntityInterface):
         default=None,
     )
     cell_type_id: Optional[str] = strawberry.field(
-        description="Cell Ontology identifier for the cell type", default=None
+        description="Cell Ontology identifier for the cell type",
+        default=None,
     )
     cell_strain_name: Optional[str] = strawberry.field(description="Cell line or strain for the sample.", default=None)
     cell_strain_id: Optional[str] = strawberry.field(
-        description="Link to more information about the cell strain.", default=None
+        description="Link to more information about the cell strain.",
+        default=None,
     )
     sample_type: Optional[sample_type_enum] = strawberry.field(
-        description="Type of sample imaged in a CryoET study", default=None
+        description="Type of sample imaged in a CryoET study",
+        default=None,
     )
     sample_preparation: Optional[str] = strawberry.field(
-        description="Describes how the sample was prepared.", default=None
+        description="Describes how the sample was prepared.",
+        default=None,
     )
     grid_preparation: Optional[str] = strawberry.field(description="Describes Cryo-ET grid preparation.", default=None)
     other_setup: Optional[str] = strawberry.field(
@@ -354,30 +356,34 @@ class Dataset(EntityInterface):
     )
     key_photo_url: Optional[str] = strawberry.field(description="URL for the dataset preview image.", default=None)
     key_photo_thumbnail_url: Optional[str] = strawberry.field(
-        description="URL for the thumbnail of preview image.", default=None
+        description="URL for the thumbnail of preview image.",
+        default=None,
     )
     cell_component_name: Optional[str] = strawberry.field(description="Name of the cellular component.", default=None)
     cell_component_id: Optional[str] = strawberry.field(
-        description="The GO identifier for the cellular component.", default=None
+        description="The GO identifier for the cellular component.",
+        default=None,
     )
     deposition_date: datetime.datetime = strawberry.field(
-        description="The date a data item was received by the cryoET data portal."
+        description="The date a data item was received by the cryoET data portal.",
     )
     release_date: datetime.datetime = strawberry.field(
-        description="The date a data item was received by the cryoET data portal."
+        description="The date a data item was received by the cryoET data portal.",
     )
     last_modified_date: datetime.datetime = strawberry.field(
-        description="The date a piece of data was last modified on the cryoET data portal."
+ description="The date a piece of data was last modified on the cryoET data portal.", ) publications: Optional[str] = strawberry.field( - description="Comma-separated list of DOIs for publications associated with the dataset.", default=None + description="Comma-separated list of DOIs for publications associated with the dataset.", + default=None, ) related_database_entries: Optional[str] = strawberry.field( - description="Comma-separated list of related database entries for the dataset.", default=None + description="Comma-separated list of related database entries for the dataset.", + default=None, ) s3_prefix: str = strawberry.field(description="Path to a directory containing data for this entity as an S3 url") https_prefix: str = strawberry.field( - description="Path to a directory containing data for this entity as an HTTPS url" + description="Path to a directory containing data for this entity as an HTTPS url", ) id: int = strawberry.field(description="An identifier to refer to a specific instance of this type") @@ -523,13 +529,14 @@ class DatasetCreateInput: deposition_id: Optional[strawberry.ID] = strawberry.field(description=None, default=None) title: str = strawberry.field(description="Title of a CryoET dataset.") description: str = strawberry.field( - description="A short description of a CryoET dataset, similar to an abstract for a journal article or dataset." + description="A short description of a CryoET dataset, similar to an abstract for a journal article or dataset.", ) organism_name: str = strawberry.field( - description="Name of the organism from which a biological sample used in a CryoET study is derived from, e.g. homo sapiens." + description="Name of the organism from which a biological sample used in a CryoET study is derived from, e.g. homo sapiens.", ) organism_taxid: Optional[int] = strawberry.field( - description="NCBI taxonomy identifier for the organism, e.g. 9606", default=None + description="NCBI taxonomy identifier for the organism, e.g. 
9606", + default=None, ) tissue_name: Optional[str] = strawberry.field( description="Name of the tissue from which a biological sample used in a CryoET study is derived from.", @@ -541,17 +548,21 @@ class DatasetCreateInput: default=None, ) cell_type_id: Optional[str] = strawberry.field( - description="Cell Ontology identifier for the cell type", default=None + description="Cell Ontology identifier for the cell type", + default=None, ) cell_strain_name: Optional[str] = strawberry.field(description="Cell line or strain for the sample.", default=None) cell_strain_id: Optional[str] = strawberry.field( - description="Link to more information about the cell strain.", default=None + description="Link to more information about the cell strain.", + default=None, ) sample_type: Optional[sample_type_enum] = strawberry.field( - description="Type of sample imaged in a CryoET study", default=None + description="Type of sample imaged in a CryoET study", + default=None, ) sample_preparation: Optional[str] = strawberry.field( - description="Describes how the sample was prepared.", default=None + description="Describes how the sample was prepared.", + default=None, ) grid_preparation: Optional[str] = strawberry.field(description="Describes Cryo-ET grid preparation.", default=None) other_setup: Optional[str] = strawberry.field( @@ -560,30 +571,34 @@ class DatasetCreateInput: ) key_photo_url: Optional[str] = strawberry.field(description="URL for the dataset preview image.", default=None) key_photo_thumbnail_url: Optional[str] = strawberry.field( - description="URL for the thumbnail of preview image.", default=None + description="URL for the thumbnail of preview image.", + default=None, ) cell_component_name: Optional[str] = strawberry.field(description="Name of the cellular component.", default=None) cell_component_id: Optional[str] = strawberry.field( - description="The GO identifier for the cellular component.", default=None + description="The GO identifier for the cellular component.", + default=None, ) deposition_date: datetime.datetime = strawberry.field( - description="The date a data item was received by the cryoET data portal." + description="The date a data item was received by the cryoET data portal.", ) release_date: datetime.datetime = strawberry.field( - description="The date a data item was received by the cryoET data portal." + description="The date a data item was received by the cryoET data portal.", ) last_modified_date: datetime.datetime = strawberry.field( - description="The date a piece of data was last modified on the cryoET data portal." 
+ description="The date a piece of data was last modified on the cryoET data portal.", ) publications: Optional[str] = strawberry.field( - description="Comma-separated list of DOIs for publications associated with the dataset.", default=None + description="Comma-separated list of DOIs for publications associated with the dataset.", + default=None, ) related_database_entries: Optional[str] = strawberry.field( - description="Comma-separated list of related database entries for the dataset.", default=None + description="Comma-separated list of related database entries for the dataset.", + default=None, ) s3_prefix: str = strawberry.field(description="Path to a directory containing data for this entity as an S3 url") https_prefix: str = strawberry.field( - description="Path to a directory containing data for this entity as an HTTPS url" + description="Path to a directory containing data for this entity as an HTTPS url", ) id: int = strawberry.field(description="An identifier to refer to a specific instance of this type") @@ -593,13 +608,14 @@ class DatasetUpdateInput: deposition_id: Optional[strawberry.ID] = strawberry.field(description=None, default=None) title: Optional[str] = strawberry.field(description="Title of a CryoET dataset.") description: Optional[str] = strawberry.field( - description="A short description of a CryoET dataset, similar to an abstract for a journal article or dataset." + description="A short description of a CryoET dataset, similar to an abstract for a journal article or dataset.", ) organism_name: Optional[str] = strawberry.field( - description="Name of the organism from which a biological sample used in a CryoET study is derived from, e.g. homo sapiens." + description="Name of the organism from which a biological sample used in a CryoET study is derived from, e.g. homo sapiens.", ) organism_taxid: Optional[int] = strawberry.field( - description="NCBI taxonomy identifier for the organism, e.g. 9606", default=None + description="NCBI taxonomy identifier for the organism, e.g. 
9606", + default=None, ) tissue_name: Optional[str] = strawberry.field( description="Name of the tissue from which a biological sample used in a CryoET study is derived from.", @@ -611,17 +627,21 @@ class DatasetUpdateInput: default=None, ) cell_type_id: Optional[str] = strawberry.field( - description="Cell Ontology identifier for the cell type", default=None + description="Cell Ontology identifier for the cell type", + default=None, ) cell_strain_name: Optional[str] = strawberry.field(description="Cell line or strain for the sample.", default=None) cell_strain_id: Optional[str] = strawberry.field( - description="Link to more information about the cell strain.", default=None + description="Link to more information about the cell strain.", + default=None, ) sample_type: Optional[sample_type_enum] = strawberry.field( - description="Type of sample imaged in a CryoET study", default=None + description="Type of sample imaged in a CryoET study", + default=None, ) sample_preparation: Optional[str] = strawberry.field( - description="Describes how the sample was prepared.", default=None + description="Describes how the sample was prepared.", + default=None, ) grid_preparation: Optional[str] = strawberry.field(description="Describes Cryo-ET grid preparation.", default=None) other_setup: Optional[str] = strawberry.field( @@ -630,32 +650,36 @@ class DatasetUpdateInput: ) key_photo_url: Optional[str] = strawberry.field(description="URL for the dataset preview image.", default=None) key_photo_thumbnail_url: Optional[str] = strawberry.field( - description="URL for the thumbnail of preview image.", default=None + description="URL for the thumbnail of preview image.", + default=None, ) cell_component_name: Optional[str] = strawberry.field(description="Name of the cellular component.", default=None) cell_component_id: Optional[str] = strawberry.field( - description="The GO identifier for the cellular component.", default=None + description="The GO identifier for the cellular component.", + default=None, ) deposition_date: Optional[datetime.datetime] = strawberry.field( - description="The date a data item was received by the cryoET data portal." + description="The date a data item was received by the cryoET data portal.", ) release_date: Optional[datetime.datetime] = strawberry.field( - description="The date a data item was received by the cryoET data portal." + description="The date a data item was received by the cryoET data portal.", ) last_modified_date: Optional[datetime.datetime] = strawberry.field( - description="The date a piece of data was last modified on the cryoET data portal." 
+ description="The date a piece of data was last modified on the cryoET data portal.", ) publications: Optional[str] = strawberry.field( - description="Comma-separated list of DOIs for publications associated with the dataset.", default=None + description="Comma-separated list of DOIs for publications associated with the dataset.", + default=None, ) related_database_entries: Optional[str] = strawberry.field( - description="Comma-separated list of related database entries for the dataset.", default=None + description="Comma-separated list of related database entries for the dataset.", + default=None, ) s3_prefix: Optional[str] = strawberry.field( - description="Path to a directory containing data for this entity as an S3 url" + description="Path to a directory containing data for this entity as an S3 url", ) https_prefix: Optional[str] = strawberry.field( - description="Path to a directory containing data for this entity as an HTTPS url" + description="Path to a directory containing data for this entity as an HTTPS url", ) id: Optional[int] = strawberry.field(description="An identifier to refer to a specific instance of this type") @@ -692,7 +716,7 @@ def format_dataset_aggregate_output(query_results: Sequence[RowMapping] | RowMap format the results using the proper GraphQL types. """ aggregate = [] - if not type(query_results) is list: + if type(query_results) is not list: query_results = [query_results] # type: ignore for row in query_results: aggregate.append(format_dataset_aggregate_row(row)) @@ -711,10 +735,10 @@ def format_dataset_aggregate_row(row: RowMapping) -> DatasetAggregateFunctions: aggregate = key.split("_", 1) if aggregate[0] not in aggregator_map.keys(): # Turn list of groupby keys into nested objects - if not getattr(output, "groupBy"): - setattr(output, "groupBy", DatasetGroupByOptions()) - group = build_dataset_groupby_output(getattr(output, "groupBy"), group_keys, value) - setattr(output, "groupBy", group) + if not output.groupBy: + output.groupBy = DatasetGroupByOptions() + group = build_dataset_groupby_output(output.groupBy, group_keys, value) + output.groupBy = group else: aggregate_name = aggregate[0] if aggregate_name == "count": @@ -745,8 +769,8 @@ async def resolve_datasets_aggregate( # Get the selected aggregate functions and columns to operate on, and groupby options if any were provided. 
     # TODO: not sure why selected_fields is a list
     selections = info.selected_fields[0].selections[0].selections
-    aggregate_selections = [selection for selection in selections if getattr(selection, "name") != "groupBy"]
-    groupby_selections = [selection for selection in selections if getattr(selection, "name") == "groupBy"]
+    aggregate_selections = [selection for selection in selections if selection.name != "groupBy"]
+    groupby_selections = [selection for selection in selections if selection.name == "groupBy"]
     groupby_selections = groupby_selections[0].selections if groupby_selections else []

     if not aggregate_selections:
diff --git a/apiv2/graphql_api/types/dataset_author.py b/apiv2/graphql_api/types/dataset_author.py
index 28dcfbe48..4d7c39608 100644
--- a/apiv2/graphql_api/types/dataset_author.py
+++ b/apiv2/graphql_api/types/dataset_author.py
@@ -8,48 +8,41 @@
 # ruff: noqa: E501 Line too long

+import datetime
+import enum
 import typing
-from typing import TYPE_CHECKING, Annotated, Any, Optional, Sequence, Callable, List
+from typing import TYPE_CHECKING, Annotated, Optional, Sequence

-import platformics.database.models as base_db
 import database.models as db
 import strawberry
-import datetime
-from platformics.graphql_api.core.query_builder import get_db_rows, get_aggregate_db_rows
-from validators.dataset_author import DatasetAuthorCreateInputValidator
-from validators.dataset_author import DatasetAuthorUpdateInputValidator
-from graphql_api.helpers.dataset_author import DatasetAuthorGroupByOptions, build_dataset_author_groupby_output
-from platformics.graphql_api.core.relay_interface import EntityInterface
 from fastapi import Depends
+from graphql_api.helpers.dataset_author import DatasetAuthorGroupByOptions, build_dataset_author_groupby_output
+from platformics.graphql_api.core.deps import get_authz_client, get_db_session, is_system_user, require_auth_principal
 from platformics.graphql_api.core.errors import PlatformicsError
-from platformics.graphql_api.core.deps import get_authz_client, get_db_session, require_auth_principal, is_system_user
+from platformics.graphql_api.core.query_builder import get_aggregate_db_rows, get_db_rows
 from platformics.graphql_api.core.query_input_types import (
-    aggregator_map,
-    orderBy,
-    EnumComparators,
-    DatetimeComparators,
+    BoolComparators,
     IntComparators,
-    FloatComparators,
     StrComparators,
-    UUIDComparators,
-    BoolComparators,
+    aggregator_map,
+    orderBy,
 )
+from platformics.graphql_api.core.relay_interface import EntityInterface
 from platformics.graphql_api.core.strawberry_extensions import DependencyExtension
 from platformics.security.authorization import AuthzAction, AuthzClient, Principal
 from sqlalchemy import inspect
 from sqlalchemy.engine.row import RowMapping
 from sqlalchemy.ext.asyncio import AsyncSession
-from strawberry import relay
 from strawberry.types import Info
 from support.limit_offset import LimitOffsetClause
 from typing_extensions import TypedDict
-import enum
+from validators.dataset_author import DatasetAuthorCreateInputValidator, DatasetAuthorUpdateInputValidator

 E = typing.TypeVar("E")
 T = typing.TypeVar("T")

 if TYPE_CHECKING:
-    from graphql_api.types.dataset import DatasetOrderByClause, DatasetWhereClause, Dataset
+    from graphql_api.types.dataset import Dataset, DatasetOrderByClause, DatasetWhereClause

     pass
 else:
@@ -149,24 +142,29 @@ class DatasetAuthor(EntityInterface):
         load_dataset_rows
     )  # type:ignore
     author_list_order: int = strawberry.field(
-        description="The order that the author is listed as in the associated publication"
+        description="The order that the author is listed as in the associated publication",
     )
     name: str = strawberry.field(description="The full name of the author.")
     email: Optional[str] = strawberry.field(description="The email address of the author.", default=None)
     affiliation_name: Optional[str] = strawberry.field(
-        description="The name of the author's affiliation.", default=None
+        description="The name of the author's affiliation.",
+        default=None,
     )
     affiliation_address: Optional[str] = strawberry.field(
-        description="The address of the author's affiliation.", default=None
+        description="The address of the author's affiliation.",
+        default=None,
     )
     affiliation_identifier: Optional[str] = strawberry.field(
-        description="A Research Organization Registry (ROR) identifier.", default=None
+        description="A Research Organization Registry (ROR) identifier.",
+        default=None,
     )
     corresponding_author_status: Optional[bool] = strawberry.field(
-        description="Whether the author is a corresponding author.", default=None
+        description="Whether the author is a corresponding author.",
+        default=None,
     )
     primary_author_status: Optional[bool] = strawberry.field(
-        description="Whether the author is a primary author.", default=None
+        description="Whether the author is a primary author.",
+        default=None,
     )
     orcid: Optional[str] = strawberry.field(description="The ORCID identifier for the author.", default=None)
     id: int = strawberry.field(description="An identifier to refer to a specific instance of this type")
@@ -243,7 +241,9 @@ class DatasetAuthorAggregateFunctions:
     # This is a hack to accept "distinct" and "columns" as arguments to "count"
     @strawberry.field
     def count(
-        self, distinct: Optional[bool] = False, columns: Optional[DatasetAuthorCountColumns] = None
+        self,
+        distinct: Optional[bool] = False,
+        columns: Optional[DatasetAuthorCountColumns] = None,
     ) -> Optional[int]:
         # Count gets set with the proper value in the resolver, so we just return it here
         return self.count  # type: ignore
@@ -278,24 +278,29 @@ class DatasetAuthorCreateInput:
     dataset_id: Optional[strawberry.ID] = strawberry.field(description="An author of a dataset", default=None)
     author_list_order: int = strawberry.field(
-        description="The order that the author is listed as in the associated publication"
+        description="The order that the author is listed as in the associated publication",
     )
     name: str = strawberry.field(description="The full name of the author.")
     email: Optional[str] = strawberry.field(description="The email address of the author.", default=None)
     affiliation_name: Optional[str] = strawberry.field(
-        description="The name of the author's affiliation.", default=None
+        description="The name of the author's affiliation.",
+        default=None,
     )
     affiliation_address: Optional[str] = strawberry.field(
-        description="The address of the author's affiliation.", default=None
+        description="The address of the author's affiliation.",
+        default=None,
     )
     affiliation_identifier: Optional[str] = strawberry.field(
-        description="A Research Organization Registry (ROR) identifier.", default=None
+        description="A Research Organization Registry (ROR) identifier.",
+        default=None,
     )
     corresponding_author_status: Optional[bool] = strawberry.field(
-        description="Whether the author is a corresponding author.", default=None
+        description="Whether the author is a corresponding author.",
+        default=None,
     )
     primary_author_status: Optional[bool] = strawberry.field(
-        description="Whether the author is a primary author.", default=None
+        description="Whether the author is a primary author.",
+        default=None,
     )
     orcid: Optional[str] = strawberry.field(description="The ORCID identifier for the author.", default=None)
     id: int = strawberry.field(description="An identifier to refer to a specific instance of this type")
@@ -305,24 +310,29 @@ class DatasetAuthorUpdateInput:
     dataset_id: Optional[strawberry.ID] = strawberry.field(description="An author of a dataset", default=None)
     author_list_order: Optional[int] = strawberry.field(
-        description="The order that the author is listed as in the associated publication"
+        description="The order that the author is listed as in the associated publication",
     )
     name: Optional[str] = strawberry.field(description="The full name of the author.")
     email: Optional[str] = strawberry.field(description="The email address of the author.", default=None)
     affiliation_name: Optional[str] = strawberry.field(
-        description="The name of the author's affiliation.", default=None
+        description="The name of the author's affiliation.",
+        default=None,
     )
     affiliation_address: Optional[str] = strawberry.field(
-        description="The address of the author's affiliation.", default=None
+        description="The address of the author's affiliation.",
+        default=None,
     )
     affiliation_identifier: Optional[str] = strawberry.field(
-        description="A Research Organization Registry (ROR) identifier.", default=None
+        description="A Research Organization Registry (ROR) identifier.",
+        default=None,
     )
     corresponding_author_status: Optional[bool] = strawberry.field(
-        description="Whether the author is a corresponding author.", default=None
+        description="Whether the author is a corresponding author.",
+        default=None,
     )
     primary_author_status: Optional[bool] = strawberry.field(
-        description="Whether the author is a primary author.", default=None
+        description="Whether the author is a primary author.",
+        default=None,
     )
     orcid: Optional[str] = strawberry.field(description="The ORCID identifier for the author.", default=None)
     id: Optional[int] = strawberry.field(description="An identifier to refer to a specific instance of this type")
@@ -360,7 +370,7 @@ def format_dataset_author_aggregate_output(query_results: Sequence[RowMapping] |
     format the results using the proper GraphQL types.
     """
     aggregate = []
-    if not type(query_results) is list:
+    if type(query_results) is not list:
         query_results = [query_results]  # type: ignore
     for row in query_results:
         aggregate.append(format_dataset_author_aggregate_row(row))
@@ -379,10 +389,10 @@ def format_dataset_author_aggregate_row(row: RowMapping) -> DatasetAuthorAggrega
         aggregate = key.split("_", 1)
         if aggregate[0] not in aggregator_map.keys():
             # Turn list of groupby keys into nested objects
-            if not getattr(output, "groupBy"):
-                setattr(output, "groupBy", DatasetAuthorGroupByOptions())
-            group = build_dataset_author_groupby_output(getattr(output, "groupBy"), group_keys, value)
-            setattr(output, "groupBy", group)
+            if not output.groupBy:
+                output.groupBy = DatasetAuthorGroupByOptions()
+            group = build_dataset_author_groupby_output(output.groupBy, group_keys, value)
+            output.groupBy = group
         else:
             aggregate_name = aggregate[0]
             if aggregate_name == "count":
@@ -413,8 +423,8 @@ async def resolve_dataset_authors_aggregate(
     # Get the selected aggregate functions and columns to operate on, and groupby options if any were provided.
     # TODO: not sure why selected_fields is a list
     selections = info.selected_fields[0].selections[0].selections
-    aggregate_selections = [selection for selection in selections if getattr(selection, "name") != "groupBy"]
-    groupby_selections = [selection for selection in selections if getattr(selection, "name") == "groupBy"]
+    aggregate_selections = [selection for selection in selections if selection.name != "groupBy"]
+    groupby_selections = [selection for selection in selections if selection.name == "groupBy"]
     groupby_selections = groupby_selections[0].selections if groupby_selections else []

     if not aggregate_selections:
@@ -445,7 +455,13 @@ async def create_dataset_author(
     # Check that dataset relationship is accessible.
     if validated.dataset_id:
         dataset = await get_db_rows(
-            db.Dataset, session, authz_client, principal, {"id": {"_eq": validated.dataset_id}}, [], AuthzAction.VIEW
+            db.Dataset,
+            session,
+            authz_client,
+            principal,
+            {"id": {"_eq": validated.dataset_id}},
+            [],
+            AuthzAction.VIEW,
         )
         if not dataset:
             raise PlatformicsError("Unauthorized: dataset does not exist")
@@ -487,7 +503,13 @@ async def update_dataset_author(
     # Check that dataset relationship is accessible.
     if validated.dataset_id:
         dataset = await get_db_rows(
-            db.Dataset, session, authz_client, principal, {"id": {"_eq": validated.dataset_id}}, [], AuthzAction.VIEW
+            db.Dataset,
+            session,
+            authz_client,
+            principal,
+            {"id": {"_eq": validated.dataset_id}},
+            [],
+            AuthzAction.VIEW,
         )
         if not dataset:
             raise PlatformicsError("Unauthorized: dataset does not exist")
diff --git a/apiv2/graphql_api/types/dataset_funding.py b/apiv2/graphql_api/types/dataset_funding.py
index 87dcb0f06..1349beead 100644
--- a/apiv2/graphql_api/types/dataset_funding.py
+++ b/apiv2/graphql_api/types/dataset_funding.py
@@ -8,48 +8,40 @@
 # ruff: noqa: E501 Line too long

+import datetime
+import enum
 import typing
-from typing import TYPE_CHECKING, Annotated, Any, Optional, Sequence, Callable, List
+from typing import TYPE_CHECKING, Annotated, Optional, Sequence

-import platformics.database.models as base_db
 import database.models as db
 import strawberry
-import datetime
-from platformics.graphql_api.core.query_builder import get_db_rows, get_aggregate_db_rows
-from validators.dataset_funding import DatasetFundingCreateInputValidator
-from validators.dataset_funding import DatasetFundingUpdateInputValidator
-from graphql_api.helpers.dataset_funding import DatasetFundingGroupByOptions, build_dataset_funding_groupby_output
-from platformics.graphql_api.core.relay_interface import EntityInterface
 from fastapi import Depends
+from graphql_api.helpers.dataset_funding import DatasetFundingGroupByOptions, build_dataset_funding_groupby_output
+from platformics.graphql_api.core.deps import get_authz_client, get_db_session, is_system_user, require_auth_principal
 from platformics.graphql_api.core.errors import PlatformicsError
-from platformics.graphql_api.core.deps import get_authz_client, get_db_session, require_auth_principal, is_system_user
+from platformics.graphql_api.core.query_builder import get_aggregate_db_rows, get_db_rows
 from platformics.graphql_api.core.query_input_types import (
-    aggregator_map,
-    orderBy,
-    EnumComparators,
-    DatetimeComparators,
     IntComparators,
-    FloatComparators,
     StrComparators,
-    UUIDComparators,
-    BoolComparators,
+    aggregator_map,
+    orderBy,
 )
+from platformics.graphql_api.core.relay_interface import EntityInterface
 from platformics.graphql_api.core.strawberry_extensions import DependencyExtension
 from platformics.security.authorization import AuthzAction, AuthzClient, Principal
 from sqlalchemy import inspect
 from sqlalchemy.engine.row import RowMapping
 from sqlalchemy.ext.asyncio import AsyncSession
-from strawberry import relay
 from strawberry.types import Info
 from support.limit_offset import LimitOffsetClause
 from typing_extensions import TypedDict
-import enum
+from validators.dataset_funding import DatasetFundingCreateInputValidator, DatasetFundingUpdateInputValidator

 E = typing.TypeVar("E")
 T = typing.TypeVar("T")

 if TYPE_CHECKING:
-    from graphql_api.types.dataset import DatasetOrderByClause, DatasetWhereClause, Dataset
+    from graphql_api.types.dataset import Dataset, DatasetOrderByClause, DatasetWhereClause

     pass
 else:
@@ -136,7 +128,8 @@ class DatasetFunding(EntityInterface):
     )  # type:ignore
     funding_agency_name: Optional[str] = strawberry.field(description="The name of the funding source.", default=None)
     grant_id: Optional[str] = strawberry.field(
-        description="Grant identifier provided by the funding agency", default=None
+        description="Grant identifier provided by the funding agency",
+        default=None,
     )
     id: int = strawberry.field(description="An identifier to refer to a specific instance of this type")
@@ -199,7 +192,9 @@ class DatasetFundingAggregateFunctions:
     # This is a hack to accept "distinct" and "columns" as arguments to "count"
     @strawberry.field
     def count(
-        self, distinct: Optional[bool] = False, columns: Optional[DatasetFundingCountColumns] = None
+        self,
+        distinct: Optional[bool] = False,
+        columns: Optional[DatasetFundingCountColumns] = None,
     ) -> Optional[int]:
         # Count gets set with the proper value in the resolver, so we just return it here
         return self.count  # type: ignore
@@ -235,7 +230,8 @@ class DatasetFundingCreateInput:
     dataset_id: Optional[strawberry.ID] = strawberry.field(description="An author of a dataset", default=None)
     funding_agency_name: Optional[str] = strawberry.field(description="The name of the funding source.", default=None)
     grant_id: Optional[str] = strawberry.field(
-        description="Grant identifier provided by the funding agency", default=None
+        description="Grant identifier provided by the funding agency",
+        default=None,
     )
     id: int = strawberry.field(description="An identifier to refer to a specific instance of this type")
@@ -245,7 +241,8 @@ class DatasetFundingUpdateInput:
     dataset_id: Optional[strawberry.ID] = strawberry.field(description="An author of a dataset", default=None)
     funding_agency_name: Optional[str] = strawberry.field(description="The name of the funding source.", default=None)
     grant_id: Optional[str] = strawberry.field(
-        description="Grant identifier provided by the funding agency", default=None
+        description="Grant identifier provided by the funding agency",
+        default=None,
     )
     id: Optional[int] = strawberry.field(description="An identifier to refer to a specific instance of this type")
@@ -284,7 +281,7 @@ def format_dataset_funding_aggregate_output(
     format the results using the proper GraphQL types.
""" aggregate = [] - if not type(query_results) is list: + if type(query_results) is not list: query_results = [query_results] # type: ignore for row in query_results: aggregate.append(format_dataset_funding_aggregate_row(row)) @@ -303,10 +300,10 @@ def format_dataset_funding_aggregate_row(row: RowMapping) -> DatasetFundingAggre aggregate = key.split("_", 1) if aggregate[0] not in aggregator_map.keys(): # Turn list of groupby keys into nested objects - if not getattr(output, "groupBy"): - setattr(output, "groupBy", DatasetFundingGroupByOptions()) - group = build_dataset_funding_groupby_output(getattr(output, "groupBy"), group_keys, value) - setattr(output, "groupBy", group) + if not output.groupBy: + output.groupBy = DatasetFundingGroupByOptions() + group = build_dataset_funding_groupby_output(output.groupBy, group_keys, value) + output.groupBy = group else: aggregate_name = aggregate[0] if aggregate_name == "count": @@ -337,8 +334,8 @@ async def resolve_dataset_funding_aggregate( # Get the selected aggregate functions and columns to operate on, and groupby options if any were provided. # TODO: not sure why selected_fields is a list selections = info.selected_fields[0].selections[0].selections - aggregate_selections = [selection for selection in selections if getattr(selection, "name") != "groupBy"] - groupby_selections = [selection for selection in selections if getattr(selection, "name") == "groupBy"] + aggregate_selections = [selection for selection in selections if selection.name != "groupBy"] + groupby_selections = [selection for selection in selections if selection.name == "groupBy"] groupby_selections = groupby_selections[0].selections if groupby_selections else [] if not aggregate_selections: @@ -369,7 +366,13 @@ async def create_dataset_funding( # Check that dataset relationship is accessible. if validated.dataset_id: dataset = await get_db_rows( - db.Dataset, session, authz_client, principal, {"id": {"_eq": validated.dataset_id}}, [], AuthzAction.VIEW + db.Dataset, + session, + authz_client, + principal, + {"id": {"_eq": validated.dataset_id}}, + [], + AuthzAction.VIEW, ) if not dataset: raise PlatformicsError("Unauthorized: dataset does not exist") @@ -411,7 +414,13 @@ async def update_dataset_funding( # Check that dataset relationship is accessible. 
     if validated.dataset_id:
         dataset = await get_db_rows(
-            db.Dataset, session, authz_client, principal, {"id": {"_eq": validated.dataset_id}}, [], AuthzAction.VIEW
+            db.Dataset,
+            session,
+            authz_client,
+            principal,
+            {"id": {"_eq": validated.dataset_id}},
+            [],
+            AuthzAction.VIEW,
         )
         if not dataset:
             raise PlatformicsError("Unauthorized: dataset does not exist")
diff --git a/apiv2/graphql_api/types/deposition.py b/apiv2/graphql_api/types/deposition.py
index 11ca8f88f..903909a41 100644
--- a/apiv2/graphql_api/types/deposition.py
+++ b/apiv2/graphql_api/types/deposition.py
@@ -8,40 +8,34 @@
 # ruff: noqa: E501 Line too long

+import datetime
+import enum
 import typing
-from typing import TYPE_CHECKING, Annotated, Any, Optional, Sequence, Callable, List
+from typing import TYPE_CHECKING, Annotated, Optional, Sequence

-import platformics.database.models as base_db
 import database.models as db
 import strawberry
-import datetime
-from platformics.graphql_api.core.query_builder import get_db_rows, get_aggregate_db_rows
-from validators.deposition import DepositionCreateInputValidator
-from validators.deposition import DepositionUpdateInputValidator
+from fastapi import Depends
 from graphql_api.helpers.deposition import DepositionGroupByOptions, build_deposition_groupby_output
-from platformics.graphql_api.core.relay_interface import EntityInterface
-from graphql_api.types.deposition_author import DepositionAuthorAggregate, format_deposition_author_aggregate_output
 from graphql_api.types.alignment import AlignmentAggregate, format_alignment_aggregate_output
 from graphql_api.types.annotation import AnnotationAggregate, format_annotation_aggregate_output
 from graphql_api.types.dataset import DatasetAggregate, format_dataset_aggregate_output
+from graphql_api.types.deposition_author import DepositionAuthorAggregate, format_deposition_author_aggregate_output
+from graphql_api.types.deposition_type import DepositionTypeAggregate, format_deposition_type_aggregate_output
 from graphql_api.types.frame import FrameAggregate, format_frame_aggregate_output
 from graphql_api.types.tiltseries import TiltseriesAggregate, format_tiltseries_aggregate_output
 from graphql_api.types.tomogram import TomogramAggregate, format_tomogram_aggregate_output
-from graphql_api.types.deposition_type import DepositionTypeAggregate, format_deposition_type_aggregate_output
-from fastapi import Depends
+from platformics.graphql_api.core.deps import get_authz_client, get_db_session, is_system_user, require_auth_principal
 from platformics.graphql_api.core.errors import PlatformicsError
-from platformics.graphql_api.core.deps import get_authz_client, get_db_session, require_auth_principal, is_system_user
+from platformics.graphql_api.core.query_builder import get_aggregate_db_rows, get_db_rows
 from platformics.graphql_api.core.query_input_types import (
-    aggregator_map,
-    orderBy,
-    EnumComparators,
     DatetimeComparators,
     IntComparators,
-    FloatComparators,
     StrComparators,
-    UUIDComparators,
-    BoolComparators,
+    aggregator_map,
+    orderBy,
 )
+from platformics.graphql_api.core.relay_interface import EntityInterface
 from platformics.graphql_api.core.strawberry_extensions import DependencyExtension
 from platformics.security.authorization import AuthzAction, AuthzClient, Principal
 from sqlalchemy import inspect
@@ -51,24 +45,24 @@ from strawberry.types import Info
 from support.limit_offset import LimitOffsetClause
 from typing_extensions import TypedDict
-import enum
+from validators.deposition import DepositionCreateInputValidator, DepositionUpdateInputValidator

 E = typing.TypeVar("E")
 T = typing.TypeVar("T")

 if TYPE_CHECKING:
+    from graphql_api.types.alignment import Alignment, AlignmentOrderByClause, AlignmentWhereClause
+    from graphql_api.types.annotation import Annotation, AnnotationOrderByClause, AnnotationWhereClause
+    from graphql_api.types.dataset import Dataset, DatasetOrderByClause, DatasetWhereClause
     from graphql_api.types.deposition_author import (
+        DepositionAuthor,
         DepositionAuthorOrderByClause,
         DepositionAuthorWhereClause,
-        DepositionAuthor,
     )
-    from graphql_api.types.alignment import AlignmentOrderByClause, AlignmentWhereClause, Alignment
-    from graphql_api.types.annotation import AnnotationOrderByClause, AnnotationWhereClause, Annotation
-    from graphql_api.types.dataset import DatasetOrderByClause, DatasetWhereClause, Dataset
-    from graphql_api.types.frame import FrameOrderByClause, FrameWhereClause, Frame
-    from graphql_api.types.tiltseries import TiltseriesOrderByClause, TiltseriesWhereClause, Tiltseries
-    from graphql_api.types.tomogram import TomogramOrderByClause, TomogramWhereClause, Tomogram
-    from graphql_api.types.deposition_type import DepositionTypeOrderByClause, DepositionTypeWhereClause, DepositionType
+    from graphql_api.types.deposition_type import DepositionType, DepositionTypeOrderByClause, DepositionTypeWhereClause
+    from graphql_api.types.frame import Frame, FrameOrderByClause, FrameWhereClause
+    from graphql_api.types.tiltseries import Tiltseries, TiltseriesOrderByClause, TiltseriesWhereClause
+    from graphql_api.types.tomogram import Tomogram, TomogramOrderByClause, TomogramWhereClause

     pass
 else:
@@ -110,7 +104,7 @@
     @relay.connection(
         relay.ListConnection[
             Annotated["DepositionAuthor", strawberry.lazy("graphql_api.types.deposition_author")]
-        ]  # type:ignore
+        ],  # type:ignore
     )
     async def load_deposition_author_rows(
         root: "Deposition",
@@ -146,7 +140,7 @@ async def load_deposition_author_aggregate_rows(
     @relay.connection(
-        relay.ListConnection[Annotated["Alignment", strawberry.lazy("graphql_api.types.alignment")]]  # type:ignore
+        relay.ListConnection[Annotated["Alignment", strawberry.lazy("graphql_api.types.alignment")]],  # type:ignore
     )
     async def load_alignment_rows(
         root: "Deposition",
@@ -176,7 +170,7 @@ async def load_alignment_aggregate_rows(
     @relay.connection(
-        relay.ListConnection[Annotated["Annotation", strawberry.lazy("graphql_api.types.annotation")]]  # type:ignore
+        relay.ListConnection[Annotated["Annotation", strawberry.lazy("graphql_api.types.annotation")]],  # type:ignore
     )
     async def load_annotation_rows(
         root: "Deposition",
@@ -208,7 +202,7 @@ async def load_annotation_aggregate_rows(
     @relay.connection(
-        relay.ListConnection[Annotated["Dataset", strawberry.lazy("graphql_api.types.dataset")]]  # type:ignore
+        relay.ListConnection[Annotated["Dataset", strawberry.lazy("graphql_api.types.dataset")]],  # type:ignore
     )
     async def load_dataset_rows(
         root: "Deposition",
@@ -238,7 +232,7 @@ async def load_dataset_aggregate_rows(
     @relay.connection(
-        relay.ListConnection[Annotated["Frame", strawberry.lazy("graphql_api.types.frame")]]  # type:ignore
+        relay.ListConnection[Annotated["Frame", strawberry.lazy("graphql_api.types.frame")]],  # type:ignore
     )
     async def load_frame_rows(
         root: "Deposition",
@@ -268,7 +262,7 @@ async def load_frame_aggregate_rows(
     @relay.connection(
-        relay.ListConnection[Annotated["Tiltseries", strawberry.lazy("graphql_api.types.tiltseries")]]  # type:ignore
+        relay.ListConnection[Annotated["Tiltseries", strawberry.lazy("graphql_api.types.tiltseries")]],  # type:ignore
     )
     async def load_tiltseries_rows(
         root: "Deposition",
@@ -300,7 +294,7 @@ async def load_tiltseries_aggregate_rows(
     @relay.connection(
-        relay.ListConnection[Annotated["Tomogram", strawberry.lazy("graphql_api.types.tomogram")]]  # type:ignore
+        relay.ListConnection[Annotated["Tomogram", strawberry.lazy("graphql_api.types.tomogram")]],  # type:ignore
     )
     async def load_tomogram_rows(
         root: "Deposition",
@@ -332,7 +326,7 @@ async def load_tomogram_aggregate_rows(
     @relay.connection(
         relay.ListConnection[
             Annotated["DepositionType", strawberry.lazy("graphql_api.types.deposition_type")]
-        ]  # type:ignore
+        ],  # type:ignore
     )
     async def load_deposition_type_rows(
         root: "Deposition",
@@ -477,7 +471,7 @@ class Deposition(EntityInterface):
     )  # type:ignore
     deposition_title: str = strawberry.field(description="Title of a CryoET deposition.")
     deposition_description: str = strawberry.field(
-        description="A short description of the deposition, similar to an abstract for a journal article or dataset."
+        description="A short description of the deposition, similar to an abstract for a journal article or dataset.",
     )
     deposition_types: Sequence[Annotated["DepositionType", strawberry.lazy("graphql_api.types.deposition_type")]] = (
         load_deposition_type_rows
@@ -486,19 +480,21 @@ class Deposition(EntityInterface):
         Annotated["DepositionTypeAggregate", strawberry.lazy("graphql_api.types.deposition_type")]
     ] = load_deposition_type_aggregate_rows  # type:ignore
     publications: Optional[str] = strawberry.field(
-        description="Comma-separated list of DOIs for publications associated with the dataset.", default=None
+        description="Comma-separated list of DOIs for publications associated with the dataset.",
+        default=None,
     )
     related_database_entries: Optional[str] = strawberry.field(
-        description="Comma-separated list of related database entries for the dataset.", default=None
+        description="Comma-separated list of related database entries for the dataset.",
+        default=None,
     )
     deposition_date: datetime.datetime = strawberry.field(
-        description="The date a data item was received by the cryoET data portal."
+        description="The date a data item was received by the cryoET data portal.",
     )
     release_date: datetime.datetime = strawberry.field(
-        description="The date a data item was received by the cryoET data portal."
+        description="The date a data item was received by the cryoET data portal.",
     )
     last_modified_date: datetime.datetime = strawberry.field(
-        description="The date a piece of data was last modified on the cryoET data portal."
+        description="The date a piece of data was last modified on the cryoET data portal.",
     )
     id: int = strawberry.field(description="An identifier to refer to a specific instance of this type")
@@ -578,7 +574,9 @@ class DepositionAggregateFunctions:
     # This is a hack to accept "distinct" and "columns" as arguments to "count"
     @strawberry.field
     def count(
-        self, distinct: Optional[bool] = False, columns: Optional[DepositionCountColumns] = None
+        self,
+        distinct: Optional[bool] = False,
+        columns: Optional[DepositionCountColumns] = None,
     ) -> Optional[int]:
         # Count gets set with the proper value in the resolver, so we just return it here
         return self.count  # type: ignore
@@ -613,22 +611,24 @@ class DepositionCreateInput:
     deposition_title: str = strawberry.field(description="Title of a CryoET deposition.")
     deposition_description: str = strawberry.field(
-        description="A short description of the deposition, similar to an abstract for a journal article or dataset."
+ description="A short description of the deposition, similar to an abstract for a journal article or dataset.", ) publications: Optional[str] = strawberry.field( - description="Comma-separated list of DOIs for publications associated with the dataset.", default=None + description="Comma-separated list of DOIs for publications associated with the dataset.", + default=None, ) related_database_entries: Optional[str] = strawberry.field( - description="Comma-separated list of related database entries for the dataset.", default=None + description="Comma-separated list of related database entries for the dataset.", + default=None, ) deposition_date: datetime.datetime = strawberry.field( - description="The date a data item was received by the cryoET data portal." + description="The date a data item was received by the cryoET data portal.", ) release_date: datetime.datetime = strawberry.field( - description="The date a data item was received by the cryoET data portal." + description="The date a data item was received by the cryoET data portal.", ) last_modified_date: datetime.datetime = strawberry.field( - description="The date a piece of data was last modified on the cryoET data portal." + description="The date a piece of data was last modified on the cryoET data portal.", ) id: int = strawberry.field(description="An identifier to refer to a specific instance of this type") @@ -637,22 +637,24 @@ class DepositionCreateInput: class DepositionUpdateInput: deposition_title: Optional[str] = strawberry.field(description="Title of a CryoET deposition.") deposition_description: Optional[str] = strawberry.field( - description="A short description of the deposition, similar to an abstract for a journal article or dataset." + description="A short description of the deposition, similar to an abstract for a journal article or dataset.", ) publications: Optional[str] = strawberry.field( - description="Comma-separated list of DOIs for publications associated with the dataset.", default=None + description="Comma-separated list of DOIs for publications associated with the dataset.", + default=None, ) related_database_entries: Optional[str] = strawberry.field( - description="Comma-separated list of related database entries for the dataset.", default=None + description="Comma-separated list of related database entries for the dataset.", + default=None, ) deposition_date: Optional[datetime.datetime] = strawberry.field( - description="The date a data item was received by the cryoET data portal." + description="The date a data item was received by the cryoET data portal.", ) release_date: Optional[datetime.datetime] = strawberry.field( - description="The date a data item was received by the cryoET data portal." + description="The date a data item was received by the cryoET data portal.", ) last_modified_date: Optional[datetime.datetime] = strawberry.field( - description="The date a piece of data was last modified on the cryoET data portal." + description="The date a piece of data was last modified on the cryoET data portal.", ) id: Optional[int] = strawberry.field(description="An identifier to refer to a specific instance of this type") @@ -689,7 +691,7 @@ def format_deposition_aggregate_output(query_results: Sequence[RowMapping] | Row format the results using the proper GraphQL types. 
""" aggregate = [] - if not type(query_results) is list: + if type(query_results) is not list: query_results = [query_results] # type: ignore for row in query_results: aggregate.append(format_deposition_aggregate_row(row)) @@ -708,10 +710,10 @@ def format_deposition_aggregate_row(row: RowMapping) -> DepositionAggregateFunct aggregate = key.split("_", 1) if aggregate[0] not in aggregator_map.keys(): # Turn list of groupby keys into nested objects - if not getattr(output, "groupBy"): - setattr(output, "groupBy", DepositionGroupByOptions()) - group = build_deposition_groupby_output(getattr(output, "groupBy"), group_keys, value) - setattr(output, "groupBy", group) + if not output.groupBy: + output.groupBy = DepositionGroupByOptions() + group = build_deposition_groupby_output(output.groupBy, group_keys, value) + output.groupBy = group else: aggregate_name = aggregate[0] if aggregate_name == "count": @@ -742,8 +744,8 @@ async def resolve_depositions_aggregate( # Get the selected aggregate functions and columns to operate on, and groupby options if any were provided. # TODO: not sure why selected_fields is a list selections = info.selected_fields[0].selections[0].selections - aggregate_selections = [selection for selection in selections if getattr(selection, "name") != "groupBy"] - groupby_selections = [selection for selection in selections if getattr(selection, "name") == "groupBy"] + aggregate_selections = [selection for selection in selections if selection.name != "groupBy"] + groupby_selections = [selection for selection in selections if selection.name == "groupBy"] groupby_selections = groupby_selections[0].selections if groupby_selections else [] if not aggregate_selections: diff --git a/apiv2/graphql_api/types/deposition_author.py b/apiv2/graphql_api/types/deposition_author.py index a00a0a16e..dfdf45dfe 100644 --- a/apiv2/graphql_api/types/deposition_author.py +++ b/apiv2/graphql_api/types/deposition_author.py @@ -8,48 +8,41 @@ # ruff: noqa: E501 Line too long +import datetime +import enum import typing -from typing import TYPE_CHECKING, Annotated, Any, Optional, Sequence, Callable, List +from typing import TYPE_CHECKING, Annotated, Optional, Sequence -import platformics.database.models as base_db import database.models as db import strawberry -import datetime -from platformics.graphql_api.core.query_builder import get_db_rows, get_aggregate_db_rows -from validators.deposition_author import DepositionAuthorCreateInputValidator -from validators.deposition_author import DepositionAuthorUpdateInputValidator -from graphql_api.helpers.deposition_author import DepositionAuthorGroupByOptions, build_deposition_author_groupby_output -from platformics.graphql_api.core.relay_interface import EntityInterface from fastapi import Depends +from graphql_api.helpers.deposition_author import DepositionAuthorGroupByOptions, build_deposition_author_groupby_output +from platformics.graphql_api.core.deps import get_authz_client, get_db_session, is_system_user, require_auth_principal from platformics.graphql_api.core.errors import PlatformicsError -from platformics.graphql_api.core.deps import get_authz_client, get_db_session, require_auth_principal, is_system_user +from platformics.graphql_api.core.query_builder import get_aggregate_db_rows, get_db_rows from platformics.graphql_api.core.query_input_types import ( - aggregator_map, - orderBy, - EnumComparators, - DatetimeComparators, + BoolComparators, IntComparators, - FloatComparators, StrComparators, - UUIDComparators, - BoolComparators, + aggregator_map, + 
orderBy, ) +from platformics.graphql_api.core.relay_interface import EntityInterface from platformics.graphql_api.core.strawberry_extensions import DependencyExtension from platformics.security.authorization import AuthzAction, AuthzClient, Principal from sqlalchemy import inspect from sqlalchemy.engine.row import RowMapping from sqlalchemy.ext.asyncio import AsyncSession -from strawberry import relay from strawberry.types import Info from support.limit_offset import LimitOffsetClause from typing_extensions import TypedDict -import enum +from validators.deposition_author import DepositionAuthorCreateInputValidator, DepositionAuthorUpdateInputValidator E = typing.TypeVar("E") T = typing.TypeVar("T") if TYPE_CHECKING: - from graphql_api.types.deposition import DepositionOrderByClause, DepositionWhereClause, Deposition + from graphql_api.types.deposition import Deposition, DepositionOrderByClause, DepositionWhereClause pass else: @@ -151,25 +144,30 @@ class DepositionAuthor(EntityInterface): load_deposition_rows ) # type:ignore author_list_order: int = strawberry.field( - description="The order that the author is listed as in the associated publication" + description="The order that the author is listed as in the associated publication", ) orcid: Optional[str] = strawberry.field(description="The ORCID identifier for the author.", default=None) name: str = strawberry.field(description="The full name of the author.") email: Optional[str] = strawberry.field(description="The email address of the author.", default=None) affiliation_name: Optional[str] = strawberry.field( - description="The name of the author's affiliation.", default=None + description="The name of the author's affiliation.", + default=None, ) affiliation_address: Optional[str] = strawberry.field( - description="The address of the author's affiliation.", default=None + description="The address of the author's affiliation.", + default=None, ) affiliation_identifier: Optional[str] = strawberry.field( - description="A Research Organization Registry (ROR) identifier.", default=None + description="A Research Organization Registry (ROR) identifier.", + default=None, ) corresponding_author_status: Optional[bool] = strawberry.field( - description="Whether the author is a corresponding author.", default=None + description="Whether the author is a corresponding author.", + default=None, ) primary_author_status: Optional[bool] = strawberry.field( - description="Whether the author is a primary author.", default=None + description="Whether the author is a primary author.", + default=None, ) id: int = strawberry.field(description="An identifier to refer to a specific instance of this type") @@ -245,7 +243,9 @@ class DepositionAuthorAggregateFunctions: # This is a hack to accept "distinct" and "columns" as arguments to "count" @strawberry.field def count( - self, distinct: Optional[bool] = False, columns: Optional[DepositionAuthorCountColumns] = None + self, + distinct: Optional[bool] = False, + columns: Optional[DepositionAuthorCountColumns] = None, ) -> Optional[int]: # Count gets set with the proper value in the resolver, so we just return it here return self.count # type: ignore @@ -280,25 +280,30 @@ class DepositionAuthorAggregate: class DepositionAuthorCreateInput: deposition_id: Optional[strawberry.ID] = strawberry.field(description=None, default=None) author_list_order: int = strawberry.field( - description="The order that the author is listed as in the associated publication" + description="The order that the author is listed as in the 
associated publication", ) orcid: Optional[str] = strawberry.field(description="The ORCID identifier for the author.", default=None) name: str = strawberry.field(description="The full name of the author.") email: Optional[str] = strawberry.field(description="The email address of the author.", default=None) affiliation_name: Optional[str] = strawberry.field( - description="The name of the author's affiliation.", default=None + description="The name of the author's affiliation.", + default=None, ) affiliation_address: Optional[str] = strawberry.field( - description="The address of the author's affiliation.", default=None + description="The address of the author's affiliation.", + default=None, ) affiliation_identifier: Optional[str] = strawberry.field( - description="A Research Organization Registry (ROR) identifier.", default=None + description="A Research Organization Registry (ROR) identifier.", + default=None, ) corresponding_author_status: Optional[bool] = strawberry.field( - description="Whether the author is a corresponding author.", default=None + description="Whether the author is a corresponding author.", + default=None, ) primary_author_status: Optional[bool] = strawberry.field( - description="Whether the author is a primary author.", default=None + description="Whether the author is a primary author.", + default=None, ) id: int = strawberry.field(description="An identifier to refer to a specific instance of this type") @@ -307,25 +312,30 @@ class DepositionAuthorCreateInput: class DepositionAuthorUpdateInput: deposition_id: Optional[strawberry.ID] = strawberry.field(description=None, default=None) author_list_order: Optional[int] = strawberry.field( - description="The order that the author is listed as in the associated publication" + description="The order that the author is listed as in the associated publication", ) orcid: Optional[str] = strawberry.field(description="The ORCID identifier for the author.", default=None) name: Optional[str] = strawberry.field(description="The full name of the author.") email: Optional[str] = strawberry.field(description="The email address of the author.", default=None) affiliation_name: Optional[str] = strawberry.field( - description="The name of the author's affiliation.", default=None + description="The name of the author's affiliation.", + default=None, ) affiliation_address: Optional[str] = strawberry.field( - description="The address of the author's affiliation.", default=None + description="The address of the author's affiliation.", + default=None, ) affiliation_identifier: Optional[str] = strawberry.field( - description="A Research Organization Registry (ROR) identifier.", default=None + description="A Research Organization Registry (ROR) identifier.", + default=None, ) corresponding_author_status: Optional[bool] = strawberry.field( - description="Whether the author is a corresponding author.", default=None + description="Whether the author is a corresponding author.", + default=None, ) primary_author_status: Optional[bool] = strawberry.field( - description="Whether the author is a primary author.", default=None + description="Whether the author is a primary author.", + default=None, ) id: Optional[int] = strawberry.field(description="An identifier to refer to a specific instance of this type") @@ -364,7 +374,7 @@ def format_deposition_author_aggregate_output( format the results using the proper GraphQL types. 
""" aggregate = [] - if not type(query_results) is list: + if type(query_results) is not list: query_results = [query_results] # type: ignore for row in query_results: aggregate.append(format_deposition_author_aggregate_row(row)) @@ -383,10 +393,10 @@ def format_deposition_author_aggregate_row(row: RowMapping) -> DepositionAuthorA aggregate = key.split("_", 1) if aggregate[0] not in aggregator_map.keys(): # Turn list of groupby keys into nested objects - if not getattr(output, "groupBy"): - setattr(output, "groupBy", DepositionAuthorGroupByOptions()) - group = build_deposition_author_groupby_output(getattr(output, "groupBy"), group_keys, value) - setattr(output, "groupBy", group) + if not output.groupBy: + output.groupBy = DepositionAuthorGroupByOptions() + group = build_deposition_author_groupby_output(output.groupBy, group_keys, value) + output.groupBy = group else: aggregate_name = aggregate[0] if aggregate_name == "count": @@ -417,8 +427,8 @@ async def resolve_deposition_authors_aggregate( # Get the selected aggregate functions and columns to operate on, and groupby options if any were provided. # TODO: not sure why selected_fields is a list selections = info.selected_fields[0].selections[0].selections - aggregate_selections = [selection for selection in selections if getattr(selection, "name") != "groupBy"] - groupby_selections = [selection for selection in selections if getattr(selection, "name") == "groupBy"] + aggregate_selections = [selection for selection in selections if selection.name != "groupBy"] + groupby_selections = [selection for selection in selections if selection.name == "groupBy"] groupby_selections = groupby_selections[0].selections if groupby_selections else [] if not aggregate_selections: diff --git a/apiv2/graphql_api/types/deposition_type.py b/apiv2/graphql_api/types/deposition_type.py index 5fe238234..9ef47a2d4 100644 --- a/apiv2/graphql_api/types/deposition_type.py +++ b/apiv2/graphql_api/types/deposition_type.py @@ -8,49 +8,41 @@ # ruff: noqa: E501 Line too long +import datetime +import enum import typing -from typing import TYPE_CHECKING, Annotated, Any, Optional, Sequence, Callable, List +from typing import TYPE_CHECKING, Annotated, Optional, Sequence -import platformics.database.models as base_db import database.models as db import strawberry -import datetime -from platformics.graphql_api.core.query_builder import get_db_rows, get_aggregate_db_rows -from validators.deposition_type import DepositionTypeCreateInputValidator -from validators.deposition_type import DepositionTypeUpdateInputValidator -from graphql_api.helpers.deposition_type import DepositionTypeGroupByOptions, build_deposition_type_groupby_output -from platformics.graphql_api.core.relay_interface import EntityInterface from fastapi import Depends +from graphql_api.helpers.deposition_type import DepositionTypeGroupByOptions, build_deposition_type_groupby_output +from platformics.graphql_api.core.deps import get_authz_client, get_db_session, is_system_user, require_auth_principal from platformics.graphql_api.core.errors import PlatformicsError -from platformics.graphql_api.core.deps import get_authz_client, get_db_session, require_auth_principal, is_system_user +from platformics.graphql_api.core.query_builder import get_aggregate_db_rows, get_db_rows from platformics.graphql_api.core.query_input_types import ( - aggregator_map, - orderBy, EnumComparators, - DatetimeComparators, IntComparators, - FloatComparators, - StrComparators, - UUIDComparators, - BoolComparators, + aggregator_map, + orderBy, 
) +from platformics.graphql_api.core.relay_interface import EntityInterface from platformics.graphql_api.core.strawberry_extensions import DependencyExtension from platformics.security.authorization import AuthzAction, AuthzClient, Principal from sqlalchemy import inspect from sqlalchemy.engine.row import RowMapping from sqlalchemy.ext.asyncio import AsyncSession -from strawberry import relay from strawberry.types import Info +from support.enums import deposition_types_enum from support.limit_offset import LimitOffsetClause from typing_extensions import TypedDict -import enum -from support.enums import deposition_types_enum +from validators.deposition_type import DepositionTypeCreateInputValidator, DepositionTypeUpdateInputValidator E = typing.TypeVar("E") T = typing.TypeVar("T") if TYPE_CHECKING: - from graphql_api.types.deposition import DepositionOrderByClause, DepositionWhereClause, Deposition + from graphql_api.types.deposition import Deposition, DepositionOrderByClause, DepositionWhereClause pass else: @@ -194,7 +186,9 @@ class DepositionTypeAggregateFunctions: # This is a hack to accept "distinct" and "columns" as arguments to "count" @strawberry.field def count( - self, distinct: Optional[bool] = False, columns: Optional[DepositionTypeCountColumns] = None + self, + distinct: Optional[bool] = False, + columns: Optional[DepositionTypeCountColumns] = None, ) -> Optional[int]: # Count gets set with the proper value in the resolver, so we just return it here return self.count # type: ignore @@ -273,7 +267,7 @@ def format_deposition_type_aggregate_output( format the results using the proper GraphQL types. """ aggregate = [] - if not type(query_results) is list: + if type(query_results) is not list: query_results = [query_results] # type: ignore for row in query_results: aggregate.append(format_deposition_type_aggregate_row(row)) @@ -292,10 +286,10 @@ def format_deposition_type_aggregate_row(row: RowMapping) -> DepositionTypeAggre aggregate = key.split("_", 1) if aggregate[0] not in aggregator_map.keys(): # Turn list of groupby keys into nested objects - if not getattr(output, "groupBy"): - setattr(output, "groupBy", DepositionTypeGroupByOptions()) - group = build_deposition_type_groupby_output(getattr(output, "groupBy"), group_keys, value) - setattr(output, "groupBy", group) + if not output.groupBy: + output.groupBy = DepositionTypeGroupByOptions() + group = build_deposition_type_groupby_output(output.groupBy, group_keys, value) + output.groupBy = group else: aggregate_name = aggregate[0] if aggregate_name == "count": @@ -326,8 +320,8 @@ async def resolve_deposition_types_aggregate( # Get the selected aggregate functions and columns to operate on, and groupby options if any were provided. 
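The `- if not type(query_results) is list:` → `+ if type(query_results) is not list:` rewrite that recurs in every `format_*_aggregate_output` helper is pycodestyle/ruff E714: `not x is y` parses as `not (x is y)`, so behavior is unchanged, but `is not` is the single identity operator the code actually means. A minimal standalone sketch of the normalization these helpers perform (illustrative, not the project's code):

```python
# E714: prefer the "is not" operator over negating an identity test.
def normalize_rows(query_results):
    """Wrap a lone row mapping so callers can always iterate a list."""
    if type(query_results) is not list:  # was: if not type(query_results) is list:
        query_results = [query_results]
    return query_results

assert normalize_rows({"count": 3}) == [{"count": 3}]
assert normalize_rows([{"count": 3}]) == [{"count": 3}]
```

`isinstance(query_results, list)` would be the more conventional test, but it also accepts list subclasses, and the exact `type(...) is list` check is left untouched by this patch.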
# TODO: not sure why selected_fields is a list selections = info.selected_fields[0].selections[0].selections - aggregate_selections = [selection for selection in selections if getattr(selection, "name") != "groupBy"] - groupby_selections = [selection for selection in selections if getattr(selection, "name") == "groupBy"] + aggregate_selections = [selection for selection in selections if selection.name != "groupBy"] + groupby_selections = [selection for selection in selections if selection.name == "groupBy"] groupby_selections = groupby_selections[0].selections if groupby_selections else [] if not aggregate_selections: diff --git a/apiv2/graphql_api/types/frame.py b/apiv2/graphql_api/types/frame.py index 1c8466a1c..780b5f38b 100644 --- a/apiv2/graphql_api/types/frame.py +++ b/apiv2/graphql_api/types/frame.py @@ -8,36 +8,31 @@ # ruff: noqa: E501 Line too long +import datetime +import enum import typing -from typing import TYPE_CHECKING, Annotated, Any, Optional, Sequence, Callable, List +from typing import TYPE_CHECKING, Annotated, Optional, Sequence -import platformics.database.models as base_db import database.models as db import strawberry -import datetime -from platformics.graphql_api.core.query_builder import get_db_rows, get_aggregate_db_rows -from validators.frame import FrameCreateInputValidator -from validators.frame import FrameUpdateInputValidator +from fastapi import Depends from graphql_api.helpers.frame import FrameGroupByOptions, build_frame_groupby_output -from platformics.graphql_api.core.relay_interface import EntityInterface from graphql_api.types.per_section_parameters import ( PerSectionParametersAggregate, format_per_section_parameters_aggregate_output, ) -from fastapi import Depends +from platformics.graphql_api.core.deps import get_authz_client, get_db_session, is_system_user, require_auth_principal from platformics.graphql_api.core.errors import PlatformicsError -from platformics.graphql_api.core.deps import get_authz_client, get_db_session, require_auth_principal, is_system_user +from platformics.graphql_api.core.query_builder import get_aggregate_db_rows, get_db_rows from platformics.graphql_api.core.query_input_types import ( - aggregator_map, - orderBy, - EnumComparators, - DatetimeComparators, - IntComparators, + BoolComparators, FloatComparators, + IntComparators, StrComparators, - UUIDComparators, - BoolComparators, + aggregator_map, + orderBy, ) +from platformics.graphql_api.core.relay_interface import EntityInterface from platformics.graphql_api.core.strawberry_extensions import DependencyExtension from platformics.security.authorization import AuthzAction, AuthzClient, Principal from sqlalchemy import inspect @@ -47,19 +42,19 @@ from strawberry.types import Info from support.limit_offset import LimitOffsetClause from typing_extensions import TypedDict -import enum +from validators.frame import FrameCreateInputValidator, FrameUpdateInputValidator E = typing.TypeVar("E") T = typing.TypeVar("T") if TYPE_CHECKING: - from graphql_api.types.deposition import DepositionOrderByClause, DepositionWhereClause, Deposition + from graphql_api.types.deposition import Deposition, DepositionOrderByClause, DepositionWhereClause from graphql_api.types.per_section_parameters import ( + PerSectionParameters, PerSectionParametersOrderByClause, PerSectionParametersWhereClause, - PerSectionParameters, ) - from graphql_api.types.run import RunOrderByClause, RunWhereClause, Run + from graphql_api.types.run import Run, RunOrderByClause, RunWhereClause pass else: @@ -101,7 +96,7 @@ 
async def load_deposition_rows( @relay.connection( relay.ListConnection[ Annotated["PerSectionParameters", strawberry.lazy("graphql_api.types.per_section_parameters")] - ] # type:ignore + ], # type:ignore ) async def load_per_section_parameters_rows( root: "Frame", @@ -234,19 +229,22 @@ class Frame(EntityInterface): run: Optional[Annotated["Run", strawberry.lazy("graphql_api.types.run")]] = load_run_rows # type:ignore raw_angle: float = strawberry.field(description="Camera angle for a frame") acquisition_order: Optional[int] = strawberry.field( - description="Frame's acquistion order within a tilt experiment", default=None + description="Frame's acquistion order within a tilt experiment", + default=None, ) dose: float = strawberry.field(description="The raw camera angle for a frame") is_gain_corrected: Optional[bool] = strawberry.field( - description="Whether this frame has been gain corrected", default=None + description="Whether this frame has been gain corrected", + default=None, ) s3_gain_file: Optional[str] = strawberry.field(description="S3 path to the gain file for this frame", default=None) https_gain_file: Optional[str] = strawberry.field( - description="HTTPS path to the gain file for this frame", default=None + description="HTTPS path to the gain file for this frame", + default=None, ) s3_prefix: str = strawberry.field(description="Path to a directory containing data for this entity as an S3 url") https_prefix: str = strawberry.field( - description="Path to a directory containing data for this entity as an HTTPS url" + description="Path to a directory containing data for this entity as an HTTPS url", ) id: int = strawberry.field(description="An identifier to refer to a specific instance of this type") @@ -360,19 +358,22 @@ class FrameCreateInput: run_id: Optional[strawberry.ID] = strawberry.field(description=None, default=None) raw_angle: float = strawberry.field(description="Camera angle for a frame") acquisition_order: Optional[int] = strawberry.field( - description="Frame's acquistion order within a tilt experiment", default=None + description="Frame's acquistion order within a tilt experiment", + default=None, ) dose: float = strawberry.field(description="The raw camera angle for a frame") is_gain_corrected: Optional[bool] = strawberry.field( - description="Whether this frame has been gain corrected", default=None + description="Whether this frame has been gain corrected", + default=None, ) s3_gain_file: Optional[str] = strawberry.field(description="S3 path to the gain file for this frame", default=None) https_gain_file: Optional[str] = strawberry.field( - description="HTTPS path to the gain file for this frame", default=None + description="HTTPS path to the gain file for this frame", + default=None, ) s3_prefix: str = strawberry.field(description="Path to a directory containing data for this entity as an S3 url") https_prefix: str = strawberry.field( - description="Path to a directory containing data for this entity as an HTTPS url" + description="Path to a directory containing data for this entity as an HTTPS url", ) id: int = strawberry.field(description="An identifier to refer to a specific instance of this type") @@ -383,21 +384,24 @@ class FrameUpdateInput: run_id: Optional[strawberry.ID] = strawberry.field(description=None, default=None) raw_angle: Optional[float] = strawberry.field(description="Camera angle for a frame") acquisition_order: Optional[int] = strawberry.field( - description="Frame's acquistion order within a tilt experiment", default=None + 
description="Frame's acquistion order within a tilt experiment", + default=None, ) dose: Optional[float] = strawberry.field(description="The raw camera angle for a frame") is_gain_corrected: Optional[bool] = strawberry.field( - description="Whether this frame has been gain corrected", default=None + description="Whether this frame has been gain corrected", + default=None, ) s3_gain_file: Optional[str] = strawberry.field(description="S3 path to the gain file for this frame", default=None) https_gain_file: Optional[str] = strawberry.field( - description="HTTPS path to the gain file for this frame", default=None + description="HTTPS path to the gain file for this frame", + default=None, ) s3_prefix: Optional[str] = strawberry.field( - description="Path to a directory containing data for this entity as an S3 url" + description="Path to a directory containing data for this entity as an S3 url", ) https_prefix: Optional[str] = strawberry.field( - description="Path to a directory containing data for this entity as an HTTPS url" + description="Path to a directory containing data for this entity as an HTTPS url", ) id: Optional[int] = strawberry.field(description="An identifier to refer to a specific instance of this type") @@ -434,7 +438,7 @@ def format_frame_aggregate_output(query_results: Sequence[RowMapping] | RowMappi format the results using the proper GraphQL types. """ aggregate = [] - if not type(query_results) is list: + if type(query_results) is not list: query_results = [query_results] # type: ignore for row in query_results: aggregate.append(format_frame_aggregate_row(row)) @@ -453,10 +457,10 @@ def format_frame_aggregate_row(row: RowMapping) -> FrameAggregateFunctions: aggregate = key.split("_", 1) if aggregate[0] not in aggregator_map.keys(): # Turn list of groupby keys into nested objects - if not getattr(output, "groupBy"): - setattr(output, "groupBy", FrameGroupByOptions()) - group = build_frame_groupby_output(getattr(output, "groupBy"), group_keys, value) - setattr(output, "groupBy", group) + if not output.groupBy: + output.groupBy = FrameGroupByOptions() + group = build_frame_groupby_output(output.groupBy, group_keys, value) + output.groupBy = group else: aggregate_name = aggregate[0] if aggregate_name == "count": @@ -487,8 +491,8 @@ async def resolve_frames_aggregate( # Get the selected aggregate functions and columns to operate on, and groupby options if any were provided. # TODO: not sure why selected_fields is a list selections = info.selected_fields[0].selections[0].selections - aggregate_selections = [selection for selection in selections if getattr(selection, "name") != "groupBy"] - groupby_selections = [selection for selection in selections if getattr(selection, "name") == "groupBy"] + aggregate_selections = [selection for selection in selections if selection.name != "groupBy"] + groupby_selections = [selection for selection in selections if selection.name == "groupBy"] groupby_selections = groupby_selections[0].selections if groupby_selections else [] if not aggregate_selections: @@ -532,7 +536,13 @@ async def create_frame( # Check that run relationship is accessible. 
if validated.run_id: run = await get_db_rows( - db.Run, session, authz_client, principal, {"id": {"_eq": validated.run_id}}, [], AuthzAction.VIEW + db.Run, + session, + authz_client, + principal, + {"id": {"_eq": validated.run_id}}, + [], + AuthzAction.VIEW, ) if not run: raise PlatformicsError("Unauthorized: run does not exist") @@ -589,7 +599,13 @@ async def update_frame( # Check that run relationship is accessible. if validated.run_id: run = await get_db_rows( - db.Run, session, authz_client, principal, {"id": {"_eq": validated.run_id}}, [], AuthzAction.VIEW + db.Run, + session, + authz_client, + principal, + {"id": {"_eq": validated.run_id}}, + [], + AuthzAction.VIEW, ) if not run: raise PlatformicsError("Unauthorized: run does not exist") diff --git a/apiv2/graphql_api/types/per_section_alignment_parameters.py b/apiv2/graphql_api/types/per_section_alignment_parameters.py index bb9d0aa0c..adbef28d1 100644 --- a/apiv2/graphql_api/types/per_section_alignment_parameters.py +++ b/apiv2/graphql_api/types/per_section_alignment_parameters.py @@ -8,51 +8,46 @@ # ruff: noqa: E501 Line too long +import datetime +import enum import typing -from typing import TYPE_CHECKING, Annotated, Any, Optional, Sequence, Callable, List +from typing import TYPE_CHECKING, Annotated, Optional, Sequence -import platformics.database.models as base_db import database.models as db import strawberry -import datetime -from platformics.graphql_api.core.query_builder import get_db_rows, get_aggregate_db_rows -from validators.per_section_alignment_parameters import PerSectionAlignmentParametersCreateInputValidator -from validators.per_section_alignment_parameters import PerSectionAlignmentParametersUpdateInputValidator +from fastapi import Depends from graphql_api.helpers.per_section_alignment_parameters import ( PerSectionAlignmentParametersGroupByOptions, build_per_section_alignment_parameters_groupby_output, ) -from platformics.graphql_api.core.relay_interface import EntityInterface -from fastapi import Depends +from platformics.graphql_api.core.deps import get_authz_client, get_db_session, is_system_user, require_auth_principal from platformics.graphql_api.core.errors import PlatformicsError -from platformics.graphql_api.core.deps import get_authz_client, get_db_session, require_auth_principal, is_system_user +from platformics.graphql_api.core.query_builder import get_aggregate_db_rows, get_db_rows from platformics.graphql_api.core.query_input_types import ( + FloatComparators, + IntComparators, aggregator_map, orderBy, - EnumComparators, - DatetimeComparators, - IntComparators, - FloatComparators, - StrComparators, - UUIDComparators, - BoolComparators, ) +from platformics.graphql_api.core.relay_interface import EntityInterface from platformics.graphql_api.core.strawberry_extensions import DependencyExtension from platformics.security.authorization import AuthzAction, AuthzClient, Principal from sqlalchemy import inspect from sqlalchemy.engine.row import RowMapping from sqlalchemy.ext.asyncio import AsyncSession -from strawberry import relay from strawberry.types import Info from support.limit_offset import LimitOffsetClause from typing_extensions import TypedDict -import enum +from validators.per_section_alignment_parameters import ( + PerSectionAlignmentParametersCreateInputValidator, + PerSectionAlignmentParametersUpdateInputValidator, +) E = typing.TypeVar("E") T = typing.TypeVar("T") if TYPE_CHECKING: - from graphql_api.types.alignment import AlignmentOrderByClause, AlignmentWhereClause, Alignment + from 
graphql_api.types.alignment import Alignment, AlignmentOrderByClause, AlignmentWhereClause pass else: @@ -147,13 +142,16 @@ class PerSectionAlignmentParameters(EntityInterface): ) # type:ignore z_index: int = strawberry.field(description="z-index of the frame in the tiltseries") x_offset: Optional[float] = strawberry.field( - description="In-plane X-shift of the projection in angstrom", default=None + description="In-plane X-shift of the projection in angstrom", + default=None, ) y_offset: Optional[float] = strawberry.field( - description="In-plane Y-shift of the projection in angstrom", default=None + description="In-plane Y-shift of the projection in angstrom", + default=None, ) in_plane_rotation: Optional[float] = strawberry.field( - description="In-plane rotation of the projection in degrees", default=None + description="In-plane rotation of the projection in degrees", + default=None, ) beam_tilt: Optional[float] = strawberry.field(description="Beam tilt during projection in degrees", default=None) tilt_angle: Optional[float] = strawberry.field(description="Tilt angle of the projection in degrees", default=None) @@ -232,7 +230,9 @@ class PerSectionAlignmentParametersAggregateFunctions: # This is a hack to accept "distinct" and "columns" as arguments to "count" @strawberry.field def count( - self, distinct: Optional[bool] = False, columns: Optional[PerSectionAlignmentParametersCountColumns] = None + self, + distinct: Optional[bool] = False, + columns: Optional[PerSectionAlignmentParametersCountColumns] = None, ) -> Optional[int]: # Count gets set with the proper value in the resolver, so we just return it here return self.count # type: ignore @@ -268,13 +268,16 @@ class PerSectionAlignmentParametersCreateInput: alignment_id: strawberry.ID = strawberry.field(description="Tiltseries Alignment") z_index: int = strawberry.field(description="z-index of the frame in the tiltseries") x_offset: Optional[float] = strawberry.field( - description="In-plane X-shift of the projection in angstrom", default=None + description="In-plane X-shift of the projection in angstrom", + default=None, ) y_offset: Optional[float] = strawberry.field( - description="In-plane Y-shift of the projection in angstrom", default=None + description="In-plane Y-shift of the projection in angstrom", + default=None, ) in_plane_rotation: Optional[float] = strawberry.field( - description="In-plane rotation of the projection in degrees", default=None + description="In-plane rotation of the projection in degrees", + default=None, ) beam_tilt: Optional[float] = strawberry.field(description="Beam tilt during projection in degrees", default=None) tilt_angle: Optional[float] = strawberry.field(description="Tilt angle of the projection in degrees", default=None) @@ -286,13 +289,16 @@ class PerSectionAlignmentParametersUpdateInput: alignment_id: Optional[strawberry.ID] = strawberry.field(description="Tiltseries Alignment") z_index: Optional[int] = strawberry.field(description="z-index of the frame in the tiltseries") x_offset: Optional[float] = strawberry.field( - description="In-plane X-shift of the projection in angstrom", default=None + description="In-plane X-shift of the projection in angstrom", + default=None, ) y_offset: Optional[float] = strawberry.field( - description="In-plane Y-shift of the projection in angstrom", default=None + description="In-plane Y-shift of the projection in angstrom", + default=None, ) in_plane_rotation: Optional[float] = strawberry.field( - description="In-plane rotation of the projection in degrees", 
default=None + description="In-plane rotation of the projection in degrees", + default=None, ) beam_tilt: Optional[float] = strawberry.field(description="Beam tilt during projection in degrees", default=None) tilt_angle: Optional[float] = strawberry.field(description="Tilt angle of the projection in degrees", default=None) @@ -333,7 +339,7 @@ def format_per_section_alignment_parameters_aggregate_output( format the results using the proper GraphQL types. """ aggregate = [] - if not type(query_results) is list: + if type(query_results) is not list: query_results = [query_results] # type: ignore for row in query_results: aggregate.append(format_per_section_alignment_parameters_aggregate_row(row)) @@ -354,10 +360,10 @@ def format_per_section_alignment_parameters_aggregate_row( aggregate = key.split("_", 1) if aggregate[0] not in aggregator_map.keys(): # Turn list of groupby keys into nested objects - if not getattr(output, "groupBy"): - setattr(output, "groupBy", PerSectionAlignmentParametersGroupByOptions()) - group = build_per_section_alignment_parameters_groupby_output(getattr(output, "groupBy"), group_keys, value) - setattr(output, "groupBy", group) + if not output.groupBy: + output.groupBy = PerSectionAlignmentParametersGroupByOptions() + group = build_per_section_alignment_parameters_groupby_output(output.groupBy, group_keys, value) + output.groupBy = group else: aggregate_name = aggregate[0] if aggregate_name == "count": @@ -388,8 +394,8 @@ async def resolve_per_section_alignment_parameters_aggregate( # Get the selected aggregate functions and columns to operate on, and groupby options if any were provided. # TODO: not sure why selected_fields is a list selections = info.selected_fields[0].selections[0].selections - aggregate_selections = [selection for selection in selections if getattr(selection, "name") != "groupBy"] - groupby_selections = [selection for selection in selections if getattr(selection, "name") == "groupBy"] + aggregate_selections = [selection for selection in selections if selection.name != "groupBy"] + groupby_selections = [selection for selection in selections if selection.name == "groupBy"] groupby_selections = groupby_selections[0].selections if groupby_selections else [] if not aggregate_selections: @@ -483,7 +489,13 @@ async def update_per_section_alignment_parameters( # Fetch entities for update, if we have access to them entities = await get_db_rows( - db.PerSectionAlignmentParameters, session, authz_client, principal, where, [], AuthzAction.UPDATE + db.PerSectionAlignmentParameters, + session, + authz_client, + principal, + where, + [], + AuthzAction.UPDATE, ) if len(entities) == 0: raise PlatformicsError("Unauthorized: Cannot update entities") @@ -515,7 +527,13 @@ async def delete_per_section_alignment_parameters( """ # Fetch entities for deletion, if we have access to them entities = await get_db_rows( - db.PerSectionAlignmentParameters, session, authz_client, principal, where, [], AuthzAction.DELETE + db.PerSectionAlignmentParameters, + session, + authz_client, + principal, + where, + [], + AuthzAction.DELETE, ) if len(entities) == 0: raise PlatformicsError("Unauthorized: Cannot delete entities") diff --git a/apiv2/graphql_api/types/per_section_parameters.py b/apiv2/graphql_api/types/per_section_parameters.py index 9a2412c75..2181c5954 100644 --- a/apiv2/graphql_api/types/per_section_parameters.py +++ b/apiv2/graphql_api/types/per_section_parameters.py @@ -8,52 +8,47 @@ # ruff: noqa: E501 Line too long +import datetime +import enum import typing -from typing 
import TYPE_CHECKING, Annotated, Any, Optional, Sequence, Callable, List +from typing import TYPE_CHECKING, Annotated, Optional, Sequence -import platformics.database.models as base_db import database.models as db import strawberry -import datetime -from platformics.graphql_api.core.query_builder import get_db_rows, get_aggregate_db_rows -from validators.per_section_parameters import PerSectionParametersCreateInputValidator -from validators.per_section_parameters import PerSectionParametersUpdateInputValidator +from fastapi import Depends from graphql_api.helpers.per_section_parameters import ( PerSectionParametersGroupByOptions, build_per_section_parameters_groupby_output, ) -from platformics.graphql_api.core.relay_interface import EntityInterface -from fastapi import Depends +from platformics.graphql_api.core.deps import get_authz_client, get_db_session, is_system_user, require_auth_principal from platformics.graphql_api.core.errors import PlatformicsError -from platformics.graphql_api.core.deps import get_authz_client, get_db_session, require_auth_principal, is_system_user +from platformics.graphql_api.core.query_builder import get_aggregate_db_rows, get_db_rows from platformics.graphql_api.core.query_input_types import ( + FloatComparators, + IntComparators, aggregator_map, orderBy, - EnumComparators, - DatetimeComparators, - IntComparators, - FloatComparators, - StrComparators, - UUIDComparators, - BoolComparators, ) +from platformics.graphql_api.core.relay_interface import EntityInterface from platformics.graphql_api.core.strawberry_extensions import DependencyExtension from platformics.security.authorization import AuthzAction, AuthzClient, Principal from sqlalchemy import inspect from sqlalchemy.engine.row import RowMapping from sqlalchemy.ext.asyncio import AsyncSession -from strawberry import relay from strawberry.types import Info from support.limit_offset import LimitOffsetClause from typing_extensions import TypedDict -import enum +from validators.per_section_parameters import ( + PerSectionParametersCreateInputValidator, + PerSectionParametersUpdateInputValidator, +) E = typing.TypeVar("E") T = typing.TypeVar("T") if TYPE_CHECKING: - from graphql_api.types.frame import FrameOrderByClause, FrameWhereClause, Frame - from graphql_api.types.tiltseries import TiltseriesOrderByClause, TiltseriesWhereClause, Tiltseries + from graphql_api.types.frame import Frame, FrameOrderByClause, FrameWhereClause + from graphql_api.types.tiltseries import Tiltseries, TiltseriesOrderByClause, TiltseriesWhereClause pass else: @@ -237,7 +232,9 @@ class PerSectionParametersAggregateFunctions: # This is a hack to accept "distinct" and "columns" as arguments to "count" @strawberry.field def count( - self, distinct: Optional[bool] = False, columns: Optional[PerSectionParametersCountColumns] = None + self, + distinct: Optional[bool] = False, + columns: Optional[PerSectionParametersCountColumns] = None, ) -> Optional[int]: # Count gets set with the proper value in the resolver, so we just return it here return self.count # type: ignore @@ -324,7 +321,7 @@ def format_per_section_parameters_aggregate_output( format the results using the proper GraphQL types. 
""" aggregate = [] - if not type(query_results) is list: + if type(query_results) is not list: query_results = [query_results] # type: ignore for row in query_results: aggregate.append(format_per_section_parameters_aggregate_row(row)) @@ -343,10 +340,10 @@ def format_per_section_parameters_aggregate_row(row: RowMapping) -> PerSectionPa aggregate = key.split("_", 1) if aggregate[0] not in aggregator_map.keys(): # Turn list of groupby keys into nested objects - if not getattr(output, "groupBy"): - setattr(output, "groupBy", PerSectionParametersGroupByOptions()) - group = build_per_section_parameters_groupby_output(getattr(output, "groupBy"), group_keys, value) - setattr(output, "groupBy", group) + if not output.groupBy: + output.groupBy = PerSectionParametersGroupByOptions() + group = build_per_section_parameters_groupby_output(output.groupBy, group_keys, value) + output.groupBy = group else: aggregate_name = aggregate[0] if aggregate_name == "count": @@ -377,8 +374,8 @@ async def resolve_per_section_parameters_aggregate( # Get the selected aggregate functions and columns to operate on, and groupby options if any were provided. # TODO: not sure why selected_fields is a list selections = info.selected_fields[0].selections[0].selections - aggregate_selections = [selection for selection in selections if getattr(selection, "name") != "groupBy"] - groupby_selections = [selection for selection in selections if getattr(selection, "name") == "groupBy"] + aggregate_selections = [selection for selection in selections if selection.name != "groupBy"] + groupby_selections = [selection for selection in selections if selection.name == "groupBy"] groupby_selections = groupby_selections[0].selections if groupby_selections else [] if not aggregate_selections: @@ -409,7 +406,13 @@ async def create_per_section_parameters( # Check that frame relationship is accessible. if validated.frame_id: frame = await get_db_rows( - db.Frame, session, authz_client, principal, {"id": {"_eq": validated.frame_id}}, [], AuthzAction.VIEW + db.Frame, + session, + authz_client, + principal, + {"id": {"_eq": validated.frame_id}}, + [], + AuthzAction.VIEW, ) if not frame: raise PlatformicsError("Unauthorized: frame does not exist") @@ -464,7 +467,13 @@ async def update_per_section_parameters( # Check that frame relationship is accessible. 
if validated.frame_id: frame = await get_db_rows( - db.Frame, session, authz_client, principal, {"id": {"_eq": validated.frame_id}}, [], AuthzAction.VIEW + db.Frame, + session, + authz_client, + principal, + {"id": {"_eq": validated.frame_id}}, + [], + AuthzAction.VIEW, ) if not frame: raise PlatformicsError("Unauthorized: frame does not exist") @@ -488,7 +497,13 @@ async def update_per_section_parameters( # Fetch entities for update, if we have access to them entities = await get_db_rows( - db.PerSectionParameters, session, authz_client, principal, where, [], AuthzAction.UPDATE + db.PerSectionParameters, + session, + authz_client, + principal, + where, + [], + AuthzAction.UPDATE, ) if len(entities) == 0: raise PlatformicsError("Unauthorized: Cannot update entities") @@ -520,7 +535,13 @@ async def delete_per_section_parameters( """ # Fetch entities for deletion, if we have access to them entities = await get_db_rows( - db.PerSectionParameters, session, authz_client, principal, where, [], AuthzAction.DELETE + db.PerSectionParameters, + session, + authz_client, + principal, + where, + [], + AuthzAction.DELETE, ) if len(entities) == 0: raise PlatformicsError("Unauthorized: Cannot delete entities") diff --git a/apiv2/graphql_api/types/run.py b/apiv2/graphql_api/types/run.py index f4beea2fb..52d462fb2 100644 --- a/apiv2/graphql_api/types/run.py +++ b/apiv2/graphql_api/types/run.py @@ -8,41 +8,34 @@ # ruff: noqa: E501 Line too long +import datetime +import enum import typing -from typing import TYPE_CHECKING, Annotated, Any, Optional, Sequence, Callable, List +from typing import TYPE_CHECKING, Annotated, Optional, Sequence -import platformics.database.models as base_db import database.models as db import strawberry -import datetime -from platformics.graphql_api.core.query_builder import get_db_rows, get_aggregate_db_rows -from validators.run import RunCreateInputValidator -from validators.run import RunUpdateInputValidator +from fastapi import Depends from graphql_api.helpers.run import RunGroupByOptions, build_run_groupby_output -from platformics.graphql_api.core.relay_interface import EntityInterface from graphql_api.types.alignment import AlignmentAggregate, format_alignment_aggregate_output from graphql_api.types.annotation import AnnotationAggregate, format_annotation_aggregate_output from graphql_api.types.frame import FrameAggregate, format_frame_aggregate_output from graphql_api.types.tiltseries import TiltseriesAggregate, format_tiltseries_aggregate_output +from graphql_api.types.tomogram import TomogramAggregate, format_tomogram_aggregate_output from graphql_api.types.tomogram_voxel_spacing import ( TomogramVoxelSpacingAggregate, format_tomogram_voxel_spacing_aggregate_output, ) -from graphql_api.types.tomogram import TomogramAggregate, format_tomogram_aggregate_output -from fastapi import Depends +from platformics.graphql_api.core.deps import get_authz_client, get_db_session, is_system_user, require_auth_principal from platformics.graphql_api.core.errors import PlatformicsError -from platformics.graphql_api.core.deps import get_authz_client, get_db_session, require_auth_principal, is_system_user +from platformics.graphql_api.core.query_builder import get_aggregate_db_rows, get_db_rows from platformics.graphql_api.core.query_input_types import ( - aggregator_map, - orderBy, - EnumComparators, - DatetimeComparators, IntComparators, - FloatComparators, StrComparators, - UUIDComparators, - BoolComparators, + aggregator_map, + orderBy, ) +from platformics.graphql_api.core.relay_interface 
import EntityInterface from platformics.graphql_api.core.strawberry_extensions import DependencyExtension from platformics.security.authorization import AuthzAction, AuthzClient, Principal from sqlalchemy import inspect @@ -52,23 +45,23 @@ from strawberry.types import Info from support.limit_offset import LimitOffsetClause from typing_extensions import TypedDict -import enum +from validators.run import RunCreateInputValidator, RunUpdateInputValidator E = typing.TypeVar("E") T = typing.TypeVar("T") if TYPE_CHECKING: - from graphql_api.types.alignment import AlignmentOrderByClause, AlignmentWhereClause, Alignment - from graphql_api.types.annotation import AnnotationOrderByClause, AnnotationWhereClause, Annotation - from graphql_api.types.dataset import DatasetOrderByClause, DatasetWhereClause, Dataset - from graphql_api.types.frame import FrameOrderByClause, FrameWhereClause, Frame - from graphql_api.types.tiltseries import TiltseriesOrderByClause, TiltseriesWhereClause, Tiltseries + from graphql_api.types.alignment import Alignment, AlignmentOrderByClause, AlignmentWhereClause + from graphql_api.types.annotation import Annotation, AnnotationOrderByClause, AnnotationWhereClause + from graphql_api.types.dataset import Dataset, DatasetOrderByClause, DatasetWhereClause + from graphql_api.types.frame import Frame, FrameOrderByClause, FrameWhereClause + from graphql_api.types.tiltseries import Tiltseries, TiltseriesOrderByClause, TiltseriesWhereClause + from graphql_api.types.tomogram import Tomogram, TomogramOrderByClause, TomogramWhereClause from graphql_api.types.tomogram_voxel_spacing import ( + TomogramVoxelSpacing, TomogramVoxelSpacingOrderByClause, TomogramVoxelSpacingWhereClause, - TomogramVoxelSpacing, ) - from graphql_api.types.tomogram import TomogramOrderByClause, TomogramWhereClause, Tomogram pass else: @@ -105,7 +98,7 @@ @relay.connection( - relay.ListConnection[Annotated["Alignment", strawberry.lazy("graphql_api.types.alignment")]] # type:ignore + relay.ListConnection[Annotated["Alignment", strawberry.lazy("graphql_api.types.alignment")]], # type:ignore ) async def load_alignment_rows( root: "Run", @@ -135,7 +128,7 @@ async def load_alignment_aggregate_rows( @relay.connection( - relay.ListConnection[Annotated["Annotation", strawberry.lazy("graphql_api.types.annotation")]] # type:ignore + relay.ListConnection[Annotated["Annotation", strawberry.lazy("graphql_api.types.annotation")]], # type:ignore ) async def load_annotation_rows( root: "Run", @@ -180,7 +173,7 @@ async def load_dataset_rows( @relay.connection( - relay.ListConnection[Annotated["Frame", strawberry.lazy("graphql_api.types.frame")]] # type:ignore + relay.ListConnection[Annotated["Frame", strawberry.lazy("graphql_api.types.frame")]], # type:ignore ) async def load_frame_rows( root: "Run", @@ -210,7 +203,7 @@ async def load_frame_aggregate_rows( @relay.connection( - relay.ListConnection[Annotated["Tiltseries", strawberry.lazy("graphql_api.types.tiltseries")]] # type:ignore + relay.ListConnection[Annotated["Tiltseries", strawberry.lazy("graphql_api.types.tiltseries")]], # type:ignore ) async def load_tiltseries_rows( root: "Run", @@ -244,7 +237,7 @@ async def load_tiltseries_aggregate_rows( @relay.connection( relay.ListConnection[ Annotated["TomogramVoxelSpacing", strawberry.lazy("graphql_api.types.tomogram_voxel_spacing")] - ] # type:ignore + ], # type:ignore ) async def load_tomogram_voxel_spacing_rows( root: "Run", @@ -282,7 +275,7 @@ async def load_tomogram_voxel_spacing_aggregate_rows( @relay.connection( - 
relay.ListConnection[Annotated["Tomogram", strawberry.lazy("graphql_api.types.tomogram")]] # type:ignore + relay.ListConnection[Annotated["Tomogram", strawberry.lazy("graphql_api.types.tomogram")]], # type:ignore ) async def load_tomogram_rows( root: "Run", @@ -415,7 +408,7 @@ class Run(EntityInterface): name: str = strawberry.field(description="Name of a run") s3_prefix: str = strawberry.field(description="Path to a directory containing data for this entity as an S3 url") https_prefix: str = strawberry.field( - description="Path to a directory containing data for this entity as an HTTPS url" + description="Path to a directory containing data for this entity as an HTTPS url", ) id: int = strawberry.field(description="An identifier to refer to a specific instance of this type") @@ -519,7 +512,7 @@ class RunCreateInput: name: str = strawberry.field(description="Name of a run") s3_prefix: str = strawberry.field(description="Path to a directory containing data for this entity as an S3 url") https_prefix: str = strawberry.field( - description="Path to a directory containing data for this entity as an HTTPS url" + description="Path to a directory containing data for this entity as an HTTPS url", ) id: int = strawberry.field(description="An identifier to refer to a specific instance of this type") @@ -529,10 +522,10 @@ class RunUpdateInput: dataset_id: Optional[strawberry.ID] = strawberry.field(description="An author of a dataset") name: Optional[str] = strawberry.field(description="Name of a run") s3_prefix: Optional[str] = strawberry.field( - description="Path to a directory containing data for this entity as an S3 url" + description="Path to a directory containing data for this entity as an S3 url", ) https_prefix: Optional[str] = strawberry.field( - description="Path to a directory containing data for this entity as an HTTPS url" + description="Path to a directory containing data for this entity as an HTTPS url", ) id: Optional[int] = strawberry.field(description="An identifier to refer to a specific instance of this type") @@ -569,7 +562,7 @@ def format_run_aggregate_output(query_results: Sequence[RowMapping] | RowMapping format the results using the proper GraphQL types. """ aggregate = [] - if not type(query_results) is list: + if type(query_results) is not list: query_results = [query_results] # type: ignore for row in query_results: aggregate.append(format_run_aggregate_row(row)) @@ -588,10 +581,10 @@ def format_run_aggregate_row(row: RowMapping) -> RunAggregateFunctions: aggregate = key.split("_", 1) if aggregate[0] not in aggregator_map.keys(): # Turn list of groupby keys into nested objects - if not getattr(output, "groupBy"): - setattr(output, "groupBy", RunGroupByOptions()) - group = build_run_groupby_output(getattr(output, "groupBy"), group_keys, value) - setattr(output, "groupBy", group) + if not output.groupBy: + output.groupBy = RunGroupByOptions() + group = build_run_groupby_output(output.groupBy, group_keys, value) + output.groupBy = group else: aggregate_name = aggregate[0] if aggregate_name == "count": @@ -622,8 +615,8 @@ async def resolve_runs_aggregate( # Get the selected aggregate functions and columns to operate on, and groupby options if any were provided. 
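The `format_*_aggregate_row` helpers these resolvers feed all share one routing convention, visible in the hunks above: each column key of the flattened SQL row is split once on `_`, and the prefix decides whether the value is an aggregate function result or a group-by column to be nested into the matching `*GroupByOptions` object. A simplified sketch of that dispatch, assuming `aggregator_map` maps aggregate prefixes to SQL functions (which is how its membership test is used here):

```python
# Simplified dispatch mirroring format_run_aggregate_row and friends.
aggregator_map = {"count": "count", "sum": "sum", "avg": "avg"}  # assumed shape

row = {"count": 7, "avg_tilt_axis": 84.2, "name": "run_001"}
for key, value in row.items():
    parts = key.split("_", 1)          # e.g. ["avg", "tilt_axis"]
    if parts[0] not in aggregator_map:
        print(f"group-by column {key!r} -> {value!r}")   # nested into GroupByOptions
    elif parts[0] == "count":
        print(f"count = {value}")                        # count has no column suffix
    else:
        aggregate_fn, column = parts                     # e.g. avg of tilt_axis
        print(f"{aggregate_fn}({column}) = {value}")
```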
# TODO: not sure why selected_fields is a list selections = info.selected_fields[0].selections[0].selections - aggregate_selections = [selection for selection in selections if getattr(selection, "name") != "groupBy"] - groupby_selections = [selection for selection in selections if getattr(selection, "name") == "groupBy"] + aggregate_selections = [selection for selection in selections if selection.name != "groupBy"] + groupby_selections = [selection for selection in selections if selection.name == "groupBy"] groupby_selections = groupby_selections[0].selections if groupby_selections else [] if not aggregate_selections: @@ -654,7 +647,13 @@ async def create_run( # Check that dataset relationship is accessible. if validated.dataset_id: dataset = await get_db_rows( - db.Dataset, session, authz_client, principal, {"id": {"_eq": validated.dataset_id}}, [], AuthzAction.VIEW + db.Dataset, + session, + authz_client, + principal, + {"id": {"_eq": validated.dataset_id}}, + [], + AuthzAction.VIEW, ) if not dataset: raise PlatformicsError("Unauthorized: dataset does not exist") @@ -696,7 +695,13 @@ async def update_run( # Check that dataset relationship is accessible. if validated.dataset_id: dataset = await get_db_rows( - db.Dataset, session, authz_client, principal, {"id": {"_eq": validated.dataset_id}}, [], AuthzAction.VIEW + db.Dataset, + session, + authz_client, + principal, + {"id": {"_eq": validated.dataset_id}}, + [], + AuthzAction.VIEW, ) if not dataset: raise PlatformicsError("Unauthorized: dataset does not exist") diff --git a/apiv2/graphql_api/types/tiltseries.py b/apiv2/graphql_api/types/tiltseries.py index 5e49aa3d3..7deaad8d2 100644 --- a/apiv2/graphql_api/types/tiltseries.py +++ b/apiv2/graphql_api/types/tiltseries.py @@ -8,37 +8,33 @@ # ruff: noqa: E501 Line too long +import datetime +import enum import typing -from typing import TYPE_CHECKING, Annotated, Any, Optional, Sequence, Callable, List +from typing import TYPE_CHECKING, Annotated, Optional, Sequence -import platformics.database.models as base_db import database.models as db import strawberry -import datetime -from platformics.graphql_api.core.query_builder import get_db_rows, get_aggregate_db_rows -from validators.tiltseries import TiltseriesCreateInputValidator -from validators.tiltseries import TiltseriesUpdateInputValidator +from fastapi import Depends from graphql_api.helpers.tiltseries import TiltseriesGroupByOptions, build_tiltseries_groupby_output -from platformics.graphql_api.core.relay_interface import EntityInterface from graphql_api.types.alignment import AlignmentAggregate, format_alignment_aggregate_output from graphql_api.types.per_section_parameters import ( PerSectionParametersAggregate, format_per_section_parameters_aggregate_output, ) -from fastapi import Depends +from platformics.graphql_api.core.deps import get_authz_client, get_db_session, is_system_user, require_auth_principal from platformics.graphql_api.core.errors import PlatformicsError -from platformics.graphql_api.core.deps import get_authz_client, get_db_session, require_auth_principal, is_system_user +from platformics.graphql_api.core.query_builder import get_aggregate_db_rows, get_db_rows from platformics.graphql_api.core.query_input_types import ( - aggregator_map, - orderBy, + BoolComparators, EnumComparators, - DatetimeComparators, - IntComparators, FloatComparators, + IntComparators, StrComparators, - UUIDComparators, - BoolComparators, + aggregator_map, + orderBy, ) +from platformics.graphql_api.core.relay_interface import EntityInterface 
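The import hunks repeated across these files all apply the same isort-style normalization (e.g. ruff's `I` rules): stdlib imports first (`datetime`, `enum`, `typing`), then the third-party/first-party block, each alphabetized; duplicate `from validators.x import A` / `from validators.x import B` statements are merged into one; and imports a given module no longer uses (`platformics.database.models as base_db`, unused comparator types, and in some modules `strawberry.relay`) are dropped. The resulting layout, shown as an illustrative excerpt built from names in the tiltseries hunk rather than the full module:

```python
# Post-lint import layout (illustrative excerpt, not the full module).
import datetime                      # stdlib block, alphabetized
import enum
import typing
from typing import TYPE_CHECKING, Annotated, Optional, Sequence

import database.models as db        # third-/first-party block
import strawberry
from fastapi import Depends
from validators.tiltseries import (  # merged from two separate import statements
    TiltseriesCreateInputValidator,
    TiltseriesUpdateInputValidator,
)
```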
from platformics.graphql_api.core.strawberry_extensions import DependencyExtension from platformics.security.authorization import AuthzAction, AuthzClient, Principal from sqlalchemy import inspect @@ -46,23 +42,23 @@ from sqlalchemy.ext.asyncio import AsyncSession from strawberry import relay from strawberry.types import Info +from support.enums import tiltseries_microscope_manufacturer_enum from support.limit_offset import LimitOffsetClause from typing_extensions import TypedDict -import enum -from support.enums import tiltseries_microscope_manufacturer_enum +from validators.tiltseries import TiltseriesCreateInputValidator, TiltseriesUpdateInputValidator E = typing.TypeVar("E") T = typing.TypeVar("T") if TYPE_CHECKING: - from graphql_api.types.alignment import AlignmentOrderByClause, AlignmentWhereClause, Alignment + from graphql_api.types.alignment import Alignment, AlignmentOrderByClause, AlignmentWhereClause + from graphql_api.types.deposition import Deposition, DepositionOrderByClause, DepositionWhereClause from graphql_api.types.per_section_parameters import ( + PerSectionParameters, PerSectionParametersOrderByClause, PerSectionParametersWhereClause, - PerSectionParameters, ) - from graphql_api.types.run import RunOrderByClause, RunWhereClause, Run - from graphql_api.types.deposition import DepositionOrderByClause, DepositionWhereClause, Deposition + from graphql_api.types.run import Run, RunOrderByClause, RunWhereClause pass else: @@ -90,7 +86,7 @@ @relay.connection( - relay.ListConnection[Annotated["Alignment", strawberry.lazy("graphql_api.types.alignment")]] # type:ignore + relay.ListConnection[Annotated["Alignment", strawberry.lazy("graphql_api.types.alignment")]], # type:ignore ) async def load_alignment_rows( root: "Tiltseries", @@ -122,7 +118,7 @@ async def load_alignment_aggregate_rows( @relay.connection( relay.ListConnection[ Annotated["PerSectionParameters", strawberry.lazy("graphql_api.types.per_section_parameters")] - ] # type:ignore + ], # type:ignore ) async def load_per_section_parameters_rows( root: "Tiltseries", @@ -330,41 +326,51 @@ class Tiltseries(EntityInterface): load_deposition_rows ) # type:ignore s3_omezarr_dir: Optional[str] = strawberry.field( - description="S3 path to this tiltseries in multiscale OME-Zarr format", default=None + description="S3 path to this tiltseries in multiscale OME-Zarr format", + default=None, ) s3_mrc_file: Optional[str] = strawberry.field( - description="S3 path to this tiltseries in MRC format (no scaling)", default=None + description="S3 path to this tiltseries in MRC format (no scaling)", + default=None, ) https_omezarr_dir: Optional[str] = strawberry.field( - description="HTTPS path to this tiltseries in multiscale OME-Zarr format", default=None + description="HTTPS path to this tiltseries in multiscale OME-Zarr format", + default=None, ) https_mrc_file: Optional[str] = strawberry.field( - description="HTTPS path to this tiltseries in MRC format (no scaling)", default=None + description="HTTPS path to this tiltseries in MRC format (no scaling)", + default=None, ) s3_collection_metadata: Optional[str] = strawberry.field( - description="S3 path to the collection metadata file for this tiltseries", default=None + description="S3 path to the collection metadata file for this tiltseries", + default=None, ) https_collection_metadata: Optional[str] = strawberry.field( - description="HTTPS path to the collection metadata file for this tiltseries", default=None + description="HTTPS path to the collection metadata file for this 
tiltseries", + default=None, ) s3_angle_list: Optional[str] = strawberry.field( - description="S3 path to the angle list file for this tiltseries", default=None + description="S3 path to the angle list file for this tiltseries", + default=None, ) https_angle_list: Optional[str] = strawberry.field( - description="HTTPS path to the angle list file for this tiltseries", default=None + description="HTTPS path to the angle list file for this tiltseries", + default=None, ) s3_gain_file: Optional[str] = strawberry.field( - description="S3 path to the gain file for this tiltseries", default=None + description="S3 path to the gain file for this tiltseries", + default=None, ) https_gain_file: Optional[str] = strawberry.field( - description="HTTPS path to the gain file for this tiltseries", default=None + description="HTTPS path to the gain file for this tiltseries", + default=None, ) acceleration_voltage: float = strawberry.field(description="Electron Microscope Accelerator voltage in volts") spherical_aberration_constant: float = strawberry.field( - description="Spherical Aberration Constant of the objective lens in millimeters" + description="Spherical Aberration Constant of the objective lens in millimeters", ) microscope_manufacturer: tiltseries_microscope_manufacturer_enum = strawberry.field( - description="Name of the microscope manufacturer" + description="Name of the microscope manufacturer", ) microscope_model: str = strawberry.field(description="Microscope model name") microscope_energy_filter: str = strawberry.field(description="Energy filter setup used") @@ -383,25 +389,29 @@ class Tiltseries(EntityInterface): tilting_scheme: str = strawberry.field(description="The order of stage tilting during acquisition of the data") tilt_axis: float = strawberry.field(description="Rotation angle in degrees") total_flux: float = strawberry.field( - description="Number of Electrons reaching the specimen in a square Angstrom area for the entire tilt series" + description="Number of Electrons reaching the specimen in a square Angstrom area for the entire tilt series", ) data_acquisition_software: str = strawberry.field(description="Software used to collect data") related_empiar_entry: Optional[str] = strawberry.field( - description="If a tilt series is deposited into EMPIAR, enter the EMPIAR dataset identifier", default=None + description="If a tilt series is deposited into EMPIAR, enter the EMPIAR dataset identifier", + default=None, ) binning_from_frames: Optional[float] = strawberry.field( - description="Describes the binning factor from frames to tilt series file", default=None + description="Describes the binning factor from frames to tilt series file", + default=None, ) tilt_series_quality: int = strawberry.field( - description="Author assessment of tilt series quality within the dataset (1-5, 5 is best)" + description="Author assessment of tilt series quality within the dataset (1-5, 5 is best)", ) is_aligned: bool = strawberry.field(description="Whether this tilt series is aligned") pixel_spacing: float = strawberry.field(description="Pixel spacing for the tilt series") aligned_tiltseries_binning: Optional[float] = strawberry.field( - description="Binning factor of the aligned tilt series", default=None + description="Binning factor of the aligned tilt series", + default=None, ) tiltseries_frames_count: Optional[int] = strawberry.field( - description="Number of frames associated with this tiltseries", default=None + description="Number of frames associated with this tiltseries", + default=None, ) 
id: int = strawberry.field(description="An identifier to refer to a specific instance of this type") @@ -544,7 +554,9 @@ class TiltseriesAggregateFunctions: # This is a hack to accept "distinct" and "columns" as arguments to "count" @strawberry.field def count( - self, distinct: Optional[bool] = False, columns: Optional[TiltseriesCountColumns] = None + self, + distinct: Optional[bool] = False, + columns: Optional[TiltseriesCountColumns] = None, ) -> Optional[int]: # Count gets set with the proper value in the resolver, so we just return it here return self.count # type: ignore @@ -580,41 +592,51 @@ class TiltseriesCreateInput: run_id: strawberry.ID = strawberry.field(description=None) deposition_id: Optional[strawberry.ID] = strawberry.field(description=None, default=None) s3_omezarr_dir: Optional[str] = strawberry.field( - description="S3 path to this tiltseries in multiscale OME-Zarr format", default=None + description="S3 path to this tiltseries in multiscale OME-Zarr format", + default=None, ) s3_mrc_file: Optional[str] = strawberry.field( - description="S3 path to this tiltseries in MRC format (no scaling)", default=None + description="S3 path to this tiltseries in MRC format (no scaling)", + default=None, ) https_omezarr_dir: Optional[str] = strawberry.field( - description="HTTPS path to this tiltseries in multiscale OME-Zarr format", default=None + description="HTTPS path to this tiltseries in multiscale OME-Zarr format", + default=None, ) https_mrc_file: Optional[str] = strawberry.field( - description="HTTPS path to this tiltseries in MRC format (no scaling)", default=None + description="HTTPS path to this tiltseries in MRC format (no scaling)", + default=None, ) s3_collection_metadata: Optional[str] = strawberry.field( - description="S3 path to the collection metadata file for this tiltseries", default=None + description="S3 path to the collection metadata file for this tiltseries", + default=None, ) https_collection_metadata: Optional[str] = strawberry.field( - description="HTTPS path to the collection metadata file for this tiltseries", default=None + description="HTTPS path to the collection metadata file for this tiltseries", + default=None, ) s3_angle_list: Optional[str] = strawberry.field( - description="S3 path to the angle list file for this tiltseries", default=None + description="S3 path to the angle list file for this tiltseries", + default=None, ) https_angle_list: Optional[str] = strawberry.field( - description="HTTPS path to the angle list file for this tiltseries", default=None + description="HTTPS path to the angle list file for this tiltseries", + default=None, ) s3_gain_file: Optional[str] = strawberry.field( - description="S3 path to the gain file for this tiltseries", default=None + description="S3 path to the gain file for this tiltseries", + default=None, ) https_gain_file: Optional[str] = strawberry.field( - description="HTTPS path to the gain file for this tiltseries", default=None + description="HTTPS path to the gain file for this tiltseries", + default=None, ) acceleration_voltage: float = strawberry.field(description="Electron Microscope Accelerator voltage in volts") spherical_aberration_constant: float = strawberry.field( - description="Spherical Aberration Constant of the objective lens in millimeters" + description="Spherical Aberration Constant of the objective lens in millimeters", ) microscope_manufacturer: tiltseries_microscope_manufacturer_enum = strawberry.field( - description="Name of the microscope manufacturer" + description="Name of the 
microscope manufacturer", ) microscope_model: str = strawberry.field(description="Microscope model name") microscope_energy_filter: str = strawberry.field(description="Energy filter setup used") @@ -633,25 +655,29 @@ class TiltseriesCreateInput: tilting_scheme: str = strawberry.field(description="The order of stage tilting during acquisition of the data") tilt_axis: float = strawberry.field(description="Rotation angle in degrees") total_flux: float = strawberry.field( - description="Number of Electrons reaching the specimen in a square Angstrom area for the entire tilt series" + description="Number of Electrons reaching the specimen in a square Angstrom area for the entire tilt series", ) data_acquisition_software: str = strawberry.field(description="Software used to collect data") related_empiar_entry: Optional[str] = strawberry.field( - description="If a tilt series is deposited into EMPIAR, enter the EMPIAR dataset identifier", default=None + description="If a tilt series is deposited into EMPIAR, enter the EMPIAR dataset identifier", + default=None, ) binning_from_frames: Optional[float] = strawberry.field( - description="Describes the binning factor from frames to tilt series file", default=None + description="Describes the binning factor from frames to tilt series file", + default=None, ) tilt_series_quality: int = strawberry.field( - description="Author assessment of tilt series quality within the dataset (1-5, 5 is best)" + description="Author assessment of tilt series quality within the dataset (1-5, 5 is best)", ) is_aligned: bool = strawberry.field(description="Whether this tilt series is aligned") pixel_spacing: float = strawberry.field(description="Pixel spacing for the tilt series") aligned_tiltseries_binning: Optional[float] = strawberry.field( - description="Binning factor of the aligned tilt series", default=None + description="Binning factor of the aligned tilt series", + default=None, ) tiltseries_frames_count: Optional[int] = strawberry.field( - description="Number of frames associated with this tiltseries", default=None + description="Number of frames associated with this tiltseries", + default=None, ) id: int = strawberry.field(description="An identifier to refer to a specific instance of this type") @@ -661,43 +687,53 @@ class TiltseriesUpdateInput: run_id: Optional[strawberry.ID] = strawberry.field(description=None) deposition_id: Optional[strawberry.ID] = strawberry.field(description=None, default=None) s3_omezarr_dir: Optional[str] = strawberry.field( - description="S3 path to this tiltseries in multiscale OME-Zarr format", default=None + description="S3 path to this tiltseries in multiscale OME-Zarr format", + default=None, ) s3_mrc_file: Optional[str] = strawberry.field( - description="S3 path to this tiltseries in MRC format (no scaling)", default=None + description="S3 path to this tiltseries in MRC format (no scaling)", + default=None, ) https_omezarr_dir: Optional[str] = strawberry.field( - description="HTTPS path to this tiltseries in multiscale OME-Zarr format", default=None + description="HTTPS path to this tiltseries in multiscale OME-Zarr format", + default=None, ) https_mrc_file: Optional[str] = strawberry.field( - description="HTTPS path to this tiltseries in MRC format (no scaling)", default=None + description="HTTPS path to this tiltseries in MRC format (no scaling)", + default=None, ) s3_collection_metadata: Optional[str] = strawberry.field( - description="S3 path to the collection metadata file for this tiltseries", default=None + description="S3 
path to the collection metadata file for this tiltseries", + default=None, ) https_collection_metadata: Optional[str] = strawberry.field( - description="HTTPS path to the collection metadata file for this tiltseries", default=None + description="HTTPS path to the collection metadata file for this tiltseries", + default=None, ) s3_angle_list: Optional[str] = strawberry.field( - description="S3 path to the angle list file for this tiltseries", default=None + description="S3 path to the angle list file for this tiltseries", + default=None, ) https_angle_list: Optional[str] = strawberry.field( - description="HTTPS path to the angle list file for this tiltseries", default=None + description="HTTPS path to the angle list file for this tiltseries", + default=None, ) s3_gain_file: Optional[str] = strawberry.field( - description="S3 path to the gain file for this tiltseries", default=None + description="S3 path to the gain file for this tiltseries", + default=None, ) https_gain_file: Optional[str] = strawberry.field( - description="HTTPS path to the gain file for this tiltseries", default=None + description="HTTPS path to the gain file for this tiltseries", + default=None, ) acceleration_voltage: Optional[float] = strawberry.field( - description="Electron Microscope Accelerator voltage in volts" + description="Electron Microscope Accelerator voltage in volts", ) spherical_aberration_constant: Optional[float] = strawberry.field( - description="Spherical Aberration Constant of the objective lens in millimeters" + description="Spherical Aberration Constant of the objective lens in millimeters", ) microscope_manufacturer: Optional[tiltseries_microscope_manufacturer_enum] = strawberry.field( - description="Name of the microscope manufacturer" + description="Name of the microscope manufacturer", ) microscope_model: Optional[str] = strawberry.field(description="Microscope model name") microscope_energy_filter: Optional[str] = strawberry.field(description="Energy filter setup used") @@ -714,29 +750,33 @@ class TiltseriesUpdateInput: tilt_range: Optional[float] = strawberry.field(description="Total tilt range from min to max in degrees") tilt_step: Optional[float] = strawberry.field(description="Tilt step in degrees") tilting_scheme: Optional[str] = strawberry.field( - description="The order of stage tilting during acquisition of the data" + description="The order of stage tilting during acquisition of the data", ) tilt_axis: Optional[float] = strawberry.field(description="Rotation angle in degrees") total_flux: Optional[float] = strawberry.field( - description="Number of Electrons reaching the specimen in a square Angstrom area for the entire tilt series" + description="Number of Electrons reaching the specimen in a square Angstrom area for the entire tilt series", ) data_acquisition_software: Optional[str] = strawberry.field(description="Software used to collect data") related_empiar_entry: Optional[str] = strawberry.field( - description="If a tilt series is deposited into EMPIAR, enter the EMPIAR dataset identifier", default=None + description="If a tilt series is deposited into EMPIAR, enter the EMPIAR dataset identifier", + default=None, ) binning_from_frames: Optional[float] = strawberry.field( - description="Describes the binning factor from frames to tilt series file", default=None + description="Describes the binning factor from frames to tilt series file", + default=None, ) tilt_series_quality: Optional[int] = strawberry.field( - description="Author assessment of tilt series quality within the 
dataset (1-5, 5 is best)" + description="Author assessment of tilt series quality within the dataset (1-5, 5 is best)", ) is_aligned: Optional[bool] = strawberry.field(description="Whether this tilt series is aligned") pixel_spacing: Optional[float] = strawberry.field(description="Pixel spacing for the tilt series") aligned_tiltseries_binning: Optional[float] = strawberry.field( - description="Binning factor of the aligned tilt series", default=None + description="Binning factor of the aligned tilt series", + default=None, ) tiltseries_frames_count: Optional[int] = strawberry.field( - description="Number of frames associated with this tiltseries", default=None + description="Number of frames associated with this tiltseries", + default=None, ) id: Optional[int] = strawberry.field(description="An identifier to refer to a specific instance of this type") @@ -773,7 +813,7 @@ def format_tiltseries_aggregate_output(query_results: Sequence[RowMapping] | Row format the results using the proper GraphQL types. """ aggregate = [] - if not type(query_results) is list: + if type(query_results) is not list: query_results = [query_results] # type: ignore for row in query_results: aggregate.append(format_tiltseries_aggregate_row(row)) @@ -792,10 +832,10 @@ def format_tiltseries_aggregate_row(row: RowMapping) -> TiltseriesAggregateFunct aggregate = key.split("_", 1) if aggregate[0] not in aggregator_map.keys(): # Turn list of groupby keys into nested objects - if not getattr(output, "groupBy"): - setattr(output, "groupBy", TiltseriesGroupByOptions()) - group = build_tiltseries_groupby_output(getattr(output, "groupBy"), group_keys, value) - setattr(output, "groupBy", group) + if not output.groupBy: + output.groupBy = TiltseriesGroupByOptions() + group = build_tiltseries_groupby_output(output.groupBy, group_keys, value) + output.groupBy = group else: aggregate_name = aggregate[0] if aggregate_name == "count": @@ -826,8 +866,8 @@ async def resolve_tiltseries_aggregate( # Get the selected aggregate functions and columns to operate on, and groupby options if any were provided. # TODO: not sure why selected_fields is a list selections = info.selected_fields[0].selections[0].selections - aggregate_selections = [selection for selection in selections if getattr(selection, "name") != "groupBy"] - groupby_selections = [selection for selection in selections if getattr(selection, "name") == "groupBy"] + aggregate_selections = [selection for selection in selections if selection.name != "groupBy"] + groupby_selections = [selection for selection in selections if selection.name == "groupBy"] groupby_selections = groupby_selections[0].selections if groupby_selections else [] if not aggregate_selections: @@ -858,7 +898,13 @@ async def create_tiltseries( # Check that run relationship is accessible. if validated.run_id: run = await get_db_rows( - db.Run, session, authz_client, principal, {"id": {"_eq": validated.run_id}}, [], AuthzAction.VIEW + db.Run, + session, + authz_client, + principal, + {"id": {"_eq": validated.run_id}}, + [], + AuthzAction.VIEW, ) if not run: raise PlatformicsError("Unauthorized: run does not exist") @@ -913,7 +959,13 @@ async def update_tiltseries( # Check that run relationship is accessible. 
if validated.run_id: run = await get_db_rows( - db.Run, session, authz_client, principal, {"id": {"_eq": validated.run_id}}, [], AuthzAction.VIEW + db.Run, + session, + authz_client, + principal, + {"id": {"_eq": validated.run_id}}, + [], + AuthzAction.VIEW, ) if not run: raise PlatformicsError("Unauthorized: run does not exist") diff --git a/apiv2/graphql_api/types/tomogram.py b/apiv2/graphql_api/types/tomogram.py index c4e156b55..d8aa8fc1f 100644 --- a/apiv2/graphql_api/types/tomogram.py +++ b/apiv2/graphql_api/types/tomogram.py @@ -8,33 +8,29 @@ # ruff: noqa: E501 Line too long +import datetime +import enum import typing -from typing import TYPE_CHECKING, Annotated, Any, Optional, Sequence, Callable, List +from typing import TYPE_CHECKING, Annotated, Optional, Sequence -import platformics.database.models as base_db import database.models as db import strawberry -import datetime -from platformics.graphql_api.core.query_builder import get_db_rows, get_aggregate_db_rows -from validators.tomogram import TomogramCreateInputValidator -from validators.tomogram import TomogramUpdateInputValidator +from fastapi import Depends from graphql_api.helpers.tomogram import TomogramGroupByOptions, build_tomogram_groupby_output -from platformics.graphql_api.core.relay_interface import EntityInterface from graphql_api.types.tomogram_author import TomogramAuthorAggregate, format_tomogram_author_aggregate_output -from fastapi import Depends +from platformics.graphql_api.core.deps import get_authz_client, get_db_session, is_system_user, require_auth_principal from platformics.graphql_api.core.errors import PlatformicsError -from platformics.graphql_api.core.deps import get_authz_client, get_db_session, require_auth_principal, is_system_user +from platformics.graphql_api.core.query_builder import get_aggregate_db_rows, get_db_rows from platformics.graphql_api.core.query_input_types import ( - aggregator_map, - orderBy, + BoolComparators, EnumComparators, - DatetimeComparators, - IntComparators, FloatComparators, + IntComparators, StrComparators, - UUIDComparators, - BoolComparators, + aggregator_map, + orderBy, ) +from platformics.graphql_api.core.relay_interface import EntityInterface from platformics.graphql_api.core.strawberry_extensions import DependencyExtension from platformics.security.authorization import AuthzAction, AuthzClient, Principal from sqlalchemy import inspect @@ -42,28 +38,28 @@ from sqlalchemy.ext.asyncio import AsyncSession from strawberry import relay from strawberry.types import Info -from support.limit_offset import LimitOffsetClause -from typing_extensions import TypedDict -import enum from support.enums import ( fiducial_alignment_status_enum, - tomogram_reconstruction_method_enum, tomogram_processing_enum, + tomogram_reconstruction_method_enum, tomogram_type_enum, ) +from support.limit_offset import LimitOffsetClause +from typing_extensions import TypedDict +from validators.tomogram import TomogramCreateInputValidator, TomogramUpdateInputValidator E = typing.TypeVar("E") T = typing.TypeVar("T") if TYPE_CHECKING: - from graphql_api.types.alignment import AlignmentOrderByClause, AlignmentWhereClause, Alignment - from graphql_api.types.tomogram_author import TomogramAuthorOrderByClause, TomogramAuthorWhereClause, TomogramAuthor - from graphql_api.types.deposition import DepositionOrderByClause, DepositionWhereClause, Deposition - from graphql_api.types.run import RunOrderByClause, RunWhereClause, Run + from graphql_api.types.alignment import Alignment, AlignmentOrderByClause, 
AlignmentWhereClause + from graphql_api.types.deposition import Deposition, DepositionOrderByClause, DepositionWhereClause + from graphql_api.types.run import Run, RunOrderByClause, RunWhereClause + from graphql_api.types.tomogram_author import TomogramAuthor, TomogramAuthorOrderByClause, TomogramAuthorWhereClause from graphql_api.types.tomogram_voxel_spacing import ( + TomogramVoxelSpacing, TomogramVoxelSpacingOrderByClause, TomogramVoxelSpacingWhereClause, - TomogramVoxelSpacing, ) pass @@ -110,7 +106,7 @@ async def load_alignment_rows( @relay.connection( relay.ListConnection[ Annotated["TomogramAuthor", strawberry.lazy("graphql_api.types.tomogram_author")] - ] # type:ignore + ], # type:ignore ) async def load_tomogram_author_rows( root: "Tomogram", @@ -186,7 +182,7 @@ async def load_tomogram_voxel_spacing_rows( mapper = inspect(db.Tomogram) relationship = mapper.relationships["tomogram_voxel_spacing"] return await dataloader.loader_for(relationship, where, order_by).load( - root.tomogram_voxel_spacing_id + root.tomogram_voxel_spacing_id, ) # type:ignore @@ -336,60 +332,72 @@ class Tomogram(EntityInterface): size_z: float = strawberry.field(description="Tomogram voxels in the z dimension") voxel_spacing: float = strawberry.field(description="Voxel spacing equal in all three axes in angstroms") fiducial_alignment_status: fiducial_alignment_status_enum = strawberry.field( - description="Whether the tomographic alignment was computed based on fiducial markers." + description="Whether the tomographic alignment was computed based on fiducial markers.", ) reconstruction_method: tomogram_reconstruction_method_enum = strawberry.field( - description="Describe reconstruction method (WBP, SART, SIRT)" + description="Describe reconstruction method (WBP, SART, SIRT)", ) processing: tomogram_processing_enum = strawberry.field( - description="Describe additional processing used to derive the tomogram" + description="Describe additional processing used to derive the tomogram", ) tomogram_version: Optional[float] = strawberry.field(description="Version of tomogram", default=None) processing_software: Optional[str] = strawberry.field( - description="Processing software used to derive the tomogram", default=None + description="Processing software used to derive the tomogram", + default=None, ) reconstruction_software: str = strawberry.field(description="Name of software used for reconstruction") is_canonical: Optional[bool] = strawberry.field( - description="whether this tomogram is canonical for the run", default=None + description="whether this tomogram is canonical for the run", + default=None, ) s3_omezarr_dir: Optional[str] = strawberry.field( - description="S3 path to this tomogram in multiscale OME-Zarr format", default=None + description="S3 path to this tomogram in multiscale OME-Zarr format", + default=None, ) https_omezarr_dir: Optional[str] = strawberry.field( - description="HTTPS path to this tomogram in multiscale OME-Zarr format", default=None + description="HTTPS path to this tomogram in multiscale OME-Zarr format", + default=None, ) s3_mrc_file: Optional[str] = strawberry.field( - description="S3 path to this tomogram in MRC format (no scaling)", default=None + description="S3 path to this tomogram in MRC format (no scaling)", + default=None, ) https_mrc_file: Optional[str] = strawberry.field( - description="HTTPS path to this tomogram in MRC format (no scaling)", default=None + description="HTTPS path to this tomogram in MRC format (no scaling)", + default=None, ) scale0_dimensions: 
Optional[str] = strawberry.field( - description="comma separated x,y,z dimensions of the unscaled tomogram", default=None + description="comma separated x,y,z dimensions of the unscaled tomogram", + default=None, ) scale1_dimensions: Optional[str] = strawberry.field( - description="comma separated x,y,z dimensions of the scale1 tomogram", default=None + description="comma separated x,y,z dimensions of the scale1 tomogram", + default=None, ) scale2_dimensions: Optional[str] = strawberry.field( - description="comma separated x,y,z dimensions of the scale2 tomogram", default=None + description="comma separated x,y,z dimensions of the scale2 tomogram", + default=None, ) ctf_corrected: Optional[bool] = strawberry.field(description="Whether this tomogram is CTF corrected", default=None) offset_x: int = strawberry.field(description="x offset data relative to the canonical tomogram in pixels") offset_y: int = strawberry.field(description="y offset data relative to the canonical tomogram in pixels") offset_z: int = strawberry.field(description="z offset data relative to the canonical tomogram in pixels") affine_transformation_matrix: Optional[str] = strawberry.field( - description="A placeholder for any type of data.", default=None + description="A placeholder for any type of data.", + default=None, ) key_photo_url: Optional[str] = strawberry.field(description="URL for the key photo", default=None) key_photo_thumbnail_url: Optional[str] = strawberry.field( - description="URL for the thumbnail of key photo", default=None + description="URL for the thumbnail of key photo", + default=None, ) neuroglancer_config: Optional[str] = strawberry.field( - description="the compact json of neuroglancer config", default=None + description="the compact json of neuroglancer config", + default=None, ) tomogram_type: Optional[tomogram_type_enum] = strawberry.field(description=None, default=None) is_standardized: bool = strawberry.field( - description="Whether this tomogram was generated per the portal's standards" + description="Whether this tomogram was generated per the portal's standards", ) id: int = strawberry.field(description="An identifier to refer to a specific instance of this type") @@ -546,7 +554,8 @@ class TomogramCreateInput: deposition_id: Optional[strawberry.ID] = strawberry.field(description=None, default=None) run_id: Optional[strawberry.ID] = strawberry.field(description=None, default=None) tomogram_voxel_spacing_id: Optional[strawberry.ID] = strawberry.field( - description="Voxel spacings for a run", default=None + description="Voxel spacings for a run", + default=None, ) name: Optional[str] = strawberry.field(description="Short name for this tomogram", default=None) size_x: float = strawberry.field(description="Tomogram voxels in the x dimension") @@ -554,60 +563,72 @@ class TomogramCreateInput: size_z: float = strawberry.field(description="Tomogram voxels in the z dimension") voxel_spacing: float = strawberry.field(description="Voxel spacing equal in all three axes in angstroms") fiducial_alignment_status: fiducial_alignment_status_enum = strawberry.field( - description="Whether the tomographic alignment was computed based on fiducial markers." 
+ description="Whether the tomographic alignment was computed based on fiducial markers.", ) reconstruction_method: tomogram_reconstruction_method_enum = strawberry.field( - description="Describe reconstruction method (WBP, SART, SIRT)" + description="Describe reconstruction method (WBP, SART, SIRT)", ) processing: tomogram_processing_enum = strawberry.field( - description="Describe additional processing used to derive the tomogram" + description="Describe additional processing used to derive the tomogram", ) tomogram_version: Optional[float] = strawberry.field(description="Version of tomogram", default=None) processing_software: Optional[str] = strawberry.field( - description="Processing software used to derive the tomogram", default=None + description="Processing software used to derive the tomogram", + default=None, ) reconstruction_software: str = strawberry.field(description="Name of software used for reconstruction") is_canonical: Optional[bool] = strawberry.field( - description="whether this tomogram is canonical for the run", default=None + description="whether this tomogram is canonical for the run", + default=None, ) s3_omezarr_dir: Optional[str] = strawberry.field( - description="S3 path to this tomogram in multiscale OME-Zarr format", default=None + description="S3 path to this tomogram in multiscale OME-Zarr format", + default=None, ) https_omezarr_dir: Optional[str] = strawberry.field( - description="HTTPS path to this tomogram in multiscale OME-Zarr format", default=None + description="HTTPS path to this tomogram in multiscale OME-Zarr format", + default=None, ) s3_mrc_file: Optional[str] = strawberry.field( - description="S3 path to this tomogram in MRC format (no scaling)", default=None + description="S3 path to this tomogram in MRC format (no scaling)", + default=None, ) https_mrc_file: Optional[str] = strawberry.field( - description="HTTPS path to this tomogram in MRC format (no scaling)", default=None + description="HTTPS path to this tomogram in MRC format (no scaling)", + default=None, ) scale0_dimensions: Optional[str] = strawberry.field( - description="comma separated x,y,z dimensions of the unscaled tomogram", default=None + description="comma separated x,y,z dimensions of the unscaled tomogram", + default=None, ) scale1_dimensions: Optional[str] = strawberry.field( - description="comma separated x,y,z dimensions of the scale1 tomogram", default=None + description="comma separated x,y,z dimensions of the scale1 tomogram", + default=None, ) scale2_dimensions: Optional[str] = strawberry.field( - description="comma separated x,y,z dimensions of the scale2 tomogram", default=None + description="comma separated x,y,z dimensions of the scale2 tomogram", + default=None, ) ctf_corrected: Optional[bool] = strawberry.field(description="Whether this tomogram is CTF corrected", default=None) offset_x: int = strawberry.field(description="x offset data relative to the canonical tomogram in pixels") offset_y: int = strawberry.field(description="y offset data relative to the canonical tomogram in pixels") offset_z: int = strawberry.field(description="z offset data relative to the canonical tomogram in pixels") affine_transformation_matrix: Optional[str] = strawberry.field( - description="A placeholder for any type of data.", default=None + description="A placeholder for any type of data.", + default=None, ) key_photo_url: Optional[str] = strawberry.field(description="URL for the key photo", default=None) key_photo_thumbnail_url: Optional[str] = strawberry.field( - description="URL 
for the thumbnail of key photo", default=None + description="URL for the thumbnail of key photo", + default=None, ) neuroglancer_config: Optional[str] = strawberry.field( - description="the compact json of neuroglancer config", default=None + description="the compact json of neuroglancer config", + default=None, ) tomogram_type: Optional[tomogram_type_enum] = strawberry.field(description=None, default=None) is_standardized: bool = strawberry.field( - description="Whether this tomogram was generated per the portal's standards" + description="Whether this tomogram was generated per the portal's standards", ) id: int = strawberry.field(description="An identifier to refer to a specific instance of this type") @@ -618,7 +639,8 @@ class TomogramUpdateInput: deposition_id: Optional[strawberry.ID] = strawberry.field(description=None, default=None) run_id: Optional[strawberry.ID] = strawberry.field(description=None, default=None) tomogram_voxel_spacing_id: Optional[strawberry.ID] = strawberry.field( - description="Voxel spacings for a run", default=None + description="Voxel spacings for a run", + default=None, ) name: Optional[str] = strawberry.field(description="Short name for this tomogram", default=None) size_x: Optional[float] = strawberry.field(description="Tomogram voxels in the x dimension") @@ -626,60 +648,72 @@ class TomogramUpdateInput: size_z: Optional[float] = strawberry.field(description="Tomogram voxels in the z dimension") voxel_spacing: Optional[float] = strawberry.field(description="Voxel spacing equal in all three axes in angstroms") fiducial_alignment_status: Optional[fiducial_alignment_status_enum] = strawberry.field( - description="Whether the tomographic alignment was computed based on fiducial markers." + description="Whether the tomographic alignment was computed based on fiducial markers.", ) reconstruction_method: Optional[tomogram_reconstruction_method_enum] = strawberry.field( - description="Describe reconstruction method (WBP, SART, SIRT)" + description="Describe reconstruction method (WBP, SART, SIRT)", ) processing: Optional[tomogram_processing_enum] = strawberry.field( - description="Describe additional processing used to derive the tomogram" + description="Describe additional processing used to derive the tomogram", ) tomogram_version: Optional[float] = strawberry.field(description="Version of tomogram", default=None) processing_software: Optional[str] = strawberry.field( - description="Processing software used to derive the tomogram", default=None + description="Processing software used to derive the tomogram", + default=None, ) reconstruction_software: Optional[str] = strawberry.field(description="Name of software used for reconstruction") is_canonical: Optional[bool] = strawberry.field( - description="whether this tomogram is canonical for the run", default=None + description="whether this tomogram is canonical for the run", + default=None, ) s3_omezarr_dir: Optional[str] = strawberry.field( - description="S3 path to this tomogram in multiscale OME-Zarr format", default=None + description="S3 path to this tomogram in multiscale OME-Zarr format", + default=None, ) https_omezarr_dir: Optional[str] = strawberry.field( - description="HTTPS path to this tomogram in multiscale OME-Zarr format", default=None + description="HTTPS path to this tomogram in multiscale OME-Zarr format", + default=None, ) s3_mrc_file: Optional[str] = strawberry.field( - description="S3 path to this tomogram in MRC format (no scaling)", default=None + description="S3 path to this tomogram in 
MRC format (no scaling)", + default=None, ) https_mrc_file: Optional[str] = strawberry.field( - description="HTTPS path to this tomogram in MRC format (no scaling)", default=None + description="HTTPS path to this tomogram in MRC format (no scaling)", + default=None, ) scale0_dimensions: Optional[str] = strawberry.field( - description="comma separated x,y,z dimensions of the unscaled tomogram", default=None + description="comma separated x,y,z dimensions of the unscaled tomogram", + default=None, ) scale1_dimensions: Optional[str] = strawberry.field( - description="comma separated x,y,z dimensions of the scale1 tomogram", default=None + description="comma separated x,y,z dimensions of the scale1 tomogram", + default=None, ) scale2_dimensions: Optional[str] = strawberry.field( - description="comma separated x,y,z dimensions of the scale2 tomogram", default=None + description="comma separated x,y,z dimensions of the scale2 tomogram", + default=None, ) ctf_corrected: Optional[bool] = strawberry.field(description="Whether this tomogram is CTF corrected", default=None) offset_x: Optional[int] = strawberry.field(description="x offset data relative to the canonical tomogram in pixels") offset_y: Optional[int] = strawberry.field(description="y offset data relative to the canonical tomogram in pixels") offset_z: Optional[int] = strawberry.field(description="z offset data relative to the canonical tomogram in pixels") affine_transformation_matrix: Optional[str] = strawberry.field( - description="A placeholder for any type of data.", default=None + description="A placeholder for any type of data.", + default=None, ) key_photo_url: Optional[str] = strawberry.field(description="URL for the key photo", default=None) key_photo_thumbnail_url: Optional[str] = strawberry.field( - description="URL for the thumbnail of key photo", default=None + description="URL for the thumbnail of key photo", + default=None, ) neuroglancer_config: Optional[str] = strawberry.field( - description="the compact json of neuroglancer config", default=None + description="the compact json of neuroglancer config", + default=None, ) tomogram_type: Optional[tomogram_type_enum] = strawberry.field(description=None, default=None) is_standardized: Optional[bool] = strawberry.field( - description="Whether this tomogram was generated per the portal's standards" + description="Whether this tomogram was generated per the portal's standards", ) id: Optional[int] = strawberry.field(description="An identifier to refer to a specific instance of this type") @@ -716,7 +750,7 @@ def format_tomogram_aggregate_output(query_results: Sequence[RowMapping] | RowMa format the results using the proper GraphQL types. 
""" aggregate = [] - if not type(query_results) is list: + if type(query_results) is not list: query_results = [query_results] # type: ignore for row in query_results: aggregate.append(format_tomogram_aggregate_row(row)) @@ -735,10 +769,10 @@ def format_tomogram_aggregate_row(row: RowMapping) -> TomogramAggregateFunctions aggregate = key.split("_", 1) if aggregate[0] not in aggregator_map.keys(): # Turn list of groupby keys into nested objects - if not getattr(output, "groupBy"): - setattr(output, "groupBy", TomogramGroupByOptions()) - group = build_tomogram_groupby_output(getattr(output, "groupBy"), group_keys, value) - setattr(output, "groupBy", group) + if not output.groupBy: + output.groupBy = TomogramGroupByOptions() + group = build_tomogram_groupby_output(output.groupBy, group_keys, value) + output.groupBy = group else: aggregate_name = aggregate[0] if aggregate_name == "count": @@ -769,8 +803,8 @@ async def resolve_tomograms_aggregate( # Get the selected aggregate functions and columns to operate on, and groupby options if any were provided. # TODO: not sure why selected_fields is a list selections = info.selected_fields[0].selections[0].selections - aggregate_selections = [selection for selection in selections if getattr(selection, "name") != "groupBy"] - groupby_selections = [selection for selection in selections if getattr(selection, "name") == "groupBy"] + aggregate_selections = [selection for selection in selections if selection.name != "groupBy"] + groupby_selections = [selection for selection in selections if selection.name == "groupBy"] groupby_selections = groupby_selections[0].selections if groupby_selections else [] if not aggregate_selections: @@ -827,7 +861,13 @@ async def create_tomogram( # Check that run relationship is accessible. if validated.run_id: run = await get_db_rows( - db.Run, session, authz_client, principal, {"id": {"_eq": validated.run_id}}, [], AuthzAction.VIEW + db.Run, + session, + authz_client, + principal, + {"id": {"_eq": validated.run_id}}, + [], + AuthzAction.VIEW, ) if not run: raise PlatformicsError("Unauthorized: run does not exist") @@ -912,7 +952,13 @@ async def update_tomogram( # Check that run relationship is accessible. 
if validated.run_id: run = await get_db_rows( - db.Run, session, authz_client, principal, {"id": {"_eq": validated.run_id}}, [], AuthzAction.VIEW + db.Run, + session, + authz_client, + principal, + {"id": {"_eq": validated.run_id}}, + [], + AuthzAction.VIEW, ) if not run: raise PlatformicsError("Unauthorized: run does not exist") diff --git a/apiv2/graphql_api/types/tomogram_author.py b/apiv2/graphql_api/types/tomogram_author.py index 5551482b4..a353b422d 100644 --- a/apiv2/graphql_api/types/tomogram_author.py +++ b/apiv2/graphql_api/types/tomogram_author.py @@ -8,48 +8,41 @@ # ruff: noqa: E501 Line too long +import datetime +import enum import typing -from typing import TYPE_CHECKING, Annotated, Any, Optional, Sequence, Callable, List +from typing import TYPE_CHECKING, Annotated, Optional, Sequence -import platformics.database.models as base_db import database.models as db import strawberry -import datetime -from platformics.graphql_api.core.query_builder import get_db_rows, get_aggregate_db_rows -from validators.tomogram_author import TomogramAuthorCreateInputValidator -from validators.tomogram_author import TomogramAuthorUpdateInputValidator -from graphql_api.helpers.tomogram_author import TomogramAuthorGroupByOptions, build_tomogram_author_groupby_output -from platformics.graphql_api.core.relay_interface import EntityInterface from fastapi import Depends +from graphql_api.helpers.tomogram_author import TomogramAuthorGroupByOptions, build_tomogram_author_groupby_output +from platformics.graphql_api.core.deps import get_authz_client, get_db_session, is_system_user, require_auth_principal from platformics.graphql_api.core.errors import PlatformicsError -from platformics.graphql_api.core.deps import get_authz_client, get_db_session, require_auth_principal, is_system_user +from platformics.graphql_api.core.query_builder import get_aggregate_db_rows, get_db_rows from platformics.graphql_api.core.query_input_types import ( - aggregator_map, - orderBy, - EnumComparators, - DatetimeComparators, + BoolComparators, IntComparators, - FloatComparators, StrComparators, - UUIDComparators, - BoolComparators, + aggregator_map, + orderBy, ) +from platformics.graphql_api.core.relay_interface import EntityInterface from platformics.graphql_api.core.strawberry_extensions import DependencyExtension from platformics.security.authorization import AuthzAction, AuthzClient, Principal from sqlalchemy import inspect from sqlalchemy.engine.row import RowMapping from sqlalchemy.ext.asyncio import AsyncSession -from strawberry import relay from strawberry.types import Info from support.limit_offset import LimitOffsetClause from typing_extensions import TypedDict -import enum +from validators.tomogram_author import TomogramAuthorCreateInputValidator, TomogramAuthorUpdateInputValidator E = typing.TypeVar("E") T = typing.TypeVar("T") if TYPE_CHECKING: - from graphql_api.types.tomogram import TomogramOrderByClause, TomogramWhereClause, Tomogram + from graphql_api.types.tomogram import Tomogram, TomogramOrderByClause, TomogramWhereClause pass else: @@ -149,25 +142,30 @@ class TomogramAuthor(EntityInterface): load_tomogram_rows ) # type:ignore author_list_order: int = strawberry.field( - description="The order that the author is listed as in the associated publication" + description="The order that the author is listed as in the associated publication", ) orcid: Optional[str] = strawberry.field(description="The ORCID identifier for the author.", default=None) name: str = strawberry.field(description="The full name of the 
author.") email: Optional[str] = strawberry.field(description="The email address of the author.", default=None) affiliation_name: Optional[str] = strawberry.field( - description="The name of the author's affiliation.", default=None + description="The name of the author's affiliation.", + default=None, ) affiliation_address: Optional[str] = strawberry.field( - description="The address of the author's affiliation.", default=None + description="The address of the author's affiliation.", + default=None, ) affiliation_identifier: Optional[str] = strawberry.field( - description="A Research Organization Registry (ROR) identifier.", default=None + description="A Research Organization Registry (ROR) identifier.", + default=None, ) corresponding_author_status: Optional[bool] = strawberry.field( - description="Whether the author is a corresponding author.", default=None + description="Whether the author is a corresponding author.", + default=None, ) primary_author_status: Optional[bool] = strawberry.field( - description="Whether the author is a primary author.", default=None + description="Whether the author is a primary author.", + default=None, ) id: int = strawberry.field(description="An identifier to refer to a specific instance of this type") @@ -243,7 +241,9 @@ class TomogramAuthorAggregateFunctions: # This is a hack to accept "distinct" and "columns" as arguments to "count" @strawberry.field def count( - self, distinct: Optional[bool] = False, columns: Optional[TomogramAuthorCountColumns] = None + self, + distinct: Optional[bool] = False, + columns: Optional[TomogramAuthorCountColumns] = None, ) -> Optional[int]: # Count gets set with the proper value in the resolver, so we just return it here return self.count # type: ignore @@ -278,25 +278,30 @@ class TomogramAuthorAggregate: class TomogramAuthorCreateInput: tomogram_id: Optional[strawberry.ID] = strawberry.field(description="Metadata describing a tomogram.", default=None) author_list_order: int = strawberry.field( - description="The order that the author is listed as in the associated publication" + description="The order that the author is listed as in the associated publication", ) orcid: Optional[str] = strawberry.field(description="The ORCID identifier for the author.", default=None) name: str = strawberry.field(description="The full name of the author.") email: Optional[str] = strawberry.field(description="The email address of the author.", default=None) affiliation_name: Optional[str] = strawberry.field( - description="The name of the author's affiliation.", default=None + description="The name of the author's affiliation.", + default=None, ) affiliation_address: Optional[str] = strawberry.field( - description="The address of the author's affiliation.", default=None + description="The address of the author's affiliation.", + default=None, ) affiliation_identifier: Optional[str] = strawberry.field( - description="A Research Organization Registry (ROR) identifier.", default=None + description="A Research Organization Registry (ROR) identifier.", + default=None, ) corresponding_author_status: Optional[bool] = strawberry.field( - description="Whether the author is a corresponding author.", default=None + description="Whether the author is a corresponding author.", + default=None, ) primary_author_status: Optional[bool] = strawberry.field( - description="Whether the author is a primary author.", default=None + description="Whether the author is a primary author.", + default=None, ) id: int = strawberry.field(description="An identifier to 
refer to a specific instance of this type") @@ -305,25 +310,30 @@ class TomogramAuthorCreateInput: class TomogramAuthorUpdateInput: tomogram_id: Optional[strawberry.ID] = strawberry.field(description="Metadata describing a tomogram.", default=None) author_list_order: Optional[int] = strawberry.field( - description="The order that the author is listed as in the associated publication" + description="The order that the author is listed as in the associated publication", ) orcid: Optional[str] = strawberry.field(description="The ORCID identifier for the author.", default=None) name: Optional[str] = strawberry.field(description="The full name of the author.") email: Optional[str] = strawberry.field(description="The email address of the author.", default=None) affiliation_name: Optional[str] = strawberry.field( - description="The name of the author's affiliation.", default=None + description="The name of the author's affiliation.", + default=None, ) affiliation_address: Optional[str] = strawberry.field( - description="The address of the author's affiliation.", default=None + description="The address of the author's affiliation.", + default=None, ) affiliation_identifier: Optional[str] = strawberry.field( - description="A Research Organization Registry (ROR) identifier.", default=None + description="A Research Organization Registry (ROR) identifier.", + default=None, ) corresponding_author_status: Optional[bool] = strawberry.field( - description="Whether the author is a corresponding author.", default=None + description="Whether the author is a corresponding author.", + default=None, ) primary_author_status: Optional[bool] = strawberry.field( - description="Whether the author is a primary author.", default=None + description="Whether the author is a primary author.", + default=None, ) id: Optional[int] = strawberry.field(description="An identifier to refer to a specific instance of this type") @@ -362,7 +372,7 @@ def format_tomogram_author_aggregate_output( format the results using the proper GraphQL types. """ aggregate = [] - if not type(query_results) is list: + if type(query_results) is not list: query_results = [query_results] # type: ignore for row in query_results: aggregate.append(format_tomogram_author_aggregate_row(row)) @@ -381,10 +391,10 @@ def format_tomogram_author_aggregate_row(row: RowMapping) -> TomogramAuthorAggre aggregate = key.split("_", 1) if aggregate[0] not in aggregator_map.keys(): # Turn list of groupby keys into nested objects - if not getattr(output, "groupBy"): - setattr(output, "groupBy", TomogramAuthorGroupByOptions()) - group = build_tomogram_author_groupby_output(getattr(output, "groupBy"), group_keys, value) - setattr(output, "groupBy", group) + if not output.groupBy: + output.groupBy = TomogramAuthorGroupByOptions() + group = build_tomogram_author_groupby_output(output.groupBy, group_keys, value) + output.groupBy = group else: aggregate_name = aggregate[0] if aggregate_name == "count": @@ -415,8 +425,8 @@ async def resolve_tomogram_authors_aggregate( # Get the selected aggregate functions and columns to operate on, and groupby options if any were provided. 
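For context on the loop cleaned up in `format_tomogram_author_aggregate_row` above: each flattened result key is split once on `_`, and the first token decides whether the key names an aggregate function (present in `aggregator_map`) or belongs to the groupBy path. A toy illustration — the row contents and the stand-in map below are invented for the example; real keys come from the SQL aggregate query:

```python
aggregator_map = {"count": None, "avg": None, "min": None, "max": None}  # stand-in

row = {"count": 12, "avg_author_list_order": 1.5, "name": "J. Doe"}

for key, value in row.items():
    head, _, column = key.partition("_")
    if head in aggregator_map:
        print(f"aggregate {head!r} over {column or '<all rows>'}: {value}")
    else:
        print(f"groupBy key {key!r}: {value}")
```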
# TODO: not sure why selected_fields is a list selections = info.selected_fields[0].selections[0].selections - aggregate_selections = [selection for selection in selections if getattr(selection, "name") != "groupBy"] - groupby_selections = [selection for selection in selections if getattr(selection, "name") == "groupBy"] + aggregate_selections = [selection for selection in selections if selection.name != "groupBy"] + groupby_selections = [selection for selection in selections if selection.name == "groupBy"] groupby_selections = groupby_selections[0].selections if groupby_selections else [] if not aggregate_selections: @@ -447,7 +457,13 @@ async def create_tomogram_author( # Check that tomogram relationship is accessible. if validated.tomogram_id: tomogram = await get_db_rows( - db.Tomogram, session, authz_client, principal, {"id": {"_eq": validated.tomogram_id}}, [], AuthzAction.VIEW + db.Tomogram, + session, + authz_client, + principal, + {"id": {"_eq": validated.tomogram_id}}, + [], + AuthzAction.VIEW, ) if not tomogram: raise PlatformicsError("Unauthorized: tomogram does not exist") @@ -489,7 +505,13 @@ async def update_tomogram_author( # Check that tomogram relationship is accessible. if validated.tomogram_id: tomogram = await get_db_rows( - db.Tomogram, session, authz_client, principal, {"id": {"_eq": validated.tomogram_id}}, [], AuthzAction.VIEW + db.Tomogram, + session, + authz_client, + principal, + {"id": {"_eq": validated.tomogram_id}}, + [], + AuthzAction.VIEW, ) if not tomogram: raise PlatformicsError("Unauthorized: tomogram does not exist") diff --git a/apiv2/graphql_api/types/tomogram_voxel_spacing.py b/apiv2/graphql_api/types/tomogram_voxel_spacing.py index c611acd88..83b6550ed 100644 --- a/apiv2/graphql_api/types/tomogram_voxel_spacing.py +++ b/apiv2/graphql_api/types/tomogram_voxel_spacing.py @@ -8,37 +8,31 @@ # ruff: noqa: E501 Line too long +import datetime +import enum import typing -from typing import TYPE_CHECKING, Annotated, Any, Optional, Sequence, Callable, List +from typing import TYPE_CHECKING, Annotated, Optional, Sequence -import platformics.database.models as base_db import database.models as db import strawberry -import datetime -from platformics.graphql_api.core.query_builder import get_db_rows, get_aggregate_db_rows -from validators.tomogram_voxel_spacing import TomogramVoxelSpacingCreateInputValidator -from validators.tomogram_voxel_spacing import TomogramVoxelSpacingUpdateInputValidator +from fastapi import Depends from graphql_api.helpers.tomogram_voxel_spacing import ( TomogramVoxelSpacingGroupByOptions, build_tomogram_voxel_spacing_groupby_output, ) -from platformics.graphql_api.core.relay_interface import EntityInterface from graphql_api.types.annotation_file import AnnotationFileAggregate, format_annotation_file_aggregate_output from graphql_api.types.tomogram import TomogramAggregate, format_tomogram_aggregate_output -from fastapi import Depends +from platformics.graphql_api.core.deps import get_authz_client, get_db_session, is_system_user, require_auth_principal from platformics.graphql_api.core.errors import PlatformicsError -from platformics.graphql_api.core.deps import get_authz_client, get_db_session, require_auth_principal, is_system_user +from platformics.graphql_api.core.query_builder import get_aggregate_db_rows, get_db_rows from platformics.graphql_api.core.query_input_types import ( - aggregator_map, - orderBy, - EnumComparators, - DatetimeComparators, - IntComparators, FloatComparators, + IntComparators, StrComparators, - 
UUIDComparators, - BoolComparators, + aggregator_map, + orderBy, ) +from platformics.graphql_api.core.relay_interface import EntityInterface from platformics.graphql_api.core.strawberry_extensions import DependencyExtension from platformics.security.authorization import AuthzAction, AuthzClient, Principal from sqlalchemy import inspect @@ -48,15 +42,18 @@ from strawberry.types import Info from support.limit_offset import LimitOffsetClause from typing_extensions import TypedDict -import enum +from validators.tomogram_voxel_spacing import ( + TomogramVoxelSpacingCreateInputValidator, + TomogramVoxelSpacingUpdateInputValidator, +) E = typing.TypeVar("E") T = typing.TypeVar("T") if TYPE_CHECKING: - from graphql_api.types.annotation_file import AnnotationFileOrderByClause, AnnotationFileWhereClause, AnnotationFile - from graphql_api.types.run import RunOrderByClause, RunWhereClause, Run - from graphql_api.types.tomogram import TomogramOrderByClause, TomogramWhereClause, Tomogram + from graphql_api.types.annotation_file import AnnotationFile, AnnotationFileOrderByClause, AnnotationFileWhereClause + from graphql_api.types.run import Run, RunOrderByClause, RunWhereClause + from graphql_api.types.tomogram import Tomogram, TomogramOrderByClause, TomogramWhereClause pass else: @@ -83,7 +80,7 @@ @relay.connection( relay.ListConnection[ Annotated["AnnotationFile", strawberry.lazy("graphql_api.types.annotation_file")] - ] # type:ignore + ], # type:ignore ) async def load_annotation_file_rows( root: "TomogramVoxelSpacing", @@ -128,7 +125,7 @@ async def load_run_rows( @relay.connection( - relay.ListConnection[Annotated["Tomogram", strawberry.lazy("graphql_api.types.tomogram")]] # type:ignore + relay.ListConnection[Annotated["Tomogram", strawberry.lazy("graphql_api.types.tomogram")]], # type:ignore ) async def load_tomogram_rows( root: "TomogramVoxelSpacing", @@ -230,7 +227,7 @@ class TomogramVoxelSpacing(EntityInterface): voxel_spacing: float = strawberry.field(description="Voxel spacing equal in all three axes in angstroms") s3_prefix: str = strawberry.field(description="Path to a directory containing data for this entity as an S3 url") https_prefix: str = strawberry.field( - description="Path to a directory containing data for this entity as an HTTPS url" + description="Path to a directory containing data for this entity as an HTTPS url", ) id: int = strawberry.field(description="An identifier to refer to a specific instance of this type") @@ -298,7 +295,9 @@ class TomogramVoxelSpacingAggregateFunctions: # This is a hack to accept "distinct" and "columns" as arguments to "count" @strawberry.field def count( - self, distinct: Optional[bool] = False, columns: Optional[TomogramVoxelSpacingCountColumns] = None + self, + distinct: Optional[bool] = False, + columns: Optional[TomogramVoxelSpacingCountColumns] = None, ) -> Optional[int]: # Count gets set with the proper value in the resolver, so we just return it here return self.count # type: ignore @@ -335,7 +334,7 @@ class TomogramVoxelSpacingCreateInput: voxel_spacing: float = strawberry.field(description="Voxel spacing equal in all three axes in angstroms") s3_prefix: str = strawberry.field(description="Path to a directory containing data for this entity as an S3 url") https_prefix: str = strawberry.field( - description="Path to a directory containing data for this entity as an HTTPS url" + description="Path to a directory containing data for this entity as an HTTPS url", ) id: int = strawberry.field(description="An identifier to refer to a specific 
@@ -335,7 +334,7 @@ class TomogramVoxelSpacingCreateInput:
     voxel_spacing: float = strawberry.field(description="Voxel spacing equal in all three axes in angstroms")
     s3_prefix: str = strawberry.field(description="Path to a directory containing data for this entity as an S3 url")
     https_prefix: str = strawberry.field(
-        description="Path to a directory containing data for this entity as an HTTPS url"
+        description="Path to a directory containing data for this entity as an HTTPS url",
     )
     id: int = strawberry.field(description="An identifier to refer to a specific instance of this type")
@@ -345,10 +344,10 @@ class TomogramVoxelSpacingUpdateInput:
     run_id: Optional[strawberry.ID] = strawberry.field(description=None, default=None)
     voxel_spacing: Optional[float] = strawberry.field(description="Voxel spacing equal in all three axes in angstroms")
     s3_prefix: Optional[str] = strawberry.field(
-        description="Path to a directory containing data for this entity as an S3 url"
+        description="Path to a directory containing data for this entity as an S3 url",
     )
     https_prefix: Optional[str] = strawberry.field(
-        description="Path to a directory containing data for this entity as an HTTPS url"
+        description="Path to a directory containing data for this entity as an HTTPS url",
     )
     id: Optional[int] = strawberry.field(description="An identifier to refer to a specific instance of this type")
@@ -387,7 +386,7 @@ def format_tomogram_voxel_spacing_aggregate_output(
     format the results using the proper GraphQL types.
     """
     aggregate = []
-    if not type(query_results) is list:
+    if type(query_results) is not list:
         query_results = [query_results]  # type: ignore
     for row in query_results:
         aggregate.append(format_tomogram_voxel_spacing_aggregate_row(row))
@@ -406,10 +405,10 @@ def format_tomogram_voxel_spacing_aggregate_row(row: RowMapping) -> TomogramVoxe
         aggregate = key.split("_", 1)
         if aggregate[0] not in aggregator_map.keys():
             # Turn list of groupby keys into nested objects
-            if not getattr(output, "groupBy"):
-                setattr(output, "groupBy", TomogramVoxelSpacingGroupByOptions())
-            group = build_tomogram_voxel_spacing_groupby_output(getattr(output, "groupBy"), group_keys, value)
-            setattr(output, "groupBy", group)
+            if not output.groupBy:
+                output.groupBy = TomogramVoxelSpacingGroupByOptions()
+            group = build_tomogram_voxel_spacing_groupby_output(output.groupBy, group_keys, value)
+            output.groupBy = group
         else:
             aggregate_name = aggregate[0]
             if aggregate_name == "count":
@@ -440,8 +439,8 @@ async def resolve_tomogram_voxel_spacings_aggregate(
     # Get the selected aggregate functions and columns to operate on, and groupby options if any were provided.
     # TODO: not sure why selected_fields is a list
     selections = info.selected_fields[0].selections[0].selections
-    aggregate_selections = [selection for selection in selections if getattr(selection, "name") != "groupBy"]
-    groupby_selections = [selection for selection in selections if getattr(selection, "name") == "groupBy"]
+    aggregate_selections = [selection for selection in selections if selection.name != "groupBy"]
+    groupby_selections = [selection for selection in selections if selection.name == "groupBy"]
     groupby_selections = groupby_selections[0].selections if groupby_selections else []

     if not aggregate_selections:
@@ -472,7 +471,13 @@ async def create_tomogram_voxel_spacing(
     # Check that run relationship is accessible.
     if validated.run_id:
         run = await get_db_rows(
-            db.Run, session, authz_client, principal, {"id": {"_eq": validated.run_id}}, [], AuthzAction.VIEW
+            db.Run,
+            session,
+            authz_client,
+            principal,
+            {"id": {"_eq": validated.run_id}},
+            [],
+            AuthzAction.VIEW,
         )
         if not run:
             raise PlatformicsError("Unauthorized: run does not exist")
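
The `selection.name`, `output.groupBy`, and `type(...) is not list` changes in the hunks above are mechanical lint fixes: ruff's B009/B010 discourage `getattr`/`setattr` with a constant attribute name, and pycodestyle's E714 prefers the single `is not` operator. A small illustration of both rules, with hypothetical names:

class Output:
    groupBy = None


output = Output()

# B009/B010: with a constant attribute name, plain attribute access replaces
# getattr(output, "groupBy") / setattr(output, "groupBy", ...).
if not output.groupBy:
    output.groupBy = {}

# getattr/setattr remain the right tool when the name is only known at runtime.
field_name = "groupBy"
setattr(output, field_name, {"dataset_id": 1})

# E714: spell identity tests as one operator.
query_results = {"count": 3}
if type(query_results) is not list:  # rather than: not type(...) is list
    query_results = [query_results]

print(getattr(output, field_name), query_results)
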
@@ -514,7 +519,13 @@ async def update_tomogram_voxel_spacing(
     # Check that run relationship is accessible.
     if validated.run_id:
         run = await get_db_rows(
-            db.Run, session, authz_client, principal, {"id": {"_eq": validated.run_id}}, [], AuthzAction.VIEW
+            db.Run,
+            session,
+            authz_client,
+            principal,
+            {"id": {"_eq": validated.run_id}},
+            [],
+            AuthzAction.VIEW,
         )
         if not run:
             raise PlatformicsError("Unauthorized: run does not exist")
@@ -523,7 +534,13 @@ async def update_tomogram_voxel_spacing(

     # Fetch entities for update, if we have access to them
     entities = await get_db_rows(
-        db.TomogramVoxelSpacing, session, authz_client, principal, where, [], AuthzAction.UPDATE
+        db.TomogramVoxelSpacing,
+        session,
+        authz_client,
+        principal,
+        where,
+        [],
+        AuthzAction.UPDATE,
     )
     if len(entities) == 0:
         raise PlatformicsError("Unauthorized: Cannot update entities")
@@ -555,7 +572,13 @@ async def delete_tomogram_voxel_spacing(
     """
     # Fetch entities for deletion, if we have access to them
     entities = await get_db_rows(
-        db.TomogramVoxelSpacing, session, authz_client, principal, where, [], AuthzAction.DELETE
+        db.TomogramVoxelSpacing,
+        session,
+        authz_client,
+        principal,
+        where,
+        [],
+        AuthzAction.DELETE,
     )
     if len(entities) == 0:
         raise PlatformicsError("Unauthorized: Cannot delete entities")
diff --git a/apiv2/main.py b/apiv2/main.py
index af3b6ed33..73e156415 100644
--- a/apiv2/main.py
+++ b/apiv2/main.py
@@ -4,7 +4,6 @@

 import strawberry
 import uvicorn
-from cerbos.sdk.model import Principal
 from graphql_api.mutations import Mutation
 from graphql_api.queries import Query
 from platformics.graphql_api.core.deps import get_auth_principal
@@ -12,6 +11,8 @@
 from platformics.graphql_api.setup import get_app, get_strawberry_config
 from platformics.settings import APISettings

+from cerbos.sdk.model import Principal
+
 settings = APISettings.model_validate({})  # Workaround for https://github.com/pydantic/pydantic/issues/3753
 schema = strawberry.Schema(query=Query, mutation=Mutation, config=get_strawberry_config(), extensions=[HandleErrors()])
diff --git a/apiv2/platformics/graphql_api/core/deps.py b/apiv2/platformics/graphql_api/core/deps.py
index ef85065af..fecd95729 100644
--- a/apiv2/platformics/graphql_api/core/deps.py
+++ b/apiv2/platformics/graphql_api/core/deps.py
@@ -78,9 +78,7 @@ def require_auth_principal(


 def is_system_user(principal: Principal = Depends(require_auth_principal)) -> bool:
-    if principal.attr.get("service_identity"):
-        return True
-    return False
+    return bool(principal.attr.get("service_identity"))


 def require_system_user(principal: Principal = Depends(require_auth_principal)) -> None:
diff --git a/apiv2/platformics/graphql_api/files.py b/apiv2/platformics/graphql_api/files.py
index ece91eded..e0f4c80c1 100644
--- a/apiv2/platformics/graphql_api/files.py
+++ b/apiv2/platformics/graphql_api/files.py
@@ -519,12 +519,12 @@ async def concatenate_files(

     # Concatenate files (tmp files are automatically deleted when closed)
     with tempfile.NamedTemporaryFile() as file_concatenated:
-        with open(file_concatenated.name, "ab") as fp_concat:  # noqa: ASYNC101
+        with open(file_concatenated.name, "ab") as fp_concat:  # noqa: ASYNC101, ASYNC230
             for file in files:
                 # Download file locally and append it
                 with tempfile.NamedTemporaryFile() as file_temp:
                     s3_client.download_file(file.namespace, file.path, file_temp.name)
-                    with open(file_temp.name, "rb") as fp_temp:  # noqa: ASYNC101
+                    with open(file_temp.name, "rb") as fp_temp:  # noqa: ASYNC101, ASYNC230
                         fp_concat.write(fp_temp.read())
     # Upload to S3
     path = f"{FILE_CONCATENATION_PREFIX}/{uuid6.uuid7()}"
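
The files.py hunk above only widens the noqa list (ASYNC230 is the successor code to ASYNC101 for blocking open() calls inside async functions), so the blocking reads stay put. For reference, a sketch of the pattern those rules point toward, assuming Python 3.9+ for asyncio.to_thread; the helper name is hypothetical and this is not part of the patch:

import asyncio
import shutil


async def concatenate_local_files(paths: list[str], dest: str) -> None:
    def _concat() -> None:
        # Plain blocking file IO is fine here: it runs on a worker thread,
        # so the event loop is never blocked.
        with open(dest, "ab") as fp_concat:
            for path in paths:
                with open(path, "rb") as fp_temp:
                    shutil.copyfileobj(fp_temp, fp_concat)

    # Offload the blocking work instead of silencing the lint with noqa.
    await asyncio.to_thread(_concat)
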
diff --git a/apiv2/platformics/security/authorization.py b/apiv2/platformics/security/authorization.py
index b9ad69b74..aab519250 100644
--- a/apiv2/platformics/security/authorization.py
+++ b/apiv2/platformics/security/authorization.py
@@ -2,14 +2,15 @@
 from enum import Enum

 import platformics.database.models as db
-from cerbos.sdk.client import CerbosClient
-from cerbos.sdk.model import Principal as CerbosPrincipal
-from cerbos.sdk.model import Resource, ResourceDesc
 from platformics.security.token_auth import get_token_claims
 from platformics.settings import APISettings
 from platformics.thirdparty.cerbos_sqlalchemy.query import get_query
 from sqlalchemy.sql import Select

+from cerbos.sdk.client import CerbosClient
+from cerbos.sdk.model import Principal as CerbosPrincipal
+from cerbos.sdk.model import Resource, ResourceDesc
+

 class AuthzAction(str, Enum):
     VIEW = "view"
@@ -97,9 +98,7 @@ def can_create(self, resource, principal: Principal) -> bool:
         resource_type = type(resource).__tablename__
         attr = self._obj_to_dict(resource)
         resource = Resource(id="NEW_ID", kind=resource_type, attr=attr)
-        if self.client.is_allowed(AuthzAction.CREATE, principal, resource):
-            return True
-        return False
+        return bool(self.client.is_allowed(AuthzAction.CREATE, principal, resource))

     def can_update(self, resource, principal: Principal) -> bool:
         resource_type = type(resource).__tablename__
@@ -109,9 +108,7 @@ def can_update(self, resource, principal: Principal) -> bool:
         # so they cannot be sent in cerbos perms checks, and we need to find/use the table's
         # primary key instead of a hardcoded column name.
         resource = Resource(id="resource_id", kind=resource_type, attr=attr)
-        if self.client.is_allowed(AuthzAction.UPDATE, principal, resource):
-            return True
-        return False
+        return bool(self.client.is_allowed(AuthzAction.UPDATE, principal, resource))

     # Get a SQLAlchemy model with authz filters already applied
     def get_resource_query(
diff --git a/apiv2/scrape.py b/apiv2/scrape.py
index 4def85d71..df9d1ac9b 100644
--- a/apiv2/scrape.py
+++ b/apiv2/scrape.py
@@ -3,7 +3,6 @@

 import click
 import cryoet_data_portal as cdp
-
 from database import models
 from platformics.database.connect import init_sync_db
 from support.enums import tomogram_reconstruction_method_enum as reconstruction_enum
diff --git a/apiv2/support/enums.py b/apiv2/support/enums.py
index 9b6ac2b80..ad1151490 100644
--- a/apiv2/support/enums.py
+++ b/apiv2/support/enums.py
@@ -5,9 +5,10 @@
 Make changes to the template codegen/templates/support/enums.py.j2 instead.
 """

-import strawberry
 import enum

+import strawberry
+

 @strawberry.enum
 class tomogram_type_enum(enum.Enum):
diff --git a/apiv2/support/limit_offset.py b/apiv2/support/limit_offset.py
index cd5a91df4..ecd2686d4 100644
--- a/apiv2/support/limit_offset.py
+++ b/apiv2/support/limit_offset.py
@@ -5,9 +5,10 @@
 Make changes to the template codegen/templates/support/limit_offset.py.j2 instead.
 """

-import strawberry
 from typing import Optional, TypedDict

+import strawberry
+

 @strawberry.input
 class LimitOffsetClause(TypedDict):
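
The is_system_user, can_create, and can_update rewrites above are all the same simplification (ruff SIM103, needless-bool): a branch that only converts truthiness into True/False collapses to a single bool(...) call. The wrapper is what keeps the annotated `-> bool` honest, since dict.get() would otherwise leak the stored value or None. Distilled:

def is_system_user_before(attr: dict) -> bool:
    if attr.get("service_identity"):
        return True
    return False


def is_system_user_after(attr: dict) -> bool:
    # bool() collapses the branch and normalizes the return value.
    return bool(attr.get("service_identity"))


assert is_system_user_after({"service_identity": "ingestion-bot"}) is True
assert is_system_user_after({}) is False
assert is_system_user_before({}) is is_system_user_after({})
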
diff --git a/apiv2/template_overrides/graphql_api/queries.py.j2 b/apiv2/template_overrides/graphql_api/queries.py.j2
new file mode 100644
index 000000000..bdc8a31c6
--- /dev/null
+++ b/apiv2/template_overrides/graphql_api/queries.py.j2
@@ -0,0 +1,38 @@
+"""
+Supported GraphQL queries for files and entities
+
+Auto-generated by running 'make codegen'. Do not edit.
+Make changes to the template codegen/templates/graphql_api/queries.py.j2 instead.
+"""
+
+import strawberry
+from strawberry import relay
+from typing import Sequence, List
+{%- if render_files %}
+from platformics.graphql_api.files import File
+{%- endif %}
+{%- for class in classes %}
+from graphql_api.types.{{ class.snake_name }} import {{ class.name }}, resolve_{{ class.plural_snake_name }}, {{ class.name }}Aggregate, resolve_{{ class.plural_snake_name }}_aggregate
+{%- endfor %}
+
+
+@strawberry.type
+class Query:
+    # Allow relay-style queries by node ID
+# node: relay.Node = relay.node()
+# nodes: List[relay.Node] = relay.node()
+
+# {%- if render_files %}
+# # Query files
+# files: Sequence[File] = resolve_files
+# {%- endif %}
+
+    # Query entities
+    {%- for class in classes %}
+    {{ class.plural_snake_name }}: Sequence[{{ class.name }}] = resolve_{{ class.plural_snake_name }}
+    {%- endfor %}
+
+    # Query entity aggregates
+    {%- for class in classes %}
+    {{ class.plural_snake_name }}_aggregate: {{ class.name }}Aggregate = resolve_{{ class.plural_snake_name }}_aggregate
+    {%- endfor %}
diff --git a/apiv2/test_infra/factories/alignment.py b/apiv2/test_infra/factories/alignment.py
index 27e356e45..bb609e63f 100644
--- a/apiv2/test_infra/factories/alignment.py
+++ b/apiv2/test_infra/factories/alignment.py
@@ -7,18 +7,17 @@

 # ruff: noqa: E501 Line too long

-import random
 import factory
-import uuid6
 from database.models import Alignment
-from platformics.test_infra.factories.base import FileFactory, CommonFactory
-from test_infra.factories.deposition import DepositionFactory
-from test_infra.factories.tiltseries import TiltseriesFactory
-from test_infra.factories.run import RunFactory
 from factory import Faker, fuzzy
 from faker_biology.bioseq import Bioseq
 from faker_biology.physiology import Organ
 from faker_enum import EnumProvider
+from platformics.test_infra.factories.base import CommonFactory
+
+from test_infra.factories.deposition import DepositionFactory
+from test_infra.factories.run import RunFactory
+from test_infra.factories.tiltseries import TiltseriesFactory

 Faker.add_provider(Bioseq)
 Faker.add_provider(Organ)
diff --git a/apiv2/test_infra/factories/annotation.py b/apiv2/test_infra/factories/annotation.py
index c28e5259f..e77c2c029 100644
--- a/apiv2/test_infra/factories/annotation.py
+++ b/apiv2/test_infra/factories/annotation.py
@@ -7,17 +7,16 @@

 # ruff: noqa: E501 Line too long

-import random
 import factory
-import uuid6
 from database.models import Annotation
-from platformics.test_infra.factories.base import FileFactory, CommonFactory
-from test_infra.factories.run import RunFactory
-from test_infra.factories.deposition import DepositionFactory
 from factory import Faker, fuzzy
 from faker_biology.bioseq import Bioseq
 from faker_biology.physiology import Organ
 from faker_enum import EnumProvider
+from platformics.test_infra.factories.base import CommonFactory
+
+from test_infra.factories.deposition import DepositionFactory
+from test_infra.factories.run import RunFactory

 Faker.add_provider(Bioseq)
 Faker.add_provider(Organ)
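
The factory diffs here and in the sections that follow only regroup imports (unused `random`/`uuid6` and `FileFactory` dropped, first-party `platformics` separated from local `test_infra` modules); the factories themselves are unchanged. For orientation, a minimal sketch of the factory-boy pattern these files use, with a stand-in model and plain factory.Factory instead of the real SQLAlchemy-backed CommonFactory:

import factory
from factory import fuzzy


class Dataset:
    # Stand-in for the SQLAlchemy model the real factories construct.
    def __init__(self, title: str, sample_type: str) -> None:
        self.title = title
        self.sample_type = sample_type


class DatasetSketchFactory(factory.Factory):
    class Meta:
        model = Dataset

    title = fuzzy.FuzzyText(length=12)
    sample_type = fuzzy.FuzzyChoice(["cell", "tissue", "organism", "other"])


dataset = DatasetSketchFactory()
print(dataset.title, dataset.sample_type)
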
diff --git a/apiv2/test_infra/factories/annotation_author.py b/apiv2/test_infra/factories/annotation_author.py
index bd6bfa111..b6cfc6feb 100644
--- a/apiv2/test_infra/factories/annotation_author.py
+++ b/apiv2/test_infra/factories/annotation_author.py
@@ -7,16 +7,15 @@

 # ruff: noqa: E501 Line too long

-import random
 import factory
-import uuid6
 from database.models import AnnotationAuthor
-from platformics.test_infra.factories.base import FileFactory, CommonFactory
-from test_infra.factories.annotation import AnnotationFactory
 from factory import Faker, fuzzy
 from faker_biology.bioseq import Bioseq
 from faker_biology.physiology import Organ
 from faker_enum import EnumProvider
+from platformics.test_infra.factories.base import CommonFactory
+
+from test_infra.factories.annotation import AnnotationFactory

 Faker.add_provider(Bioseq)
 Faker.add_provider(Organ)
diff --git a/apiv2/test_infra/factories/annotation_file.py b/apiv2/test_infra/factories/annotation_file.py
index 0c18349f2..cc0de4a25 100644
--- a/apiv2/test_infra/factories/annotation_file.py
+++ b/apiv2/test_infra/factories/annotation_file.py
@@ -7,18 +7,17 @@

 # ruff: noqa: E501 Line too long

-import random
 import factory
-import uuid6
 from database.models import AnnotationFile
-from platformics.test_infra.factories.base import FileFactory, CommonFactory
-from test_infra.factories.alignment import AlignmentFactory
-from test_infra.factories.annotation_shape import AnnotationShapeFactory
-from test_infra.factories.tomogram_voxel_spacing import TomogramVoxelSpacingFactory
 from factory import Faker, fuzzy
 from faker_biology.bioseq import Bioseq
 from faker_biology.physiology import Organ
 from faker_enum import EnumProvider
+from platformics.test_infra.factories.base import CommonFactory
+
+from test_infra.factories.alignment import AlignmentFactory
+from test_infra.factories.annotation_shape import AnnotationShapeFactory
+from test_infra.factories.tomogram_voxel_spacing import TomogramVoxelSpacingFactory

 Faker.add_provider(Bioseq)
 Faker.add_provider(Organ)
diff --git a/apiv2/test_infra/factories/annotation_shape.py b/apiv2/test_infra/factories/annotation_shape.py
index 492e3b988..b10212c8c 100644
--- a/apiv2/test_infra/factories/annotation_shape.py
+++ b/apiv2/test_infra/factories/annotation_shape.py
@@ -7,16 +7,15 @@

 # ruff: noqa: E501 Line too long

-import random
 import factory
-import uuid6
 from database.models import AnnotationShape
-from platformics.test_infra.factories.base import FileFactory, CommonFactory
-from test_infra.factories.annotation import AnnotationFactory
 from factory import Faker, fuzzy
 from faker_biology.bioseq import Bioseq
 from faker_biology.physiology import Organ
 from faker_enum import EnumProvider
+from platformics.test_infra.factories.base import CommonFactory
+
+from test_infra.factories.annotation import AnnotationFactory

 Faker.add_provider(Bioseq)
 Faker.add_provider(Organ)
diff --git a/apiv2/test_infra/factories/dataset.py b/apiv2/test_infra/factories/dataset.py
index 0f5cffaad..24860055e 100644
--- a/apiv2/test_infra/factories/dataset.py
+++ b/apiv2/test_infra/factories/dataset.py
@@ -7,16 +7,15 @@

 # ruff: noqa: E501 Line too long

-import random
 import factory
-import uuid6
 from database.models import Dataset
-from platformics.test_infra.factories.base import FileFactory, CommonFactory
-from test_infra.factories.deposition import DepositionFactory
 from factory import Faker, fuzzy
 from faker_biology.bioseq import Bioseq
 from faker_biology.physiology import Organ
 from faker_enum import EnumProvider
+from platformics.test_infra.factories.base import CommonFactory
+
+from test_infra.factories.deposition import DepositionFactory

 Faker.add_provider(Bioseq)
 Faker.add_provider(Organ)
@@ -44,7 +43,7 @@ class Meta:
     cell_strain_name = fuzzy.FuzzyText()
     cell_strain_id = fuzzy.FuzzyText()
     sample_type = fuzzy.FuzzyChoice(
-        ["cell", "tissue", "organism", "organelle", "virus", "in_vitro", "in_silico", "other"]
+        ["cell", "tissue", "organism", "organelle", "virus", "in_vitro", "in_silico", "other"],
     )
     sample_preparation = fuzzy.FuzzyText()
     grid_preparation = fuzzy.FuzzyText()
diff --git a/apiv2/test_infra/factories/dataset_author.py b/apiv2/test_infra/factories/dataset_author.py
index 0ca1eb36d..5e7daf877 100644
--- a/apiv2/test_infra/factories/dataset_author.py
+++ b/apiv2/test_infra/factories/dataset_author.py
@@ -7,16 +7,15 @@

 # ruff: noqa: E501 Line too long

-import random
 import factory
-import uuid6
 from database.models import DatasetAuthor
-from platformics.test_infra.factories.base import FileFactory, CommonFactory
-from test_infra.factories.dataset import DatasetFactory
 from factory import Faker, fuzzy
 from faker_biology.bioseq import Bioseq
 from faker_biology.physiology import Organ
 from faker_enum import EnumProvider
+from platformics.test_infra.factories.base import CommonFactory
+
+from test_infra.factories.dataset import DatasetFactory

 Faker.add_provider(Bioseq)
 Faker.add_provider(Organ)
diff --git a/apiv2/test_infra/factories/dataset_funding.py b/apiv2/test_infra/factories/dataset_funding.py
index 6716174b2..47e750559 100644
--- a/apiv2/test_infra/factories/dataset_funding.py
+++ b/apiv2/test_infra/factories/dataset_funding.py
@@ -7,16 +7,15 @@

 # ruff: noqa: E501 Line too long

-import random
 import factory
-import uuid6
 from database.models import DatasetFunding
-from platformics.test_infra.factories.base import FileFactory, CommonFactory
-from test_infra.factories.dataset import DatasetFactory
 from factory import Faker, fuzzy
 from faker_biology.bioseq import Bioseq
 from faker_biology.physiology import Organ
 from faker_enum import EnumProvider
+from platformics.test_infra.factories.base import CommonFactory
+
+from test_infra.factories.dataset import DatasetFactory

 Faker.add_provider(Bioseq)
 Faker.add_provider(Organ)
diff --git a/apiv2/test_infra/factories/deposition.py b/apiv2/test_infra/factories/deposition.py
index ed4901eee..44144b42d 100644
--- a/apiv2/test_infra/factories/deposition.py
+++ b/apiv2/test_infra/factories/deposition.py
@@ -7,15 +7,13 @@

 # ruff: noqa: E501 Line too long

-import random
 import factory
-import uuid6
 from database.models import Deposition
-from platformics.test_infra.factories.base import FileFactory, CommonFactory
 from factory import Faker, fuzzy
 from faker_biology.bioseq import Bioseq
 from faker_biology.physiology import Organ
 from faker_enum import EnumProvider
+from platformics.test_infra.factories.base import CommonFactory

 Faker.add_provider(Bioseq)
 Faker.add_provider(Organ)
diff --git a/apiv2/test_infra/factories/deposition_author.py b/apiv2/test_infra/factories/deposition_author.py
index 0840948de..62f37f332 100644
--- a/apiv2/test_infra/factories/deposition_author.py
+++ b/apiv2/test_infra/factories/deposition_author.py
@@ -7,16 +7,15 @@

 # ruff: noqa: E501 Line too long

-import random
 import factory
-import uuid6
 from database.models import DepositionAuthor
-from platformics.test_infra.factories.base import FileFactory, CommonFactory
-from test_infra.factories.deposition import DepositionFactory
 from factory import Faker, fuzzy
 from faker_biology.bioseq import Bioseq
 from faker_biology.physiology import Organ
 from faker_enum import EnumProvider
+from platformics.test_infra.factories.base import CommonFactory
+
+from test_infra.factories.deposition import DepositionFactory

 Faker.add_provider(Bioseq)
 Faker.add_provider(Organ)
diff --git a/apiv2/test_infra/factories/deposition_type.py b/apiv2/test_infra/factories/deposition_type.py
index d1b76be7e..d7ab7033e 100644
--- a/apiv2/test_infra/factories/deposition_type.py
+++ b/apiv2/test_infra/factories/deposition_type.py
@@ -7,16 +7,15 @@

 # ruff: noqa: E501 Line too long

-import random
 import factory
-import uuid6
 from database.models import DepositionType
-from platformics.test_infra.factories.base import FileFactory, CommonFactory
-from test_infra.factories.deposition import DepositionFactory
 from factory import Faker, fuzzy
 from faker_biology.bioseq import Bioseq
 from faker_biology.physiology import Organ
 from faker_enum import EnumProvider
+from platformics.test_infra.factories.base import CommonFactory
+
+from test_infra.factories.deposition import DepositionFactory

 Faker.add_provider(Bioseq)
 Faker.add_provider(Organ)
diff --git a/apiv2/test_infra/factories/frame.py b/apiv2/test_infra/factories/frame.py
index edc211a21..e8e48221e 100644
--- a/apiv2/test_infra/factories/frame.py
+++ b/apiv2/test_infra/factories/frame.py
@@ -7,17 +7,16 @@

 # ruff: noqa: E501 Line too long

-import random
 import factory
-import uuid6
 from database.models import Frame
-from platformics.test_infra.factories.base import FileFactory, CommonFactory
-from test_infra.factories.deposition import DepositionFactory
-from test_infra.factories.run import RunFactory
 from factory import Faker, fuzzy
 from faker_biology.bioseq import Bioseq
 from faker_biology.physiology import Organ
 from faker_enum import EnumProvider
+from platformics.test_infra.factories.base import CommonFactory
+
+from test_infra.factories.deposition import DepositionFactory
+from test_infra.factories.run import RunFactory

 Faker.add_provider(Bioseq)
 Faker.add_provider(Organ)
diff --git a/apiv2/test_infra/factories/per_section_alignment_parameters.py b/apiv2/test_infra/factories/per_section_alignment_parameters.py
index 86f29c145..c86a57758 100644
--- a/apiv2/test_infra/factories/per_section_alignment_parameters.py
+++ b/apiv2/test_infra/factories/per_section_alignment_parameters.py
@@ -7,16 +7,15 @@

 # ruff: noqa: E501 Line too long

-import random
 import factory
-import uuid6
 from database.models import PerSectionAlignmentParameters
-from platformics.test_infra.factories.base import FileFactory, CommonFactory
-from test_infra.factories.alignment import AlignmentFactory
 from factory import Faker, fuzzy
 from faker_biology.bioseq import Bioseq
 from faker_biology.physiology import Organ
 from faker_enum import EnumProvider
+from platformics.test_infra.factories.base import CommonFactory
+
+from test_infra.factories.alignment import AlignmentFactory

 Faker.add_provider(Bioseq)
 Faker.add_provider(Organ)
diff --git a/apiv2/test_infra/factories/per_section_parameters.py b/apiv2/test_infra/factories/per_section_parameters.py
index 13f6d7684..16e0d38d0 100644
--- a/apiv2/test_infra/factories/per_section_parameters.py
+++ b/apiv2/test_infra/factories/per_section_parameters.py
@@ -7,17 +7,16 @@

 # ruff: noqa: E501 Line too long

-import random
 import factory
-import uuid6
 from database.models import PerSectionParameters
-from platformics.test_infra.factories.base import FileFactory, CommonFactory
-from test_infra.factories.frame import FrameFactory
-from test_infra.factories.tiltseries import TiltseriesFactory
 from factory import Faker, fuzzy
 from faker_biology.bioseq import Bioseq
 from faker_biology.physiology import Organ
 from faker_enum import EnumProvider
+from platformics.test_infra.factories.base import CommonFactory
+
+from test_infra.factories.frame import FrameFactory
+from test_infra.factories.tiltseries import TiltseriesFactory

 Faker.add_provider(Bioseq)
 Faker.add_provider(Organ)
diff --git a/apiv2/test_infra/factories/run.py b/apiv2/test_infra/factories/run.py
index 047b3a0b1..d18c7534b 100644
--- a/apiv2/test_infra/factories/run.py
+++ b/apiv2/test_infra/factories/run.py
@@ -7,16 +7,15 @@

 # ruff: noqa: E501 Line too long

-import random
 import factory
-import uuid6
 from database.models import Run
-from platformics.test_infra.factories.base import FileFactory, CommonFactory
-from test_infra.factories.dataset import DatasetFactory
 from factory import Faker, fuzzy
 from faker_biology.bioseq import Bioseq
 from faker_biology.physiology import Organ
 from faker_enum import EnumProvider
+from platformics.test_infra.factories.base import CommonFactory
+
+from test_infra.factories.dataset import DatasetFactory

 Faker.add_provider(Bioseq)
 Faker.add_provider(Organ)
diff --git a/apiv2/test_infra/factories/tiltseries.py b/apiv2/test_infra/factories/tiltseries.py
index f866e2961..e140e37c6 100644
--- a/apiv2/test_infra/factories/tiltseries.py
+++ b/apiv2/test_infra/factories/tiltseries.py
@@ -7,17 +7,16 @@

 # ruff: noqa: E501 Line too long

-import random
 import factory
-import uuid6
 from database.models import Tiltseries
-from platformics.test_infra.factories.base import FileFactory, CommonFactory
-from test_infra.factories.run import RunFactory
-from test_infra.factories.deposition import DepositionFactory
 from factory import Faker, fuzzy
 from faker_biology.bioseq import Bioseq
 from faker_biology.physiology import Organ
 from faker_enum import EnumProvider
+from platformics.test_infra.factories.base import CommonFactory
+
+from test_infra.factories.deposition import DepositionFactory
+from test_infra.factories.run import RunFactory

 Faker.add_provider(Bioseq)
 Faker.add_provider(Organ)
diff --git a/apiv2/test_infra/factories/tomogram.py b/apiv2/test_infra/factories/tomogram.py
index 7c7fff7b1..76fa5effa 100644
--- a/apiv2/test_infra/factories/tomogram.py
+++ b/apiv2/test_infra/factories/tomogram.py
@@ -7,19 +7,18 @@

 # ruff: noqa: E501 Line too long

-import random
 import factory
-import uuid6
 from database.models import Tomogram
-from platformics.test_infra.factories.base import FileFactory, CommonFactory
-from test_infra.factories.alignment import AlignmentFactory
-from test_infra.factories.deposition import DepositionFactory
-from test_infra.factories.run import RunFactory
-from test_infra.factories.tomogram_voxel_spacing import TomogramVoxelSpacingFactory
 from factory import Faker, fuzzy
 from faker_biology.bioseq import Bioseq
 from faker_biology.physiology import Organ
 from faker_enum import EnumProvider
+from platformics.test_infra.factories.base import CommonFactory
+
+from test_infra.factories.alignment import AlignmentFactory
+from test_infra.factories.deposition import DepositionFactory
+from test_infra.factories.run import RunFactory
+from test_infra.factories.tomogram_voxel_spacing import TomogramVoxelSpacingFactory

 Faker.add_provider(Bioseq)
 Faker.add_provider(Organ)
diff --git a/apiv2/test_infra/factories/tomogram_author.py b/apiv2/test_infra/factories/tomogram_author.py
index 21a003692..c3bbb592f 100644
--- a/apiv2/test_infra/factories/tomogram_author.py
+++ b/apiv2/test_infra/factories/tomogram_author.py
@@ -7,16 +7,15 @@

 # ruff: noqa: E501 Line too long

-import random
 import factory
-import uuid6
 from database.models import TomogramAuthor
-from platformics.test_infra.factories.base import FileFactory, CommonFactory
-from test_infra.factories.tomogram import TomogramFactory
 from factory import Faker, fuzzy
 from faker_biology.bioseq import Bioseq
 from faker_biology.physiology import Organ
 from faker_enum import EnumProvider
+from platformics.test_infra.factories.base import CommonFactory
+
+from test_infra.factories.tomogram import TomogramFactory

 Faker.add_provider(Bioseq)
 Faker.add_provider(Organ)
diff --git a/apiv2/test_infra/factories/tomogram_voxel_spacing.py b/apiv2/test_infra/factories/tomogram_voxel_spacing.py
index ad1e4d246..f1374158b 100644
--- a/apiv2/test_infra/factories/tomogram_voxel_spacing.py
+++ b/apiv2/test_infra/factories/tomogram_voxel_spacing.py
@@ -7,16 +7,15 @@

 # ruff: noqa: E501 Line too long

-import random
 import factory
-import uuid6
 from database.models import TomogramVoxelSpacing
-from platformics.test_infra.factories.base import FileFactory, CommonFactory
-from test_infra.factories.run import RunFactory
 from factory import Faker, fuzzy
 from faker_biology.bioseq import Bioseq
 from faker_biology.physiology import Organ
 from faker_enum import EnumProvider
+from platformics.test_infra.factories.base import CommonFactory
+
+from test_infra.factories.run import RunFactory

 Faker.add_provider(Bioseq)
 Faker.add_provider(Organ)
diff --git a/apiv2/validators/alignment.py b/apiv2/validators/alignment.py
index 01f4b1b09..ab47c75f0 100644
--- a/apiv2/validators/alignment.py
+++ b/apiv2/validators/alignment.py
@@ -8,13 +8,10 @@

 # ruff: noqa: E501 Line too long

-from support.enums import alignment_type_enum
-
-import typing
-import datetime
 import uuid

 from pydantic import BaseModel, ConfigDict, Field, StringConstraints
+from support.enums import alignment_type_enum
 from typing_extensions import Annotated
diff --git a/apiv2/validators/annotation.py b/apiv2/validators/annotation.py
index d9a2f5850..cd7ab27f8 100644
--- a/apiv2/validators/annotation.py
+++ b/apiv2/validators/annotation.py
@@ -8,13 +8,11 @@

 # ruff: noqa: E501 Line too long

-from support.enums import annotation_method_type_enum
-
-import typing
 import datetime
 import uuid

 from pydantic import BaseModel, ConfigDict, Field, StringConstraints
+from support.enums import annotation_method_type_enum
 from typing_extensions import Annotated
diff --git a/apiv2/validators/annotation_author.py b/apiv2/validators/annotation_author.py
index af65a5f3a..1a0cfed54 100644
--- a/apiv2/validators/annotation_author.py
+++ b/apiv2/validators/annotation_author.py
@@ -8,8 +8,6 @@

 # ruff: noqa: E501 Line too long

-import typing
-import datetime
 import uuid

 from pydantic import BaseModel, ConfigDict, Field, StringConstraints
diff --git a/apiv2/validators/annotation_file.py b/apiv2/validators/annotation_file.py
index 690c30d90..f24891fb0 100644
--- a/apiv2/validators/annotation_file.py
+++ b/apiv2/validators/annotation_file.py
@@ -8,13 +8,10 @@

 # ruff: noqa: E501 Line too long

-from support.enums import annotation_file_source_enum
-
-import typing
-import datetime
 import uuid

 from pydantic import BaseModel, ConfigDict, Field, StringConstraints
+from support.enums import annotation_file_source_enum
 from typing_extensions import Annotated
diff --git a/apiv2/validators/annotation_shape.py b/apiv2/validators/annotation_shape.py
index f32b2d387..2ac81d5b5 100644
--- a/apiv2/validators/annotation_shape.py
+++ b/apiv2/validators/annotation_shape.py
@@ -8,13 +8,10 @@

 # ruff: noqa: E501 Line too long

-from support.enums import annotation_file_shape_type_enum
-
-import typing
-import datetime
 import uuid

-from pydantic import BaseModel, ConfigDict, Field, StringConstraints
+from pydantic import BaseModel, ConfigDict, Field
+from support.enums import annotation_file_shape_type_enum
 from typing_extensions import Annotated
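
The validator diffs in this stretch are again import pruning: `typing`/`datetime` go away where unused, and `StringConstraints` is dropped from files whose fields no longer reference it. A rough sketch of the pydantic v2 shape these generated validators share; the field names and constraints below are illustrative, not copied from the real models:

import uuid
from typing import Optional

from pydantic import BaseModel, ConfigDict, Field, StringConstraints
from typing_extensions import Annotated


class AuthorSketchValidator(BaseModel):
    model_config = ConfigDict(extra="forbid")  # illustrative strictness

    id: uuid.UUID = Field()
    name: Annotated[str, StringConstraints(strip_whitespace=True, min_length=1)]
    corresponding_author_status: Optional[bool] = None


author = AuthorSketchValidator(id=uuid.uuid4(), name="  Ada Lovelace  ")
print(author.name)  # "Ada Lovelace" -- whitespace stripped by the constraint
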
diff --git a/apiv2/validators/dataset.py b/apiv2/validators/dataset.py
index afb3e4601..a19e7c198 100644
--- a/apiv2/validators/dataset.py
+++ b/apiv2/validators/dataset.py
@@ -8,13 +8,11 @@

 # ruff: noqa: E501 Line too long

-from support.enums import sample_type_enum
-
-import typing
 import datetime
 import uuid

 from pydantic import BaseModel, ConfigDict, Field, StringConstraints
+from support.enums import sample_type_enum
 from typing_extensions import Annotated
diff --git a/apiv2/validators/dataset_author.py b/apiv2/validators/dataset_author.py
index e871ab92d..8e605640a 100644
--- a/apiv2/validators/dataset_author.py
+++ b/apiv2/validators/dataset_author.py
@@ -8,8 +8,6 @@

 # ruff: noqa: E501 Line too long

-import typing
-import datetime
 import uuid

 from pydantic import BaseModel, ConfigDict, Field, StringConstraints
diff --git a/apiv2/validators/dataset_funding.py b/apiv2/validators/dataset_funding.py
index 75159aaff..4d31a764b 100644
--- a/apiv2/validators/dataset_funding.py
+++ b/apiv2/validators/dataset_funding.py
@@ -8,8 +8,6 @@

 # ruff: noqa: E501 Line too long

-import typing
-import datetime
 import uuid

 from pydantic import BaseModel, ConfigDict, Field, StringConstraints
diff --git a/apiv2/validators/deposition.py b/apiv2/validators/deposition.py
index 14c831376..ea857899f 100644
--- a/apiv2/validators/deposition.py
+++ b/apiv2/validators/deposition.py
@@ -8,9 +8,7 @@

 # ruff: noqa: E501 Line too long

-import typing
 import datetime
-import uuid

 from pydantic import BaseModel, ConfigDict, Field, StringConstraints
 from typing_extensions import Annotated
diff --git a/apiv2/validators/deposition_author.py b/apiv2/validators/deposition_author.py
index 6fcb6b591..75d995fba 100644
--- a/apiv2/validators/deposition_author.py
+++ b/apiv2/validators/deposition_author.py
@@ -8,8 +8,6 @@

 # ruff: noqa: E501 Line too long

-import typing
-import datetime
 import uuid

 from pydantic import BaseModel, ConfigDict, Field, StringConstraints
diff --git a/apiv2/validators/deposition_type.py b/apiv2/validators/deposition_type.py
index 7777b168a..01fb2a35a 100644
--- a/apiv2/validators/deposition_type.py
+++ b/apiv2/validators/deposition_type.py
@@ -8,13 +8,10 @@

 # ruff: noqa: E501 Line too long

-from support.enums import deposition_types_enum
-
-import typing
-import datetime
 import uuid

-from pydantic import BaseModel, ConfigDict, Field, StringConstraints
+from pydantic import BaseModel, ConfigDict, Field
+from support.enums import deposition_types_enum
 from typing_extensions import Annotated
diff --git a/apiv2/validators/frame.py b/apiv2/validators/frame.py
index bfb10d352..f0f26a2e0 100644
--- a/apiv2/validators/frame.py
+++ b/apiv2/validators/frame.py
@@ -8,8 +8,6 @@

 # ruff: noqa: E501 Line too long

-import typing
-import datetime
 import uuid

 from pydantic import BaseModel, ConfigDict, Field, StringConstraints
diff --git a/apiv2/validators/per_section_alignment_parameters.py b/apiv2/validators/per_section_alignment_parameters.py
index f1e06963e..f584e1ad1 100644
--- a/apiv2/validators/per_section_alignment_parameters.py
+++ b/apiv2/validators/per_section_alignment_parameters.py
@@ -8,11 +8,9 @@

 # ruff: noqa: E501 Line too long

-import typing
-import datetime
 import uuid

-from pydantic import BaseModel, ConfigDict, Field, StringConstraints
+from pydantic import BaseModel, ConfigDict, Field
 from typing_extensions import Annotated
diff --git a/apiv2/validators/per_section_parameters.py b/apiv2/validators/per_section_parameters.py
index 76c5e9fda..f2b4653a5 100644
--- a/apiv2/validators/per_section_parameters.py
+++ b/apiv2/validators/per_section_parameters.py
@@ -8,11 +8,9 @@

 # ruff: noqa: E501 Line too long

-import typing
-import datetime
 import uuid

-from pydantic import BaseModel, ConfigDict, Field, StringConstraints
+from pydantic import BaseModel, ConfigDict, Field
 from typing_extensions import Annotated
diff --git a/apiv2/validators/run.py b/apiv2/validators/run.py
index 35e14a75f..db754dd95 100644
--- a/apiv2/validators/run.py
+++ b/apiv2/validators/run.py
@@ -8,8 +8,6 @@

 # ruff: noqa: E501 Line too long

-import typing
-import datetime
 import uuid

 from pydantic import BaseModel, ConfigDict, Field, StringConstraints
diff --git a/apiv2/validators/tiltseries.py b/apiv2/validators/tiltseries.py
index 716153e9d..b518ba498 100644
--- a/apiv2/validators/tiltseries.py
+++ b/apiv2/validators/tiltseries.py
@@ -8,13 +8,10 @@

 # ruff: noqa: E501 Line too long

-from support.enums import tiltseries_microscope_manufacturer_enum
-
-import typing
-import datetime
 import uuid

 from pydantic import BaseModel, ConfigDict, Field, StringConstraints
+from support.enums import tiltseries_microscope_manufacturer_enum
 from typing_extensions import Annotated
diff --git a/apiv2/validators/tomogram.py b/apiv2/validators/tomogram.py
index 66de863fa..f9fdff306 100644
--- a/apiv2/validators/tomogram.py
+++ b/apiv2/validators/tomogram.py
@@ -8,18 +8,15 @@

 # ruff: noqa: E501 Line too long

+import uuid
+
+from pydantic import BaseModel, ConfigDict, Field, StringConstraints
 from support.enums import (
     fiducial_alignment_status_enum,
-    tomogram_reconstruction_method_enum,
     tomogram_processing_enum,
+    tomogram_reconstruction_method_enum,
     tomogram_type_enum,
 )
-
-import typing
-import datetime
-import uuid
-
-from pydantic import BaseModel, ConfigDict, Field, StringConstraints
 from typing_extensions import Annotated
diff --git a/apiv2/validators/tomogram_author.py b/apiv2/validators/tomogram_author.py
index a8f6d1e10..41fa9992b 100644
--- a/apiv2/validators/tomogram_author.py
+++ b/apiv2/validators/tomogram_author.py
@@ -8,8 +8,6 @@

 # ruff: noqa: E501 Line too long

-import typing
-import datetime
 import uuid

 from pydantic import BaseModel, ConfigDict, Field, StringConstraints
diff --git a/apiv2/validators/tomogram_voxel_spacing.py b/apiv2/validators/tomogram_voxel_spacing.py
index 04b6498de..72fee08cf 100644
--- a/apiv2/validators/tomogram_voxel_spacing.py
+++ b/apiv2/validators/tomogram_voxel_spacing.py
@@ -8,8 +8,6 @@

 # ruff: noqa: E501 Line too long

-import typing
-import datetime
 import uuid

 from pydantic import BaseModel, ConfigDict, Field, StringConstraints
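
For completeness, here is roughly what the queries.py.j2 template added earlier in this patch renders to for a hypothetical two-entity schema (Dataset and Run). The node/files stanzas stay commented out in the render and are omitted here, and the exact output depends on the codegen context, so treat this as a sketch rather than generated output:

import strawberry
from strawberry import relay  # imported by the template even when unused
from typing import Sequence, List

from graphql_api.types.dataset import Dataset, resolve_datasets, DatasetAggregate, resolve_datasets_aggregate
from graphql_api.types.run import Run, resolve_runs, RunAggregate, resolve_runs_aggregate


@strawberry.type
class Query:
    # Query entities
    datasets: Sequence[Dataset] = resolve_datasets
    runs: Sequence[Run] = resolve_runs

    # Query entity aggregates
    datasets_aggregate: DatasetAggregate = resolve_datasets_aggregate
    runs_aggregate: RunAggregate = resolve_runs_aggregate
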