From a751ba0a855cb057e9d00e1934a65fe97bfa8773 Mon Sep 17 00:00:00 2001
From: Arthur Pastel
Date: Wed, 1 Nov 2023 20:03:50 +0100
Subject: [PATCH] feat: add benchmarks for sync engine operations

---
 .github/workflows/codspeed.yml                | 41 ++++++++++
 Taskfile.yml                                  | 10 ++-
 pyproject.toml                                |  3 +
 tests/integration/benchmarks/__init__.py      |  0
 tests/integration/benchmarks/models.py        | 62 +++++++++++++++
 .../benchmarks/test_bench_async.py            | 77 +++++++++++++++++++
 .../integration/benchmarks/test_bench_sync.py | 74 ++++++++++++++++++
 7 files changed, 266 insertions(+), 1 deletion(-)
 create mode 100644 .github/workflows/codspeed.yml
 create mode 100644 tests/integration/benchmarks/__init__.py
 create mode 100644 tests/integration/benchmarks/models.py
 create mode 100644 tests/integration/benchmarks/test_bench_async.py
 create mode 100644 tests/integration/benchmarks/test_bench_sync.py

diff --git a/.github/workflows/codspeed.yml b/.github/workflows/codspeed.yml
new file mode 100644
index 00000000..fa404286
--- /dev/null
+++ b/.github/workflows/codspeed.yml
@@ -0,0 +1,41 @@
+name: CodSpeed
+
+on:
+  # Run on pushes to the main branch
+  push:
+    branches:
+      - "master" # or "main"
+  # Run on pull requests
+  pull_request:
+  # `workflow_dispatch` allows CodSpeed to trigger backtest
+  # performance analysis in order to generate initial data.
+  workflow_dispatch:
+
+jobs:
+  benchmarks:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: Set up Python
+        uses: actions/setup-python@v2
+        with:
+          python-version: 3.12
+          cache: pip
+          cache-dependency-path: "pyproject.toml"
+      - name: Mongo Service
+        id: mongo-service
+        uses: art049/mongodb-cluster-action@v0
+        with:
+          version: "4.2"
+          mode: "sharded"
+      - name: Install dependencies
+        run: |
+          pip install flit
+          pip install ".[test]"
+      - name: Run benches
+        uses: CodSpeedHQ/action@v1
+        with:
+          run: pytest tests/integration/benchmarks --codspeed
+        env:
+          TEST_MONGO_URI: ${{ steps.mongo-service.outputs.connection-string }}
+          TEST_MONGO_MODE: "sharded"
diff --git a/Taskfile.yml b/Taskfile.yml
index d6a71b27..22b835df 100644
--- a/Taskfile.yml
+++ b/Taskfile.yml
@@ -27,6 +27,14 @@ tasks:
     cmds:
       - pytest -rs -n auto
 
+  bench:
+    desc: |
+      Run the benches with the current version.
+    deps:
+      - task: "mongodb:check"
+    cmds:
+      - pytest --benchmark-enable --benchmark-only
+
   default:
     desc: |
       Run the tests related to changes with the current version.
@@ -92,7 +100,7 @@ tasks:
 
   setup:deps-setup:
     cmds:
-      - flit install --deps=all
+      - flit install --deps=all --python python
     sources:
       - pyproject.toml
 
diff --git a/pyproject.toml b/pyproject.toml
index cea5bcd7..4e77709b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -64,6 +64,8 @@ test = [
     "typer ~= 0.4.1",
     "semver ~= 2.13.0",
     "types-pytz ~= 2022.1.1",
+    "pytest-benchmark ~= 4.0.0",
+    "pytest-codspeed ~= 2.1.0",
 ]
 doc = [
     "pydocstyle[toml] ~= 6.1.1",
@@ -132,6 +134,7 @@ filterwarnings = [
     "ignore:the AIOEngineDependency object is deprecated.*:DeprecationWarning:odmantic.*",
 ]
 pythonpath = "src tests"
+addopts = "--benchmark-disable"
 [tool.coverage.run]
 branch = true
 [tool.coverage.report]
diff --git a/tests/integration/benchmarks/__init__.py b/tests/integration/benchmarks/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/integration/benchmarks/models.py b/tests/integration/benchmarks/models.py
new file mode 100644
index 00000000..04f23b2c
--- /dev/null
+++ b/tests/integration/benchmarks/models.py
@@ -0,0 +1,62 @@
+"""Models based on https://github.com/tortoise/orm-benchmarks"""
+
+from datetime import datetime
+from decimal import Decimal
+from random import choice
+from typing import Iterator, Literal, Optional, get_args
+
+from odmantic import Field, Model
+
+Level = Literal[10, 20, 30, 40, 50]
+VALID_LEVELS = list(get_args(Level))
+
+
+class SmallJournal(Model):
+    timestamp: datetime = Field(default_factory=datetime.utcnow)
+    level: Level = Field(index=True)
+    text: str = Field(index=True)
+
+    @classmethod
+    def get_random_instances(cls, context: str, count: int) -> Iterator["SmallJournal"]:
+        for i in range(count):
+            yield cls(level=choice(VALID_LEVELS), text=f"From {context}, item {i}")
+
+
+class JournalWithRelations(Model):
+    timestamp: datetime = Field(default_factory=datetime.utcnow)
+    level: Level = Field(index=True)
+    text: str = Field(index=True)
+
+    # parent
+
+
+class BigJournal(Model):
+    timestamp: datetime = Field(default_factory=datetime.utcnow)
+    level: Level = Field(index=True)
+    text: str = Field(index=True)
+
+    col_float1: float = Field(default=2.2)
+    col_smallint1: int = Field(default=2)
+    col_int1: int = Field(default=2000000)
+    col_bigint1: int = Field(default=99999999)
+    col_char1: str = Field(default="255", max_length=255)
+    col_text1: str = Field(
+        default="Moo,Foo,Baa,Waa,Moo,Foo,Baa,Waa,Moo,Foo,Baa,Waa",
+    )
+    col_decimal1: Decimal = Field(default=Decimal("2.2"))
+    col_json1: dict = Field(
+        default={"a": 1, "b": "b", "c": [2], "d": {"e": 3}, "f": True},
+    )
+
+    col_float2: Optional[float] = Field(default=None)
+    col_smallint2: Optional[int] = Field(default=None)
+    col_int2: Optional[int] = Field(default=None)
+    col_bigint2: Optional[int] = Field(default=None)
+    col_char2: Optional[str] = Field(default=None, max_length=255)
+    col_text2: Optional[str] = Field(
+        default=None,
+    )
+    col_decimal2: Optional[Decimal] = Field(default=None)
+    col_json2: Optional[dict] = Field(
+        default=None,
+    )
diff --git a/tests/integration/benchmarks/test_bench_async.py b/tests/integration/benchmarks/test_bench_async.py
new file mode 100644
index 00000000..db595856
--- /dev/null
+++ b/tests/integration/benchmarks/test_bench_async.py
@@ -0,0 +1,77 @@
+import pytest
+
+from odmantic import AIOEngine
+
+from .models import VALID_LEVELS, SmallJournal
+
+pytestmark = [
+    pytest.mark.asyncio,
+    pytest.mark.skip("@benchmark does not support async functions yet"),
+]
+
+
+@pytest.fixture(params=[10, 50, 100])
+def count(request):
+    return request.param
+
+
+async def test_insert_small_single(benchmark, aio_engine: AIOEngine, count: int):
+    instances = list(SmallJournal.get_random_instances("test_write_small", count))
+
+    @benchmark
+    async def _():
+        for instance in instances:
+            await aio_engine.save(instance)
+
+
+async def test_write_small_bulk(
+    benchmark,
+    aio_engine: AIOEngine,
+    count: int,
+):
+    instances = list(SmallJournal.get_random_instances("test_write_small", count))
+
+    @benchmark
+    async def _():
+        await aio_engine.save_all(instances)
+
+
+async def test_filter_by_level_small(benchmark, aio_engine: AIOEngine, count: int):
+    instances = list(SmallJournal.get_random_instances("test_write_small", count))
+    await aio_engine.save_all(instances)
+
+    @benchmark
+    async def _():
+        total = 0
+        for level in VALID_LEVELS:
+            total += len(
+                await aio_engine.find(SmallJournal, SmallJournal.level == level)
+            )
+
+
+async def test_filter_limit_skip_by_level_small(
+    benchmark, aio_engine: AIOEngine, count: int
+):
+    instances = list(SmallJournal.get_random_instances("test_write_small", count))
+    await aio_engine.save_all(instances)
+
+    @benchmark
+    async def _():
+        total = 0
+        for level in VALID_LEVELS:
+            total += len(
+                await aio_engine.find(
+                    SmallJournal, SmallJournal.level == level, limit=20, skip=20
+                )
+            )
+
+
+async def test_find_one_by_id(benchmark, aio_engine: AIOEngine, count: int):
+    instances = list(SmallJournal.get_random_instances("test_write_small", count))
+    await aio_engine.save_all(instances)
+    ids = [instance.id for instance in instances]
+
+    @benchmark
+    async def _():
+        for id_ in ids:
+            await aio_engine.find_one(SmallJournal, SmallJournal.id == id_)
diff --git a/tests/integration/benchmarks/test_bench_sync.py b/tests/integration/benchmarks/test_bench_sync.py
new file mode 100644
index 00000000..c2dfbc37
--- /dev/null
+++ b/tests/integration/benchmarks/test_bench_sync.py
@@ -0,0 +1,74 @@
+import pytest
+
+from odmantic import SyncEngine
+
+from .models import VALID_LEVELS, SmallJournal
+
+
+@pytest.fixture(params=[10, 50, 100])
+def count(request):
+    return request.param
+
+
+def test_insert_small_single(benchmark, sync_engine: SyncEngine, count: int):
+    instances = list(SmallJournal.get_random_instances("test_write_small", count))
+
+    @benchmark
+    def _():
+        for instance in instances:
+            sync_engine.save(instance)
+
+
+def test_write_small_bulk(
+    benchmark,
+    sync_engine: SyncEngine,
+    count: int,
+):
+    instances = list(SmallJournal.get_random_instances("test_write_small", count))
+
+    @benchmark
+    def _():
+        sync_engine.save_all(instances)
+
+
+def test_filter_by_level_small(benchmark, sync_engine: SyncEngine, count: int):
+    instances = list(SmallJournal.get_random_instances("test_write_small", count))
+    sync_engine.save_all(instances)
+
+    @benchmark
+    def _():
+        total = 0
+        for level in VALID_LEVELS:
+            total += len(
+                list(sync_engine.find(SmallJournal, SmallJournal.level == level))
+            )
+
+
+def test_filter_limit_skip_by_level_small(
+    benchmark, sync_engine: SyncEngine, count: int
+):
+    instances = list(SmallJournal.get_random_instances("test_write_small", count))
+    sync_engine.save_all(instances)
+
+    @benchmark
+    def _():
+        total = 0
+        for level in VALID_LEVELS:
+            total += len(
+                list(
+                    sync_engine.find(
+                        SmallJournal, SmallJournal.level == level, limit=20, skip=20
+                    )
+                )
+            )
+
+
+def test_find_one_by_id(benchmark, sync_engine: SyncEngine, count: int):
+    instances = list(SmallJournal.get_random_instances("test_write_small", count))
+    sync_engine.save_all(instances)
+    ids = [instance.id for instance in instances]
+
+    @benchmark
+    def _():
+        for id_ in ids:
+            sync_engine.find_one(SmallJournal, SmallJournal.id == id_)
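
The `@benchmark` pattern used throughout these tests is the pytest-benchmark
fixture applied as a decorator: decorating a closure hands it to
`benchmark(...)`, which runs and times it on the spot. A minimal standalone
sketch (hypothetical test file, not part of this patch):

    # test_sketch.py -- requires pytest-benchmark
    def test_sum(benchmark):
        # benchmark(fn) calls fn repeatedly under timing and returns its
        # result; the decorator form submits the closure as soon as it is
        # defined.
        @benchmark
        def _():
            return sum(range(1_000))

This is also why the async variants carry a skip marker: calling an async
closure through `benchmark(...)` would only time coroutine creation, not its
execution. Locally, benchmarks stay off by default via
`addopts = "--benchmark-disable"` and can be re-enabled with
`pytest tests/integration/benchmarks --benchmark-enable --benchmark-only`
(with TEST_MONGO_URI pointing at a running MongoDB) or through the new
`task bench` entry.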