feat: add benchmarks for sync engine operations
art049 committed Nov 1, 2023
1 parent 739a683 commit a751ba0
Showing 7 changed files with 266 additions and 1 deletion.
41 changes: 41 additions & 0 deletions .github/workflows/codspeed.yml
@@ -0,0 +1,41 @@
name: CodSpeed

on:
# Run on pushes to the main branch
push:
branches:
- "master" # or "main"
# Run on pull requests
pull_request:
# `workflow_dispatch` allows CodSpeed to trigger backtest
# performance analysis in order to generate initial data.
workflow_dispatch:

jobs:
benchmarks:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: 3.12
cache: pip
cache-dependency-path: "pyproject.toml"
- name: Mongo Service
id: mongo-service
uses: art049/mongodb-cluster-action@v0
with:
version: "4.2"
mode: "sharded"
- name: Install dependencies
run: |
pip install flit
pip install ".[test]"
- name: Run benches
uses: CodSpeedHQ/action@v1
with:
run: pytest tests/integration/benchmarks --codspeed
env:
TEST_MONGO_URI: ${{ steps.mongo-service.outputs.connection-string }}
TEST_MONGO_MODE: "sharded"
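
The workflow hands the MongoDB connection string to the test run through the TEST_MONGO_URI environment variable. The benchmarks below rely on aio_engine and sync_engine fixtures that are defined elsewhere in the test suite and are not part of this diff. A minimal sketch of how such fixtures could consume that variable follows; the fixture bodies and the database name are assumptions, not the repository's actual conftest code.

# Hypothetical conftest sketch (assumption, not part of this commit): build the
# engines from the TEST_MONGO_URI exported by the workflow above.
import os

import pytest
from motor.motor_asyncio import AsyncIOMotorClient
from pymongo import MongoClient

from odmantic import AIOEngine, SyncEngine


@pytest.fixture
def sync_engine() -> SyncEngine:
    uri = os.environ.get("TEST_MONGO_URI", "mongodb://localhost:27017/")
    return SyncEngine(client=MongoClient(uri), database="odmantic_bench")


@pytest.fixture
def aio_engine() -> AIOEngine:
    uri = os.environ.get("TEST_MONGO_URI", "mongodb://localhost:27017/")
    return AIOEngine(client=AsyncIOMotorClient(uri), database="odmantic_bench")
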
10 changes: 9 additions & 1 deletion Taskfile.yml
@@ -27,6 +27,14 @@ tasks:
cmds:
- pytest -rs -n auto

bench:
desc: |
Run the benches with the current version.
deps:
- task: "mongodb:check"
cmds:
- pytest --benchmark-enable --benchmark-only

default:
desc: |
Run the tests related to changes with the current version.
@@ -92,7 +100,7 @@ tasks:

setup:deps-setup:
cmds:
- flit install --deps=all
- flit install --deps=all --python python
sources:
- pyproject.toml

3 changes: 3 additions & 0 deletions pyproject.toml
@@ -64,6 +64,8 @@ test = [
"typer ~= 0.4.1",
"semver ~= 2.13.0",
"types-pytz ~= 2022.1.1",
"pytest-benchmark ~= 4.0.0",
"pytest-codspeed ~= 2.1.0",
]
doc = [
"pydocstyle[toml] ~= 6.1.1",
@@ -132,6 +134,7 @@ filterwarnings = [
"ignore:the AIOEngineDependency object is deprecated.*:DeprecationWarning:odmantic.*",
]
pythonpath = "src tests"
addopts = "--benchmark-disable"
[tool.coverage.run]
branch = true
[tool.coverage.report]
Empty file.
62 changes: 62 additions & 0 deletions tests/integration/benchmarks/models.py
@@ -0,0 +1,62 @@
"""Models based on https://github.com/tortoise/orm-benchmarks"""

from datetime import datetime
from decimal import Decimal
from random import choice
from typing import Iterator, Literal, Optional, get_args

from odmantic import Field, Model

Level = Literal[10, 20, 30, 40, 50]
VALID_LEVELS = list(get_args(Level))


class SmallJournal(Model):
timestamp: datetime = Field(default_factory=datetime.utcnow)
level: Level = Field(index=True)
text: str = Field(index=True)

@classmethod
def get_random_instances(cls, context: str, count: int) -> Iterator["SmallJournal"]:
for i in range(count):
yield cls(level=choice(VALID_LEVELS), text=f"From {context}, item {i}")


class JournalWithRelations(Model):
timestamp: datetime = Field(default_factory=datetime.utcnow)
level: Level = Field(index=True)
text: str = Field(index=True)

# parent


class BigJournal(Model):
timestamp: datetime = Field(default_factory=datetime.utcnow)
level: Level = Field(index=True)
text: str = Field(index=True)

col_float1: float = Field(default=2.2)
col_smallint1: int = Field(default=2)
col_int1: int = Field(default=2000000)
col_bigint1: int = Field(default=99999999)
col_char1: str = Field(default=255, max_length=255)
col_text1: str = Field(
default="Moo,Foo,Baa,Waa,Moo,Foo,Baa,Waa,Moo,Foo,Baa,Waa",
)
col_decimal1: Decimal = Field(default=Decimal("2.2"))
col_json1: dict = Field(
default={"a": 1, "b": "b", "c": [2], "d": {"e": 3}, "f": True},
)

col_float2: Optional[float] = Field(default=None)
col_smallint2: Optional[int] = Field(default=None)
col_int2: Optional[int] = Field(default=None)
col_bigint2: Optional[int] = Field(default=None)
col_char2: Optional[str] = Field(default=None, max_length=255)
col_text2: Optional[str] = Field(
default=None,
)
col_decimal2: Optional[Decimal] = Field(default=None)
col_json2: Optional[dict] = Field(
default=None,
)
77 changes: 77 additions & 0 deletions tests/integration/benchmarks/test_bench_async.py
@@ -0,0 +1,77 @@
import pytest

from odmantic import AIOEngine

from .models import VALID_LEVELS, SmallJournal

pytestmark = [
pytest.mark.asyncio,
pytest.mark.skip("@benchmark does not support async functions yet"),
]


@pytest.fixture(params=[10, 50, 100])
def count(request):
return request.param


async def test_insert_small_single(benchmark, aio_engine: AIOEngine, count: int):
instances = list(SmallJournal.get_random_instances("test_write_small", count))

@benchmark
async def _():
for instance in instances:
await aio_engine.save(instance)


async def test_write_small_bulk(
benchmark,
aio_engine: AIOEngine,
count: int,
):
instances = list(SmallJournal.get_random_instances("test_write_small", count))

@benchmark
async def _():
await aio_engine.save_all(instances)


async def test_filter_by_level_small(benchmark, aio_engine: AIOEngine, count: int):
instances = list(SmallJournal.get_random_instances("test_write_small", count))
await aio_engine.save_all(instances)

@benchmark
async def _():
total = 0
for level in VALID_LEVELS:
total += len(
await aio_engine.find(SmallJournal, SmallJournal.level == level)
)


async def test_filter_limit_skip_by_level_small(
benchmark, aio_engine: AIOEngine, count: int
):
instances = list(SmallJournal.get_random_instances("test_write_small", count))
await aio_engine.save_all(instances)

@benchmark
async def _():
total = 0
for level in VALID_LEVELS:
total += len(
await aio_engine.find(
SmallJournal, SmallJournal.level == level, limit=20, skip=20
)
)


async def test_find_one_by_id(benchmark, aio_engine: AIOEngine, count: int):
instances = list(SmallJournal.get_random_instances("test_write_small", count))
await aio_engine.save_all(instances)
ids = [instance.id for instance in instances]

@benchmark
async def _():
for id_ in ids:
await aio_engine.find_one(SmallJournal, SmallJournal.id == id_)
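
This module is skipped because the benchmark fixture cannot call coroutine functions directly. A rough workaround, sketched below and not part of this commit, is to run each coroutine to completion inside a synchronous callable that the fixture can measure; the explicit event-loop handling and the local-MongoDB default are assumptions.

# Hypothetical workaround (assumption, not in this commit): benchmark an async
# bulk insert by driving it to completion from a synchronous callable.
import asyncio

from odmantic import AIOEngine

from .models import SmallJournal


def test_write_small_bulk_wrapped(benchmark, count: int):
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)  # the motor client will bind to this loop
    engine = AIOEngine()  # assumes a local MongoDB on the default port
    instances = list(SmallJournal.get_random_instances("test_write_small", count))

    # The lambda is synchronous, so pytest-benchmark/pytest-codspeed can time it.
    benchmark(lambda: loop.run_until_complete(engine.save_all(instances)))

    loop.close()
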
74 changes: 74 additions & 0 deletions tests/integration/benchmarks/test_bench_sync.py
@@ -0,0 +1,74 @@
import pytest

from odmantic import SyncEngine

from .models import VALID_LEVELS, SmallJournal


@pytest.fixture(params=[10, 50, 100])
def count(request):
return request.param


def test_insert_small_single(benchmark, sync_engine: SyncEngine, count: int):
instances = list(SmallJournal.get_random_instances("test_write_small", count))

@benchmark
def _():
for instance in instances:
sync_engine.save(instance)


def test_write_small_bulk(
benchmark,
sync_engine: SyncEngine,
count: int,
):
instances = list(SmallJournal.get_random_instances("test_write_small", count))

@benchmark
def _():
sync_engine.save_all(instances)


def test_filter_by_level_small(benchmark, sync_engine: SyncEngine, count: int):
instances = list(SmallJournal.get_random_instances("test_write_small", count))
sync_engine.save_all(instances)

@benchmark
def _():
total = 0
for level in VALID_LEVELS:
total += len(
list(sync_engine.find(SmallJournal, SmallJournal.level == level))
)


def test_filter_limit_skip_by_level_small(
benchmark, sync_engine: SyncEngine, count: int
):
instances = list(SmallJournal.get_random_instances("test_write_small", count))
sync_engine.save_all(instances)

@benchmark
def _():
total = 0
for level in VALID_LEVELS:
total += len(
list(
sync_engine.find(
SmallJournal, SmallJournal.level == level, limit=20, skip=20
)
)
)


def test_find_one_by_id(benchmark, sync_engine: SyncEngine, count: int):
instances = list(SmallJournal.get_random_instances("test_write_small", count))
sync_engine.save_all(instances)
ids = [instance.id for instance in instances]

@benchmark
def _():
for id_ in ids:
sync_engine.find_one(SmallJournal, SmallJournal.id == id_)
