Skip to content

Commit

Permalink
Linting and switch cache to positional argument
Browse files Browse the repository at this point in the history
  • Loading branch information
mikeoconnor0308 committed Jan 10, 2025
1 parent 76e3791 commit 97a732a
Show file tree
Hide file tree
Showing 13 changed files with 40 additions and 87 deletions.
2 changes: 1 addition & 1 deletion README.rst
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,7 @@ Or as a decorator
@cached(
ttl=10, cache=Cache.REDIS, key="key", serializer=PickleSerializer(), port=6379, namespace="main")
cache=RedisCache(), key="key", serializer=PickleSerializer(), port=6379, namespace="main")
async def cached_call():
print("Sleeping for three seconds zzzz.....")
await asyncio.sleep(3)
Expand Down
15 changes: 7 additions & 8 deletions aiocache/decorators.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,6 @@
import inspect
import logging

from aiocache.backends.memory import SimpleMemoryCache
from aiocache.base import SENTINEL
from aiocache.lock import RedLock

Expand All @@ -24,25 +23,25 @@ class cached:
cache (or `False` to store in the cache).
e.g. to avoid caching `None` results: `lambda r: r is None`
:param cache: cache instance to use when calling the ``set``/``get`` operations.
Default is :class:`aiocache.SimpleMemoryCache`.
:param noself: bool; if you are decorating a class method, by default self is also used to
    generate the key. This results in identical calls made by different class instances
    using different cache keys. Use noself=True if you want to ignore it.
"""

def __init__(
self,
cache,
*,
ttl=SENTINEL,
key_builder=None,
skip_cache_func=lambda x: False,
cache=None,
noself=False,
):
self.ttl = ttl
self.key_builder = key_builder
self.skip_cache_func = skip_cache_func
self.noself = noself
self.cache = cache or SimpleMemoryCache()
self.cache = cache

def __call__(self, f):
@functools.wraps(f)
Expand Down Expand Up @@ -200,6 +199,7 @@ class multi_cached:
value in the cache to be written. If set to False, the write
happens in the background. Enabled by default
:param cache: cache instance to use when calling the ``multi_set``/``multi_get`` operations.
:param keys_from_attr: name of the arg or kwarg in the decorated callable that contains
an iterable that yields the keys returned by the decorated callable.
:param key_builder: Callable that enables mapping the decorated function's keys to the keys
Expand All @@ -211,23 +211,22 @@ class multi_cached:
if that key-value pair should not be cached (or False to store in cache).
The keys and values to be passed are taken from the wrapped function result.
:param ttl: int seconds to store the keys. Default is 0 which means no expiration.
:param cache: cache class to use when calling the ``multi_set``/``multi_get`` operations.
Default is :class:`aiocache.SimpleMemoryCache`.
"""

def __init__(
self,
cache=None,
*,
keys_from_attr,
key_builder=None,
skip_cache_func=lambda k, v: False,
ttl=SENTINEL,
cache=None,
):
self.cache = cache
self.keys_from_attr = keys_from_attr
self.key_builder = key_builder or (lambda key, f, *args, **kwargs: key)
self.skip_cache_func = skip_cache_func
self.ttl = ttl
self.cache = cache or SimpleMemoryCache()

def __call__(self, f):
@functools.wraps(f)
Expand Down
1 change: 0 additions & 1 deletion docs/v1_migration.rst
Original file line number Diff line number Diff line change
Expand Up @@ -13,4 +13,3 @@ The abstraction and factories around cache instantiation have been removed in fa
* The `aiocache.Cache` class has been removed. Instead, use the specific cache class directly. For example, use `aiocache.RedisCache` instead of `aiocache.Cache.REDIS`.
* Caches should be fully instantiated when passed to decorators, rather than being instantiated with a factory function.
* Cache aliases have been removed. Create an instance of the cache class directly instead.

12 changes: 6 additions & 6 deletions examples/alt_key_builder.py
Original file line number Diff line number Diff line change
Expand Up @@ -202,7 +202,7 @@ async def demo_decorator_key_builders():

async def demo_ignore_kwargs_decorator():
"""Cache key from positional arguments in call to decorated function"""
@cached(key_builder=ignore_kwargs)
@cached(cache=SimpleMemoryCache(), key_builder=ignore_kwargs)
async def fn(a, b=2, c=3):
return (a, b)

Expand All @@ -221,7 +221,7 @@ async def fn(a, b=2, c=3):

await fn(*args, **kwargs)
cache = fn.cache
decorator = cached(key_builder=ignore_kwargs)
decorator = cached(cache=SimpleMemoryCache(), key_builder=ignore_kwargs)
key = decorator.get_cache_key(fn, args=args, kwargs=kwargs)
exists = await cache.exists(key)
assert exists is True
Expand All @@ -241,7 +241,7 @@ async def fn(a, b=2, c=3):

async def demo_module_override_decorator():
"""Cache key uses custom module name for decorated function"""
@cached(key_builder=module_override)
@cached(cache=SimpleMemoryCache(), key_builder=module_override)
async def fn(a, b=2, c=3):
return (a, b)

Expand All @@ -252,7 +252,7 @@ async def fn(a, b=2, c=3):

await fn(*args, **kwargs)
cache = fn.cache
decorator = cached(key_builder=module_override)
decorator = cached(cache=SimpleMemoryCache, key_builder=module_override)
key = decorator.get_cache_key(fn, args=args, kwargs=kwargs)
exists = await cache.exists(key)
assert exists is True
Expand All @@ -264,7 +264,7 @@ async def fn(a, b=2, c=3):

async def demo_structured_key_decorator():
"""Cache key expresses structure of decorated function call"""
@cached(key_builder=structured_key)
@cached(cache=SimpleMemoryCache(), key_builder=structured_key)
async def fn(a, b=2, c=3):
return (a, b)

Expand All @@ -278,7 +278,7 @@ async def fn(a, b=2, c=3):

await fn(*args, **kwargs)
cache = fn.cache
decorator = cached(key_builder=structured_key)
decorator = cached(cache=SimpleMemoryCache(), key_builder=structured_key)
key = decorator.get_cache_key(fn, args=args, kwargs=kwargs)
exists = await cache.exists(key)
assert exists is True
Expand Down
2 changes: 1 addition & 1 deletion examples/cached_decorator.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
cache = RedisCache(namespace="main", client=redis.Redis(), serializer=PickleSerializer())

@cached(
ttl=10, cache=cache, key_builder=lambda *args, **kw: "key")
cache, ttl=10, key_builder=lambda *args, **kw: "key")
async def cached_call():
return Result("content", 200)

Expand Down
7 changes: 4 additions & 3 deletions examples/frameworks/aiohttp_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,12 @@
import logging
from datetime import datetime
from aiohttp import web
from aiocache import cached
from aiocache import cached, SimpleMemoryCache
from aiocache.serializers import JsonSerializer

cache = SimpleMemoryCache(serializer=JsonSerializer())

@cached(key="function_key", serializer=JsonSerializer())
@cached(cache=cache, key_builder=lambda x: "time")
async def time():
return {"time": datetime.now().isoformat()}

Expand Down Expand Up @@ -38,7 +39,7 @@ async def get_from_cache(self, key):
return None


@CachedOverride(key="route_key", serializer=JsonSerializer())
@CachedOverride(cache=cache, key_builder="route")
async def handle2(request):
return web.json_response(await asyncio.sleep(3))

Expand Down
6 changes: 3 additions & 3 deletions examples/frameworks/sanic_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,21 +10,21 @@
from sanic import Sanic
from sanic.response import json
from sanic.log import logger
from aiocache import cached, Cache
from aiocache import cached, SimpleMemoryCache
from aiocache.serializers import JsonSerializer

app = Sanic(__name__)


@cached(key="my_custom_key", serializer=JsonSerializer())
@cached(SimpleMemoryCache(), key_builder = lambda x :"my_custom_key")
async def expensive_call():
logger.info("Expensive has been called")
await asyncio.sleep(3)
return {"test": True}


async def reuse_data():
cache = Cache(serializer=JsonSerializer()) # Not ideal to define here
cache = SimpleMemoryCache(serializer=JsonSerializer()) # Not ideal to define here
data = await cache.get("my_custom_key") # Note the key is defined in `cached` decorator
return data

Expand Down
6 changes: 3 additions & 3 deletions examples/frameworks/tornado_example.py
Original file line number Diff line number Diff line change
@@ -1,15 +1,15 @@
import tornado.web
import tornado.ioloop
from datetime import datetime
from aiocache import cached
from aiocache import cached, SimpleMemoryCache
from aiocache.serializers import JsonSerializer


class MainHandler(tornado.web.RequestHandler):

# Due to some incompatibilities between tornado and asyncio, caches can't use the "timeout" feature
# Due to some incompatibilities between tornado and asyncio, caches can't use the "ttl" feature
# in order to make it work, you will always have to specify it as 0
@cached(key="my_custom_key", serializer=JsonSerializer(), timeout=0)
@cached(SimpleMemoryCache(serializer=JsonSerializer, timeout=0), key_builder= lambda x : "my_custom_key")
async def time(self):
return {"time": datetime.now().isoformat()}

Expand Down
4 changes: 2 additions & 2 deletions examples/multicached_decorator.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,12 +15,12 @@
cache = RedisCache(namespace="main", client=redis.Redis())


@multi_cached("ids", cache=cache)
@multi_cached(cache=cache, keys_from_attr="ids")
async def multi_cached_ids(ids=None):
return {id_: DICT[id_] for id_ in ids}


@multi_cached("keys", cache=cache)
@multi_cached(cache=cache, keys_from_attr="keys")
async def multi_cached_keys(keys=None):
return {id_: DICT[id_] for id_ in keys}

Expand Down
14 changes: 7 additions & 7 deletions tests/acceptance/test_decorators.py
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ async def sk_func(x):
res = await sk_func(arg)
assert res

key = decorator().get_cache_key(sk_func, args=(1,), kwargs={})
key = decorator(cache=cache).get_cache_key(sk_func, args=(1,), kwargs={})

assert key
assert await cache.exists(key)
Expand All @@ -64,7 +64,7 @@ async def sk_func(x):

await sk_func(arg)

key = decorator().get_cache_key(sk_func, args=(-1,), kwargs={})
key = decorator(cache=cache).get_cache_key(sk_func, args=(-1,), kwargs={})

assert key
assert not await cache.exists(key)
Expand Down Expand Up @@ -137,7 +137,7 @@ async def cancel_task():

class TestMultiCachedDecorator:
async def test_multi_cached(self, cache):
multi_cached_decorator = multi_cached("keys", cache=cache)
multi_cached_decorator = multi_cached(cache, keys_from_attr="keys")

default_keys = {Keys.KEY, Keys.KEY_1}
await multi_cached_decorator(return_dict)(keys=default_keys)
Expand All @@ -146,7 +146,7 @@ async def test_multi_cached(self, cache):
assert await cache.get(key) is not None

async def test_keys_without_kwarg(self, cache):
@multi_cached("keys", cache=cache)
@multi_cached(cache, keys_from_attr="keys")
async def fn(keys):
return {Keys.KEY: 1}

Expand All @@ -166,7 +166,7 @@ async def fn(self, keys, market="ES"):
assert await cache.exists("fn_" + ensure_key(Keys.KEY_1) + "_ES") is True

async def test_multi_cached_skip_keys(self, cache):
@multi_cached(keys_from_attr="keys", cache=cache, skip_cache_func=lambda _, v: v is None)
@multi_cached(cache, keys_from_attr="keys", skip_cache_func=lambda _, v: v is None)
async def multi_sk_fn(keys, values):
return {k: v for k, v in zip(keys, values)}

Expand All @@ -179,7 +179,7 @@ async def multi_sk_fn(keys, values):
assert not await cache.exists(Keys.KEY_1)

async def test_fn_with_args(self, cache):
@multi_cached("keys", cache=cache)
@multi_cached(cache, keys_from_attr="keys")
async def fn(keys, *args):
assert len(args) == 1
return {Keys.KEY: 1}
Expand All @@ -195,7 +195,7 @@ async def wrapper(*args, **kwargs):
return wrapper

@dummy_d
@multi_cached("keys", cache=cache)
@multi_cached(cache, keys_from_attr="keys")
async def fn(keys):
return {Keys.KEY: 1}

Expand Down
4 changes: 2 additions & 2 deletions tests/performance/server.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,8 +25,8 @@ def __init__(self, backend: str):
from aiocache.backends.memcached import MemcachedCache
cache = MemcachedCache()
elif backend == "memory":
from aiocache.backends.memory import MemoryCache
cache = MemoryCache()
from aiocache.backends.memory import SimpleMemoryCache
cache = SimpleMemoryCache()
else:
raise ValueError("Invalid backend")
self.cache = cache
Expand Down
8 changes: 0 additions & 8 deletions tests/ut/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,17 +4,9 @@
import pytest

from aiocache.plugins import BasePlugin
from aiocache.serializers import NullSerializer
from ..utils import AbstractBaseCache, ConcreteBaseCache


@pytest.fixture()
def simple_memory_cache():
from aiocache.backends.memory import SimpleMemoryCache

return SimpleMemoryCache(serializer=NullSerializer())


@pytest.fixture
def mock_cache(mocker):
return create_autospec(ConcreteBaseCache())
Expand Down
46 changes: 4 additions & 42 deletions tests/ut/test_decorators.py
Original file line number Diff line number Diff line change
Expand Up @@ -187,20 +187,6 @@ async def what():

assert mock_cache.get.call_count == 2

async def test_cache_per_function_by_default(self):
"""Tests that by default, each function has its own SimpleMemoryCache instance."""
@cached()
async def foo():
"""First function."""

@cached()
async def bar():
"""Second function."""

assert isinstance(foo.cache, SimpleMemoryCache)
assert isinstance(bar.cache, SimpleMemoryCache)
assert foo.cache != bar.cache


class TestCachedStampede:
@pytest.fixture
Expand All @@ -216,8 +202,8 @@ def spy_stub(self, mocker):
module = sys.modules[globals()["__name__"]]
mocker.spy(module, "stub")

def test_inheritance(self):
assert isinstance(cached_stampede(), cached)
def test_inheritance(self, mock_cache):
assert isinstance(cached_stampede(mock_cache), cached)

def test_init(self):
cache = SimpleMemoryCache()
Expand Down Expand Up @@ -491,32 +477,8 @@ async def what(self, keys=None, what=1):
assert str(inspect.signature(what)) == "(self, keys=None, what=1)"
assert inspect.getfullargspec(what.__wrapped__).args == ["self", "keys", "what"]

async def test_reuses_cache_instance(self, mock_cache):
# TODO @review can probably just remove this test.
mock_cache.multi_get.return_value = [None]

@multi_cached("keys", cache=mock_cache)
async def what(keys=None):
return {}

await what(keys=["a"])
await what(keys=["a"])

assert mock_cache.multi_get.call_count == 2

async def test_cache_per_function(self):
@multi_cached("keys")
async def foo():
"""First function."""

@multi_cached("keys")
async def bar():
"""Second function."""

assert foo.cache != bar.cache

async def test_key_builder(self):
@multi_cached("keys", key_builder=lambda key, _, keys: key + 1)
async def test_key_builder(self, mock_cache):
@multi_cached(mock_cache, keys_from_attr="keys", key_builder=lambda key, _, keys: key + 1)
async def f(keys=None):
return {k: k * 3 for k in keys}

Expand Down

0 comments on commit 97a732a

Please sign in to comment.