diff --git a/.coveragerc b/.coveragerc index b9823a19..e1b64a65 100644 --- a/.coveragerc +++ b/.coveragerc @@ -1,3 +1,10 @@ [run] branch = True -concurrency = multiprocessing +source = aiocache, tests + +[report] +exclude_also = + if TYPE_CHECKING + assert False + : \.\.\.(\s*#.*)?$ + ^ +\.\.\.$ diff --git a/.flake8 b/.flake8 index 50ce4aaa..9d1c63c2 100644 --- a/.flake8 +++ b/.flake8 @@ -19,4 +19,5 @@ import-order-style = pycharm # flake8-quotes inline-quotes = " # flake8-requirements +known-modules = valkey-glide:[glide] requirements-file = requirements-dev.txt diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2e43b8d7..43ff0403 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -51,26 +51,26 @@ jobs: matrix: os: [ubuntu] pyver: ['3.9', '3.10', '3.11', '3.12', '3.13'] - redis: ['latest'] + valkey: ['latest'] ujson: [''] include: - os: ubuntu pyver: pypy-3.9 - redis: 'latest' + valkey: 'latest' - os: ubuntu pyver: '3.9' - redis: '5.0.14' + valkey: '7.2.8' - os: ubuntu pyver: '3.9' - redis: 'latest' + valkey: 'latest' ujson: 'ujson' services: - redis: - image: redis:${{ matrix.redis }} + valkey: + image: valkey/valkey:${{ matrix.valkey }} ports: - 6379:6379 options: >- - --health-cmd "redis-cli ping" + --health-cmd "valkey-cli ping" --health-interval 10s --health-timeout 5s --health-retries 5 @@ -104,12 +104,12 @@ jobs: - name: Run functional tests run: bash examples/run_all.sh - name: Uninstall optional backends - run: pip uninstall -y aiomcache redis + run: pip uninstall -y aiomcache valkey-glide - name: Run unittests with minimal backend set env: COLOR: 'yes' run: | - pytest --cov-report xml --cov-report html --cov-append tests/acceptance tests/ut -m "not memcached and not redis" --ignore "tests/ut/backends/test_memcached.py" --ignore "tests/ut/backends/test_redis.py" + pytest --cov-report xml --cov-report html --cov-append tests/acceptance tests/ut -m "not memcached and not valkey" --ignore "tests/ut/backends/test_memcached.py" --ignore "tests/ut/backends/test_valkey.py" - name: Produce coverage report run: python -m coverage xml - name: Upload coverage diff --git a/Makefile b/Makefile index c2f83481..2eb6b403 100644 --- a/Makefile +++ b/Makefile @@ -5,7 +5,7 @@ lint: flake8 tests/ aiocache/ install-dev: - pip install -e .[redis,memcached,msgpack,dev] + pip install -e .[valkey,memcached,msgpack,dev] pylint: pylint --disable=C0111 aiocache diff --git a/aiocache/__init__.py b/aiocache/__init__.py index 4b5abe2f..6e8715d0 100644 --- a/aiocache/__init__.py +++ b/aiocache/__init__.py @@ -11,14 +11,14 @@ _AIOCACHE_CACHES: list[Type[BaseCache[Any]]] = [SimpleMemoryCache] try: - import redis + import glide except ImportError: - logger.debug("redis not installed, RedisCache unavailable") + logger.debug("glide not installed, ValkeyCache unavailable") else: - from aiocache.backends.redis import RedisCache + from aiocache.backends.valkey import ValkeyCache - _AIOCACHE_CACHES.append(RedisCache) - del redis + _AIOCACHE_CACHES.append(ValkeyCache) + del glide try: import aiomcache diff --git a/aiocache/backends/redis.py b/aiocache/backends/redis.py deleted file mode 100644 index 22f175ed..00000000 --- a/aiocache/backends/redis.py +++ /dev/null @@ -1,209 +0,0 @@ -import itertools -from typing import Any, Callable, Optional, TYPE_CHECKING - -import redis.asyncio as redis -from redis.exceptions import ResponseError as IncrbyException - -from aiocache.base import BaseCache -from aiocache.serializers import JsonSerializer - -if TYPE_CHECKING: # pragma: no cover - 
from aiocache.serializers import BaseSerializer - - -class RedisBackend(BaseCache[str]): - RELEASE_SCRIPT = ( - "if redis.call('get',KEYS[1]) == ARGV[1] then" - " return redis.call('del',KEYS[1])" - " else" - " return 0" - " end" - ) - - CAS_SCRIPT = ( - "if redis.call('get',KEYS[1]) == ARGV[2] then" - " if #ARGV == 4 then" - " return redis.call('set', KEYS[1], ARGV[1], ARGV[3], ARGV[4])" - " else" - " return redis.call('set', KEYS[1], ARGV[1])" - " end" - " else" - " return 0" - " end" - ) - - def __init__( - self, - client: redis.Redis, - **kwargs, - ): - super().__init__(**kwargs) - - # NOTE: decoding can't be controlled on API level after switching to - # redis, we need to disable decoding on global/connection level - # (decode_responses=False), because some of the values are saved as - # bytes directly, like pickle serialized values, which may raise an - # exception when decoded with 'utf-8'. - if client.connection_pool.connection_kwargs['decode_responses']: - raise ValueError("redis client must be constructed with decode_responses set to False") - self.client = client - - async def _get(self, key, encoding="utf-8", _conn=None): - value = await self.client.get(key) - if encoding is None or value is None: - return value - return value.decode(encoding) - - async def _gets(self, key, encoding="utf-8", _conn=None): - return await self._get(key, encoding=encoding, _conn=_conn) - - async def _multi_get(self, keys, encoding="utf-8", _conn=None): - values = await self.client.mget(*keys) - if encoding is None: - return values - return [v if v is None else v.decode(encoding) for v in values] - - async def _set(self, key, value, ttl=None, _cas_token=None, _conn=None): - if _cas_token is not None: - return await self._cas(key, value, _cas_token, ttl=ttl, _conn=_conn) - if ttl is None: - return await self.client.set(key, value) - if isinstance(ttl, float): - ttl = int(ttl * 1000) - return await self.client.psetex(key, ttl, value) - return await self.client.setex(key, ttl, value) - - async def _cas(self, key, value, token, ttl=None, _conn=None): - args = () - if ttl is not None: - args = ("PX", int(ttl * 1000)) if isinstance(ttl, float) else ("EX", ttl) - return await self._raw("eval", self.CAS_SCRIPT, 1, key, value, token, *args, _conn=_conn) - - async def _multi_set(self, pairs, ttl=None, _conn=None): - ttl = ttl or 0 - - flattened = list(itertools.chain.from_iterable((key, value) for key, value in pairs)) - - if ttl: - await self.__multi_set_ttl(flattened, ttl) - else: - await self.client.execute_command("MSET", *flattened) - - return True - - async def __multi_set_ttl(self, flattened, ttl): - async with self.client.pipeline(transaction=True) as p: - p.execute_command("MSET", *flattened) - ttl, exp = (int(ttl * 1000), p.pexpire) if isinstance(ttl, float) else (ttl, p.expire) - for key in flattened[::2]: - exp(key, time=ttl) - await p.execute() - - async def _add(self, key, value, ttl=None, _conn=None): - kwargs = {"nx": True} - if isinstance(ttl, float): - kwargs["px"] = int(ttl * 1000) - else: - kwargs["ex"] = ttl - was_set = await self.client.set(key, value, **kwargs) - if not was_set: - raise ValueError("Key {} already exists, use .set to update the value".format(key)) - return was_set - - async def _exists(self, key, _conn=None): - number = await self.client.exists(key) - return bool(number) - - async def _increment(self, key, delta, _conn=None): - try: - return await self.client.incrby(key, delta) - except IncrbyException: - raise TypeError("Value is not an integer") from None - - async def 
_expire(self, key, ttl, _conn=None): - if ttl == 0: - return await self.client.persist(key) - return await self.client.expire(key, ttl) - - async def _delete(self, key, _conn=None): - return await self.client.delete(key) - - async def _clear(self, namespace=None, _conn=None): - if namespace: - keys = await self.client.keys("{}:*".format(namespace)) - if keys: - await self.client.delete(*keys) - else: - await self.client.flushdb() - return True - - async def _raw(self, command, *args, encoding="utf-8", _conn=None, **kwargs): - value = await getattr(self.client, command)(*args, **kwargs) - if encoding is not None: - if command == "get" and value is not None: - value = value.decode(encoding) - elif command in {"keys", "mget"}: - value = [v if v is None else v.decode(encoding) for v in value] - return value - - async def _redlock_release(self, key, value): - return await self._raw("eval", self.RELEASE_SCRIPT, 1, key, value) - - def build_key(self, key: str, namespace: Optional[str] = None) -> str: - return self._str_build_key(key, namespace) - - -class RedisCache(RedisBackend): - """ - Redis cache implementation with the following components as defaults: - - serializer: :class:`aiocache.serializers.JsonSerializer` - - plugins: [] - - Config options are: - - :param serializer: obj derived from :class:`aiocache.serializers.BaseSerializer`. - :param plugins: list of :class:`aiocache.plugins.BasePlugin` derived classes. - :param namespace: string to use as default prefix for the key used in all operations of - the backend. Default is an empty string, "". - :param timeout: int or float in seconds specifying maximum timeout for the operations to last. - By default its 5. - :param client: redis.Redis which is an active client for working with redis - """ - - NAME = "redis" - - def __init__( - self, - client: redis.Redis, - serializer: Optional["BaseSerializer"] = None, - namespace: str = "", - key_builder: Callable[[str, str], str] = lambda k, ns: f"{ns}:{k}" if ns else k, - **kwargs: Any, - ): - super().__init__( - client=client, - serializer=serializer or JsonSerializer(), - namespace=namespace, - key_builder=key_builder, - **kwargs, - ) - - @classmethod - def parse_uri_path(cls, path): - """ - Given a uri path, return the Redis specific configuration - options in that path string according to iana definition - http://www.iana.org/assignments/uri-schemes/prov/redis - - :param path: string containing the path. Example: "/0" - :return: mapping containing the options. 
Example: {"db": "0"} - """ - options = {} - db, *_ = path[1:].split("/") - if db: - options["db"] = db - return options - - def __repr__(self): # pragma: no cover - connection_kwargs = self.client.connection_pool.connection_kwargs - return "RedisCache ({}:{})".format(connection_kwargs['host'], connection_kwargs['port']) diff --git a/aiocache/backends/valkey.py b/aiocache/backends/valkey.py new file mode 100644 index 00000000..b8560acf --- /dev/null +++ b/aiocache/backends/valkey.py @@ -0,0 +1,202 @@ +import logging +import sys +from typing import Any, Callable, Optional + +from glide import ( + ConditionalChange, + ExpirySet, + ExpiryType, + GlideClient, + GlideClientConfiguration, + Transaction, +) +from glide.exceptions import RequestError as IncrbyException + +from aiocache.base import BaseCache +from aiocache.serializers import BaseSerializer, JsonSerializer + +if sys.version_info >= (3, 11): + from typing import Self +else: + from typing import Any as Self + + +logger = logging.getLogger(__name__) + + +class ValkeyBackend(BaseCache[str]): + def __init__(self, config: GlideClientConfiguration, **kwargs): + self.config = config + super().__init__(**kwargs) + + async def __aenter__(self) -> Self: + self.client = await GlideClient.create(self.config) + return self + + async def __aexit__(self, *args, **kwargs) -> None: + await self.client.close() + + async def _get(self, key, encoding="utf-8", _conn=None): + value = await self.client.get(key) + if encoding is None or value is None: + return value + return value.decode(encoding) + + _gets = _get + + async def _multi_get(self, keys, encoding="utf-8", _conn=None): + values = await self.client.mget(keys) + if encoding is None: + return values + return [v if v is None else v.decode(encoding) for v in values] + + async def _set(self, key, value, ttl=None, _cas_token=None, _conn=None): + if isinstance(ttl, float): + ttl = ExpirySet(ExpiryType.MILLSEC, int(ttl * 1000)) + elif ttl: + ttl = ExpirySet(ExpiryType.SEC, ttl) + + if _cas_token is not None: + return await self._cas(key, value, _cas_token, ttl=ttl, _conn=_conn) + + return await self.client.set(key, value, expiry=ttl) == "OK" + + async def _cas(self, key, value, token, ttl=None, _conn=None): + if await self._get(key) == token: + return await self.client.set(key, value, expiry=ttl) == "OK" + return 0 + + async def _multi_set(self, pairs, ttl=None, _conn=None): + values = dict(pairs) + + if ttl: + await self.__multi_set_ttl(values, ttl) + else: + await self.client.mset(values) + + return True + + async def __multi_set_ttl(self, values, ttl): + transaction = Transaction() + transaction.mset(values) + ttl, exp = ( + (int(ttl * 1000), transaction.pexpire) + if isinstance(ttl, float) + else (ttl, transaction.expire) + ) + for key in values: + exp(key, ttl) + await self.client.exec(transaction) + + async def _add(self, key, value, ttl=None, _conn=None): + kwargs = {"conditional_set": ConditionalChange.ONLY_IF_DOES_NOT_EXIST} + if isinstance(ttl, float): + kwargs["expiry"] = ExpirySet(ExpiryType.MILLSEC, int(ttl * 1000)) + elif ttl: + kwargs["expiry"] = ExpirySet(ExpiryType.SEC, ttl) + was_set = await self.client.set(key, value, **kwargs) + if was_set != "OK": + raise ValueError( + "Key {} already exists, use .set to update the value".format(key) + ) + return was_set + + async def _exists(self, key, _conn=None): + return bool(await self.client.exists([key])) + + async def _increment(self, key, delta, _conn=None): + try: + return await self.client.incrby(key, delta) + except IncrbyException: + 
raise TypeError("Value is not an integer") from None
+
+    async def _expire(self, key, ttl, _conn=None):
+        if ttl == 0:
+            return await self.client.persist(key)
+        return await self.client.expire(key, ttl)
+
+    async def _delete(self, key, _conn=None):
+        return await self.client.delete([key])
+
+    async def _clear(self, namespace=None, _conn=None):
+        if not namespace:
+            return await self.client.flushdb()
+
+        _, keys = await self.client.scan(b"0", "{}:*".format(namespace))
+        if keys:
+            return bool(await self.client.delete(keys))
+
+        return True
+
+    async def _raw(self, command, *args, encoding="utf-8", _conn=None, **kwargs):
+        value = await getattr(self.client, command)(*args, **kwargs)
+        if encoding is not None:
+            if command == "get" and value is not None:
+                value = value.decode(encoding)
+        return value
+
+    async def _redlock_release(self, key, value):
+        if await self._get(key) == value:
+            return await self.client.delete([key])
+        return 0
+
+    def build_key(self, key: str, namespace: Optional[str] = None) -> str:
+        return self._str_build_key(key, namespace)
+
+
+class ValkeyCache(ValkeyBackend):
+    """
+    Valkey cache implementation with the following components as defaults:
+        - serializer: :class:`aiocache.serializers.JsonSerializer`
+        - plugins: []
+
+    Config options are:
+
+    :param serializer: obj derived from :class:`aiocache.serializers.BaseSerializer`.
+    :param plugins: list of :class:`aiocache.plugins.BasePlugin` derived classes.
+    :param namespace: string to use as default prefix for the key used in all operations of
+        the backend. Default is an empty string, "".
+    :param timeout: int or float in seconds specifying maximum timeout for the operations to last.
+        By default it's 5.
+    :param config: glide.GlideClientConfiguration used to create the client that talks to valkey
+    """
+
+    NAME = "valkey"
+
+    def __init__(
+        self,
+        config: GlideClientConfiguration,
+        serializer: Optional[BaseSerializer] = None,
+        namespace: str = "",
+        key_builder: Callable[[str, str], str] = lambda k, ns: f"{ns}:{k}" if ns else k,
+        **kwargs: Any,
+    ):
+        super().__init__(
+            config,
+            serializer=serializer or JsonSerializer(),
+            namespace=namespace,
+            key_builder=key_builder,
+            **kwargs,
+        )
+
+    @classmethod
+    def parse_uri_path(cls, path):
+        """
+        Given a uri path, return the Valkey specific configuration
+        options in that path string according to iana definition
+        http://www.iana.org/assignments/uri-schemes/prov/redis
+
+        :param path: string containing the path. Example: "/0"
+        :return: mapping containing the options. Example: {"db": "0"}
+        """
+        options = {}
+        db, *_ = path[1:].split("/")
+        if db:
+            options["db"] = db
+        return options
+
+    def __repr__(self):  # pragma: no cover
+        return (
+            f"ValkeyCache ({self.client.config.addresses[0].host}"
+            f":{self.client.config.addresses[0].port})"
+        )
diff --git a/aiocache/base.py b/aiocache/base.py
index f64edeb6..8aed6570 100644
--- a/aiocache/base.py
+++ b/aiocache/base.py
@@ -161,7 +161,7 @@ async def add(self, key, value, ttl=SENTINEL, dumps_fn=None, namespace=None, _co
         :param value: obj
         :param ttl: int the expiration time in seconds. Due to memcached
             restrictions if you want compatibility use int. In case you
-            need miliseconds, redis and memory support float ttls
+            need milliseconds, valkey and memory support float ttls
         :param dumps_fn: callable alternative to use as dumps function
         :param namespace: str alternative namespace to use
         :param timeout: int or float in seconds specifying maximum timeout
@@ -271,7 +271,7 @@ async def set(
         :param value: obj
         :param ttl: int the expiration time in seconds. Due to memcached
             restrictions if you want compatibility use int. In case you
-            need miliseconds, redis and memory support float ttls
+            need milliseconds, valkey and memory support float ttls
         :param dumps_fn: callable alternative to use as dumps function
         :param namespace: str alternative namespace to use
         :param timeout: int or float in seconds specifying maximum timeout
@@ -305,7 +305,7 @@ async def multi_set(self, pairs, ttl=SENTINEL, dumps_fn=None, namespace=None, _c
         :param pairs: list of two element iterables. First is key and second is value
         :param ttl: int the expiration time in seconds. Due to memcached
             restrictions if you want compatibility use int. In case you
-            need miliseconds, redis and memory support float ttls
+            need milliseconds, valkey and memory support float ttls
         :param dumps_fn: callable alternative to use as dumps function
         :param namespace: str alternative namespace to use
         :param timeout: int or float in seconds specifying maximum timeout
diff --git a/aiocache/decorators.py b/aiocache/decorators.py
index d2c41b24..3322afce 100644
--- a/aiocache/decorators.py
+++ b/aiocache/decorators.py
@@ -112,7 +112,7 @@ class cached_stampede(cached):
     :param cache: cache instance to use when calling the ``set``/``get`` operations.
         Default is :class:`aiocache.SimpleMemoryCache`.
     :param lease: int seconds to lock function call to avoid cache stampede effects.
-        If 0 or None, no locking happens (default is 2). redis and memory backends support
+        If 0 or None, no locking happens (default is 2). valkey and memory backends support
         float ttls
     :param ttl: int seconds to store the function call. Default is None which means no expiration.
     :param key_from_attr: str arg or kwarg name from the function to use as a key.
diff --git a/aiocache/lock.py b/aiocache/lock.py
index 34e2299c..4e9912a7 100644
--- a/aiocache/lock.py
+++ b/aiocache/lock.py
@@ -7,7 +7,7 @@ class RedLock(Generic[CacheKeyType]):
     """
-    Implementation of `Redlock <https://redis.io/topics/distlock>`_
+    Implementation of `Redlock <https://valkey.io/topics/distlock>`_
     with a single instance because aiocache is focused on single
     instance cache.
 
@@ -27,7 +27,7 @@ class RedLock(Generic[CacheKeyType]):
 
     Backend specific implementation:
 
-    - Redis implements correctly the redlock algorithm. It sets
+    - Valkey implements correctly the redlock algorithm. It sets
       the key if it doesn't exist. To release, it checks the value
       is the same as the instance trying to release and if it is,
       it removes the lock. If not it will do nothing
@@ -43,10 +43,14 @@ class RedLock(Generic[CacheKeyType]):
 
     Example usage::
 
-        from aiocache import Cache
+        from aiocache import ValkeyCache
         from aiocache.lock import RedLock
+        from glide import GlideClientConfiguration, NodeAddress
+
+        addresses = [NodeAddress("localhost", 6379)]
+        conf = GlideClientConfiguration(addresses=addresses, database_id=0)
+        cache = ValkeyCache(conf)
 
-        cache = Cache(Cache.REDIS)
         async with RedLock(cache, 'key', lease=1):  # Calls will wait here
             result = await cache.get('key')
             if result is not None:
@@ -62,7 +66,9 @@ class RedLock(Generic[CacheKeyType]):
 
     _EVENTS: Dict[str, asyncio.Event] = {}
 
-    def __init__(self, client: BaseCache[CacheKeyType], key: str, lease: Union[int, float]):
+    def __init__(
+        self, client: BaseCache[CacheKeyType], key: str, lease: Union[int, float]
+    ):
         self.client = client
         self.key = self.client.build_key(key + "-lock")
         self.lease = lease
@@ -110,8 +116,12 @@ class OptimisticLock(Generic[CacheKeyType]):
     the one we retrieved when the lock started.
 
     Example usage::
+
+        from aiocache import ValkeyCache
+        from glide import GlideClientConfiguration, NodeAddress
 
-        cache = Cache(Cache.REDIS)
+        addresses = [NodeAddress("localhost", 6379)]
+        conf = GlideClientConfiguration(addresses=addresses, database_id=0)
+        cache = ValkeyCache(conf)
 
         # The value stored in 'key' will be checked here
         async with OptimisticLock(cache, 'key') as lock:
@@ -122,7 +132,7 @@ class OptimisticLock(Generic[CacheKeyType]):
     an :class:`aiocache.lock.OptimisticLockError` will be raised. A way to make the same
     call crash would be to change the value inside the lock like::
 
-        cache = Cache(Cache.REDIS)
+        cache = ValkeyCache(conf)
 
         # The value stored in 'key' will be checked here
         async with OptimisticLock(cache, 'key') as lock:
@@ -157,7 +167,9 @@ async def cas(self, value: Any, **kwargs: Any) -> bool:
 
         :raises: :class:`aiocache.lock.OptimisticLockError`
         """
-        success = await self.client.set(self.key, value, _cas_token=self._token, **kwargs)
+        success = await self.client.set(
+            self.key, value, _cas_token=self._token, **kwargs
+        )
         if not success:
             raise OptimisticLockError("Value has changed since the lock started")
         return True
diff --git a/docker-compose.yml b/docker-compose.yml
index 9376ef8b..b81d6634 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,7 +1,7 @@
 version: '2'
 services:
-  redis:
-    image: redis
+  valkey:
+    image: valkey/valkey
     ports:
       - "6379:6379"
   memcached:
diff --git a/examples/cached_decorator.py b/examples/cached_decorator.py
index f5b9f02f..4d8a619b 100644
--- a/examples/cached_decorator.py
+++ b/examples/cached_decorator.py
@@ -1,15 +1,17 @@
 import asyncio
-
 from collections import namedtuple
-import redis.asyncio as redis
+
+from glide import GlideClientConfiguration, NodeAddress
 
 from aiocache import cached
-from aiocache import RedisCache
+from aiocache import ValkeyCache
 from aiocache.serializers import PickleSerializer
 
-Result = namedtuple('Result', "content, status")
+Result = namedtuple("Result", "content, status")
 
-cache = RedisCache(namespace="main", client=redis.Redis(), serializer=PickleSerializer())
+addresses = [NodeAddress("localhost", 6379)]
+config = GlideClientConfiguration(addresses=addresses, database_id=0)
+cache = ValkeyCache(config=config, namespace="main", serializer=PickleSerializer())
 
 
 @cached(cache, ttl=10, key_builder=lambda *args, **kw: "key")
diff --git a/examples/multicached_decorator.py b/examples/multicached_decorator.py
index 5fd83494..8b097a07 100644
--- a/examples/multicached_decorator.py
+++ 
b/examples/multicached_decorator.py @@ -1,18 +1,15 @@ import asyncio -import redis.asyncio as redis +from glide import GlideClientConfiguration, NodeAddress from aiocache import multi_cached -from aiocache import RedisCache +from aiocache import ValkeyCache -DICT = { - 'a': "Z", - 'b': "Y", - 'c': "X", - 'd': "W" -} +DICT = {"a": "Z", "b": "Y", "c": "X", "d": "W"} -cache = RedisCache(namespace="main", client=redis.Redis()) +addresses = [NodeAddress("localhost", 6379)] +config = GlideClientConfiguration(addresses=addresses, database_id=0) +cache = ValkeyCache(config=config, namespace="main") @multi_cached(cache, keys_from_attr="ids") @@ -26,20 +23,20 @@ async def multi_cached_keys(keys=None): async def test_multi_cached(): - await multi_cached_ids(ids=("a", "b")) - await multi_cached_ids(ids=("a", "c")) - await multi_cached_keys(keys=("d",)) - - assert await cache.exists("a") - assert await cache.exists("b") - assert await cache.exists("c") - assert await cache.exists("d") - - await cache.delete("a") - await cache.delete("b") - await cache.delete("c") - await cache.delete("d") - await cache.close() + async with cache: + await multi_cached_ids(ids=("a", "b")) + await multi_cached_ids(ids=("a", "c")) + await multi_cached_keys(keys=("d",)) + + assert await cache.exists("a") + assert await cache.exists("b") + assert await cache.exists("c") + assert await cache.exists("d") + + await cache.delete("a") + await cache.delete("b") + await cache.delete("c") + await cache.delete("d") if __name__ == "__main__": diff --git a/examples/optimistic_lock.py b/examples/optimistic_lock.py index 8b62f917..fdcc4108 100644 --- a/examples/optimistic_lock.py +++ b/examples/optimistic_lock.py @@ -2,45 +2,47 @@ import logging import random -import redis.asyncio as redis -from aiocache import RedisCache +from glide import GlideClientConfiguration, NodeAddress + +from aiocache import ValkeyCache from aiocache.lock import OptimisticLock, OptimisticLockError logger = logging.getLogger(__name__) -cache = RedisCache(namespace="main", client=redis.Redis()) +addresses = [NodeAddress("localhost", 6379)] +config = GlideClientConfiguration(addresses=addresses, database_id=0) async def expensive_function(): - logger.warning('Expensive is being executed...') + logger.warning("Expensive is being executed...") await asyncio.sleep(random.uniform(0, 2)) - return 'result' - + return "result" -async def my_view(): - async with OptimisticLock(cache, 'key') as lock: +async def my_view(cache): + async with OptimisticLock(cache, "key") as lock: result = await expensive_function() try: await lock.cas(result) except OptimisticLockError: logger.warning( - 'I failed setting the value because it is different since the lock started!') + "I failed setting the value because it is different since the lock started!" 
+ ) return result -async def concurrent(): - await cache.set('key', 'initial_value') +async def concurrent(cache): + await cache.set("key", "initial_value") # All three calls will read 'initial_value' as the value to check and only # the first one finishing will succeed because the others, when trying to set # the value, will see that the value is not the same as when the lock started - await asyncio.gather(my_view(), my_view(), my_view()) + await asyncio.gather(my_view(cache), my_view(cache), my_view(cache)) async def test_redis(): - await concurrent() - await cache.delete("key") - await cache.close() + async with ValkeyCache(config, namespace="main") as cache: + await concurrent(cache) + await cache.delete("key") -if __name__ == '__main__': +if __name__ == "__main__": asyncio.run(test_redis()) diff --git a/examples/python_object.py b/examples/python_object.py index 881b69c4..64ae884d 100644 --- a/examples/python_object.py +++ b/examples/python_object.py @@ -1,17 +1,17 @@ import asyncio - from collections import namedtuple -import redis.asyncio as redis +from glide import GlideClientConfiguration, NodeAddress -from aiocache import RedisCache +from aiocache import ValkeyCache from aiocache.serializers import PickleSerializer MyObject = namedtuple("MyObject", ["x", "y"]) -cache = RedisCache(serializer=PickleSerializer(), namespace="main", client=redis.Redis()) +addresses = [NodeAddress("localhost", 6379)] +config = GlideClientConfiguration(addresses=addresses, database_id=0) -async def complex_object(): +async def complex_object(cache): obj = MyObject(x=1, y=2) await cache.set("key", obj) my_object = await cache.get("key") @@ -21,9 +21,12 @@ async def complex_object(): async def test_python_object(): - await complex_object() - await cache.delete("key") - await cache.close() + async with ValkeyCache( + config, namespace="main", serializer=PickleSerializer() + ) as cache: + await complex_object(cache) + await cache.delete("key") + await cache.close() if __name__ == "__main__": diff --git a/examples/redlock.py b/examples/redlock.py index 51cbbd73..f4c78254 100644 --- a/examples/redlock.py +++ b/examples/redlock.py @@ -1,43 +1,44 @@ import asyncio import logging -import redis.asyncio as redis +from glide import GlideClientConfiguration, NodeAddress -from aiocache import RedisCache +from aiocache import ValkeyCache from aiocache.lock import RedLock logger = logging.getLogger(__name__) -cache = RedisCache(namespace="main", client=redis.Redis()) +addresses = [NodeAddress("localhost", 6379)] +config = GlideClientConfiguration(addresses=addresses, database_id=0) async def expensive_function(): - logger.warning('Expensive is being executed...') + logger.warning("Expensive is being executed...") await asyncio.sleep(1) - return 'result' + return "result" -async def my_view(): - - async with RedLock(cache, 'key', lease=2): # Wait at most 2 seconds - result = await cache.get('key') +async def my_view(cache): + async with RedLock(cache, "key", lease=2): # Wait at most 2 seconds + result = await cache.get("key") if result is not None: - logger.info('Found the value in the cache hurray!') + logger.info("Found the value in the cache hurray!") return result result = await expensive_function() - await cache.set('key', result) + await cache.set("key", result) return result -async def concurrent(): - await asyncio.gather(my_view(), my_view(), my_view()) +async def concurrent(cache): + await asyncio.gather(my_view(cache), my_view(cache), my_view(cache)) async def test_redis(): - await concurrent() - await 
cache.delete("key") - await cache.close() + async with ValkeyCache(config, namespace="main") as cache: + await concurrent(cache) + await cache.delete("key") + await cache.close() -if __name__ == '__main__': +if __name__ == "__main__": asyncio.run(test_redis()) diff --git a/examples/serializer_class.py b/examples/serializer_class.py index 2c25ff60..57dfeb94 100644 --- a/examples/serializer_class.py +++ b/examples/serializer_class.py @@ -1,11 +1,14 @@ import asyncio import zlib -import redis.asyncio as redis +from glide import GlideClientConfiguration, NodeAddress -from aiocache import RedisCache +from aiocache import ValkeyCache from aiocache.serializers import BaseSerializer +addresses = [NodeAddress("localhost", 6379)] +config = GlideClientConfiguration(addresses=addresses, database_id=0) + class CompressionSerializer(BaseSerializer): @@ -27,17 +30,15 @@ def loads(self, value): return decompressed -cache = RedisCache(serializer=CompressionSerializer(), namespace="main", client=redis.Redis()) - - -async def serializer(): +async def serializer(cache): text = ( "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt" "ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation" "ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in" "reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur" "sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit" - "anim id est laborum.") + "anim id est laborum." + ) await cache.set("key", text) print("-----------------------------------") real_value = await cache.get("key") @@ -46,9 +47,12 @@ async def serializer(): async def test_serializer(): - await serializer() - await cache.delete("key") - await cache.close() + async with ValkeyCache( + config, namespace="main", serializer=CompressionSerializer() + ) as cache: + await serializer(cache) + await cache.delete("key") + await cache.close() if __name__ == "__main__": diff --git a/examples/serializer_function.py b/examples/serializer_function.py index d85b3eb9..741dc54e 100644 --- a/examples/serializer_function.py +++ b/examples/serializer_function.py @@ -1,11 +1,15 @@ import asyncio import json -import redis.asyncio as redis +from glide import GlideClientConfiguration, NodeAddress from marshmallow import Schema, fields, post_load -from aiocache import RedisCache +from aiocache import ValkeyCache + + +addresses = [NodeAddress("localhost", 6379)] +config = GlideClientConfiguration(addresses=addresses, database_id=0) class MyType: @@ -20,7 +24,7 @@ class MyTypeSchema(Schema): @post_load def build_object(self, data, **kwargs): - return MyType(data['x'], data['y']) + return MyType(data["x"], data["y"]) def dumps(value): @@ -31,10 +35,7 @@ def loads(value): return MyTypeSchema().loads(value) -cache = RedisCache(namespace="main", client=redis.Redis()) - - -async def serializer_function(): +async def serializer_function(cache): await cache.set("key", MyType(1, 2), dumps_fn=dumps) obj = await cache.get("key", loads_fn=loads) @@ -46,9 +47,9 @@ async def serializer_function(): async def test_serializer_function(): - await serializer_function() - await cache.delete("key") - await cache.close() + async with ValkeyCache(config, namespace="main") as cache: + await serializer_function(cache) + await cache.delete("key") if __name__ == "__main__": diff --git a/examples/simple_redis.py b/examples/simple_redis.py deleted file mode 100644 index 6d5553d8..00000000 --- 
a/examples/simple_redis.py +++ /dev/null @@ -1,28 +0,0 @@ -import asyncio - - -import redis.asyncio as redis - -from aiocache import RedisCache - -cache = RedisCache(namespace="main", client=redis.Redis()) - - -async def redis(): - await cache.set("key", "value") - await cache.set("expire_me", "value", ttl=10) - - assert await cache.get("key") == "value" - assert await cache.get("expire_me") == "value" - assert await cache.raw("ttl", "main:expire_me") > 0 - - -async def test_redis(): - await redis() - await cache.delete("key") - await cache.delete("expire_me") - await cache.close() - - -if __name__ == "__main__": - asyncio.run(test_redis()) diff --git a/examples/simple_valkey.py b/examples/simple_valkey.py new file mode 100644 index 00000000..7a7c0b90 --- /dev/null +++ b/examples/simple_valkey.py @@ -0,0 +1,29 @@ +import asyncio + +from glide import GlideClientConfiguration, NodeAddress + +from aiocache import ValkeyCache + +addresses = [NodeAddress("localhost", 6379)] +config = GlideClientConfiguration(addresses=addresses, database_id=0) + + +async def valkey(cache): + await cache.set("key", "value") + await cache.set("expire_me", "value", ttl=10) + + assert await cache.get("key") == "value" + assert await cache.get("expire_me") == "value" + assert await cache.raw("ttl", "main:expire_me") > 0 + + +async def test_valkey(): + async with ValkeyCache(config, namespace="main") as cache: + await valkey(cache) + await cache.delete("key") + await cache.delete("expire_me") + await cache.close() + + +if __name__ == "__main__": + asyncio.run(test_valkey()) diff --git a/requirements-dev.txt b/requirements-dev.txt index f674f68b..b36b73c9 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -6,6 +6,5 @@ flake8-bugbear==24.12.12 flake8-import-order==0.19.0 flake8-requirements==2.2.1 mypy==1.16.1; implementation_name=="cpython" -types-redis==4.6.0.20241004 types-ujson==5.10.0.20250326 aiocache-dynamodb==1.0.2 # used for documentation diff --git a/requirements.txt b/requirements.txt index 7f1294e2..6ecc224d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,4 +8,4 @@ pytest==8.4.0 pytest-asyncio==1.0.0 pytest-cov==6.2.1 pytest-mock==3.14.1 -redis==5.2.1 +valkey-glide==1.3.5 diff --git a/setup.cfg b/setup.cfg index 679fe00e..0c44b780 100644 --- a/setup.cfg +++ b/setup.cfg @@ -17,7 +17,7 @@ junit_family=xunit2 xfail_strict = true markers = memcached: tests requiring memcached backend - redis: tests requiring redis backend + valkey: tests requiring valkey backend [coverage:run] branch = True diff --git a/setup.py b/setup.py index 92024e00..15ec1e22 100644 --- a/setup.py +++ b/setup.py @@ -34,7 +34,7 @@ packages=("aiocache",), install_requires=None, extras_require={ - "redis": ["redis>=5"], + "valkey": ["valkey-glide>=1.3.3"], "memcached": ["aiomcache>=0.5.2"], "msgpack": ["msgpack>=0.5.5"], }, diff --git a/tests/acceptance/conftest.py b/tests/acceptance/conftest.py index 28224293..e692cbcd 100644 --- a/tests/acceptance/conftest.py +++ b/tests/acceptance/conftest.py @@ -6,9 +6,10 @@ @pytest.fixture -async def redis_cache(redis_client): - from aiocache.backends.redis import RedisCache - async with RedisCache(namespace="test", client=redis_client) as cache: +async def valkey_cache(valkey_config): + from aiocache.backends.valkey import ValkeyCache + + async with ValkeyCache(valkey_config, namespace="test") as cache: yield cache await asyncio.gather(*(cache.delete(k) for k in (*Keys, KEY_LOCK))) @@ -16,6 +17,7 @@ async def redis_cache(redis_client): @pytest.fixture async def memory_cache(): 
from aiocache.backends.memory import SimpleMemoryCache + async with SimpleMemoryCache(namespace="test") as cache: yield cache await asyncio.gather(*(cache.delete(k) for k in (*Keys, KEY_LOCK))) @@ -24,6 +26,7 @@ async def memory_cache(): @pytest.fixture async def memcached_cache(): from aiocache.backends.memcached import MemcachedCache + async with MemcachedCache(namespace="test") as cache: yield cache await asyncio.gather(*(cache.delete(k) for k in (*Keys, KEY_LOCK))) @@ -31,9 +34,10 @@ async def memcached_cache(): @pytest.fixture( params=( - pytest.param("redis_cache", marks=pytest.mark.redis), + pytest.param("valkey_cache", marks=pytest.mark.valkey), "memory_cache", pytest.param("memcached_cache", marks=pytest.mark.memcached), - )) + ) +) def cache(request): return request.getfixturevalue(request.param) diff --git a/tests/acceptance/test_base.py b/tests/acceptance/test_base.py index 1a9e6fc0..614bb0b0 100644 --- a/tests/acceptance/test_base.py +++ b/tests/acceptance/test_base.py @@ -4,6 +4,7 @@ from aiocache.backends.memory import SimpleMemoryCache from aiocache.base import _Conn +from aiocache.serializers import NullSerializer from ..utils import Keys @@ -213,40 +214,51 @@ async def test_close(self, memcached_cache): assert memcached_cache.client._pool._pool.qsize() == 0 -@pytest.mark.redis -class TestRedisCache: +@pytest.mark.valkey +class TestValkeyCache: async def test_accept_explicit_args(self): - from aiocache.backends.redis import RedisCache + from aiocache.backends.valkey import ValkeyCache with pytest.raises(TypeError): - RedisCache(random_attr="wtf") + ValkeyCache(random_attr="wtf") - async def test_float_ttl(self, redis_cache): - await redis_cache.set(Keys.KEY, "value", ttl=0.1) + async def test_float_ttl(self, valkey_cache): + await valkey_cache.set(Keys.KEY, "value", ttl=0.1) await asyncio.sleep(0.15) - assert await redis_cache.get(Keys.KEY) is None + assert await valkey_cache.get(Keys.KEY) is None - async def test_multi_set_float_ttl(self, redis_cache): + async def test_multi_set_float_ttl(self, valkey_cache): pairs = [(Keys.KEY, "value"), [Keys.KEY_1, "random_value"]] - assert await redis_cache.multi_set(pairs, ttl=0.1) is True + assert await valkey_cache.multi_set(pairs, ttl=0.1) is True await asyncio.sleep(0.15) - assert await redis_cache.multi_get([Keys.KEY, Keys.KEY_1]) == [None, None] + assert await valkey_cache.multi_get([Keys.KEY, Keys.KEY_1]) == [None, None] - async def test_raw(self, redis_cache): - await redis_cache.raw("set", "key", "value") - assert await redis_cache.raw("get", "key") == "value" - assert await redis_cache.raw("keys", "k*") == ["key"] + async def test_raw(self, valkey_cache): + await valkey_cache.raw("set", "key", "value") + assert await valkey_cache.raw("get", "key") == "value" + assert await valkey_cache.raw("scan", b"0", "k*") == [b"0", [b"key"]] # .raw() doesn't build key with namespace prefix, clear it manually - await redis_cache.raw("delete", "key") + await valkey_cache.raw("delete", "key") - async def test_clear_with_namespace_redis(self, redis_cache): - await redis_cache.set(Keys.KEY, "value", namespace="test") - await redis_cache.clear(namespace="test") + async def test_raw_no_encoding(self, valkey_config): + from aiocache.backends.valkey import ValkeyCache - assert await redis_cache.exists(Keys.KEY, namespace="test") is False + serializer = NullSerializer(encoding=None) + async with ValkeyCache(valkey_config, namespace="test", serializer=serializer) as cache: + await cache.set(Keys.KEY, "value") - async def test_close(self, 
redis_cache): - await redis_cache.set(Keys.KEY, "value") - await redis_cache._close() + assert await cache.raw("get", Keys.KEY) == b"value" + + await cache.delete(Keys.KEY) + + async def test_clear_with_namespace_valkey(self, valkey_cache): + await valkey_cache.set(Keys.KEY, "value", namespace="test") + await valkey_cache.clear(namespace="test") + + assert await valkey_cache.exists(Keys.KEY, namespace="test") is False + + async def test_close(self, valkey_cache): + await valkey_cache.set(Keys.KEY, "value") + await valkey_cache._close() diff --git a/tests/acceptance/test_lock.py b/tests/acceptance/test_lock.py index 3e5a5379..64202ae6 100644 --- a/tests/acceptance/test_lock.py +++ b/tests/acceptance/test_lock.py @@ -21,9 +21,9 @@ def build_key_bytes(key, namespace=None): @pytest.fixture -def custom_redis_cache(mocker, redis_cache, build_key=build_key): - mocker.patch.object(redis_cache, "build_key", new=build_key) - yield redis_cache +def custom_valkey_cache(mocker, valkey_cache, build_key=build_key): + mocker.patch.object(valkey_cache, "build_key", new=build_key) + yield valkey_cache @pytest.fixture @@ -127,35 +127,35 @@ async def test_float_lease(self, memory_cache): assert await lock.__aexit__("exc_type", "exc_value", "traceback") is None -@pytest.mark.redis -class TestRedisRedLock: +@pytest.mark.valkey +class TestValkeyRedLock: @pytest.fixture - def lock(self, redis_cache): - return RedLock(redis_cache, Keys.KEY, 20) + def lock(self, valkey_cache): + return RedLock(valkey_cache, Keys.KEY, 20) - async def test_acquire_key_builder(self, custom_redis_cache, lock): - custom_redis_cache.serializer = StringSerializer() + async def test_acquire_key_builder(self, custom_valkey_cache, lock): + custom_valkey_cache.serializer = StringSerializer() async with lock: - assert await custom_redis_cache.get(KEY_LOCK) == lock._value + assert await custom_valkey_cache.get(KEY_LOCK) == lock._value - async def test_acquire_release_key_builder(self, custom_redis_cache, lock): - custom_redis_cache.serializer = StringSerializer() + async def test_acquire_release_key_builder(self, custom_valkey_cache, lock): + custom_valkey_cache.serializer = StringSerializer() async with lock: - assert await custom_redis_cache.get(KEY_LOCK) is not None - assert await custom_redis_cache.get(KEY_LOCK) is None + assert await custom_valkey_cache.get(KEY_LOCK) is not None + assert await custom_valkey_cache.get(KEY_LOCK) is None async def test_release_wrong_token_fails(self, lock): await lock.__aenter__() lock._value = "random" assert await lock.__aexit__("exc_type", "exc_value", "traceback") is None - async def test_release_wrong_client_fails(self, redis_cache, lock): - wrong_lock = RedLock(redis_cache, Keys.KEY, 20) + async def test_release_wrong_client_fails(self, valkey_cache, lock): + wrong_lock = RedLock(valkey_cache, Keys.KEY, 20) await lock.__aenter__() assert await wrong_lock.__aexit__("exc_type", "exc_value", "traceback") is None - async def test_float_lease(self, redis_cache): - lock = RedLock(redis_cache, Keys.KEY, 0.1) + async def test_float_lease(self, valkey_cache): + lock = RedLock(valkey_cache, Keys.KEY, 0.1) await lock.__aenter__() await asyncio.sleep(0.2) assert await lock.__aexit__("exc_type", "exc_value", "traceback") is None @@ -257,23 +257,23 @@ async def test_check_and_set_with_float_ttl(self, memory_cache, lock): assert await memory_cache.get(Keys.KEY) is None -@pytest.mark.redis -class TestRedisOptimisticLock: +@pytest.mark.valkey +class TestValkeyOptimisticLock: @pytest.fixture - def lock(self, 
redis_cache): - return OptimisticLock(redis_cache, Keys.KEY) + def lock(self, valkey_cache): + return OptimisticLock(valkey_cache, Keys.KEY) - async def test_acquire_key_builder(self, custom_redis_cache, lock): - custom_redis_cache.serializer = StringSerializer() - await custom_redis_cache.set(Keys.KEY, "value") + async def test_acquire_key_builder(self, custom_valkey_cache, lock): + custom_valkey_cache.serializer = StringSerializer() + await custom_valkey_cache.set(Keys.KEY, "value") async with lock: - assert await custom_redis_cache.get(KEY_LOCK) == lock._token - await custom_redis_cache.delete(Keys.KEY, "value") + assert await custom_valkey_cache.get(KEY_LOCK) == lock._token + await custom_valkey_cache.delete(Keys.KEY, "value") - async def test_check_and_set_with_float_ttl(self, redis_cache, lock): - await redis_cache.set(Keys.KEY, "previous_value") + async def test_check_and_set_with_float_ttl(self, valkey_cache, lock): + await valkey_cache.set(Keys.KEY, "previous_value") async with lock as locked: await locked.cas("value", ttl=0.1) await asyncio.sleep(1) - assert await redis_cache.get(Keys.KEY) is None + assert await valkey_cache.get(Keys.KEY) is None diff --git a/tests/conftest.py b/tests/conftest.py index 4482701d..6b4ce5fa 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,27 +1,9 @@ import pytest -@pytest.fixture() -def max_conns(): - return None - - -@pytest.fixture() -def decode_responses(): - return False - - @pytest.fixture -async def redis_client(max_conns, decode_responses): - import redis.asyncio as redis +def valkey_config(): + from glide import GlideClientConfiguration, NodeAddress - async with redis.Redis( - host="127.0.0.1", - port=6379, - db=0, - password=None, - decode_responses=decode_responses, - socket_connect_timeout=None, - max_connections=max_conns - ) as r: - yield r + addresses = [NodeAddress("localhost", 6379)] + return GlideClientConfiguration(addresses=addresses, database_id=0) diff --git a/tests/performance/conftest.py b/tests/performance/conftest.py index 03066cbb..d47dd1ff 100644 --- a/tests/performance/conftest.py +++ b/tests/performance/conftest.py @@ -2,16 +2,18 @@ @pytest.fixture -async def redis_cache(redis_client): - # redis connection pool raises ConnectionError but doesn't wait for conn reuse +async def valkey_cache(valkey_config): + # valkey connection pool raises ConnectionError but doesn't wait for conn reuse # when exceeding max pool size. 
- from aiocache.backends.redis import RedisCache - async with RedisCache(namespace="test", client=redis_client) as cache: + from aiocache.backends.valkey import ValkeyCache + + async with ValkeyCache(valkey_config, namespace="test") as cache: yield cache @pytest.fixture async def memcached_cache(): from aiocache.backends.memcached import MemcachedCache + async with MemcachedCache(namespace="test", pool_size=1) as cache: yield cache diff --git a/tests/performance/server.py b/tests/performance/server.py index c5d53670..41d8bc94 100644 --- a/tests/performance/server.py +++ b/tests/performance/server.py @@ -1,26 +1,29 @@ import asyncio import logging +import sys import uuid +from functools import partial +from types import TracebackType +from typing import AsyncIterator, Optional -import redis.asyncio as redis from aiohttp import web +if sys.version_info >= (3, 11): + from typing import Self +else: + from typing import Any as Self + logging.getLogger("aiohttp.access").propagate = False class CacheManager: def __init__(self, backend: str): - if backend == "redis": - from aiocache.backends.redis import RedisCache - cache = RedisCache( - client=redis.Redis( - host="127.0.0.1", - port=6379, - db=0, - password=None, - decode_responses=False, - ) - ) + if backend == "valkey": + from aiocache.backends.valkey import ValkeyCache + from glide import GlideClientConfiguration, NodeAddress + + config = GlideClientConfiguration(addresses=[NodeAddress()], database_id=0) + cache = ValkeyCache(config) elif backend == "memcached": from aiocache.backends.memcached import MemcachedCache cache = MemcachedCache() @@ -37,8 +40,17 @@ async def get(self, key): async def set(self, key, value): return await self.cache.set(key, value, timeout=0.1) - async def close(self, *_): - await self.cache.close() + async def __aenter__(self) -> Self: + await self.cache.__aenter__() + return self + + async def __aexit__( + self, + exc_type: Optional[type[BaseException]], + exc: Optional[BaseException], + tb: Optional[TracebackType], + ) -> None: + await self.cache.__aexit__(exc_type, exc, tb) cache_key = web.AppKey("cache_key", CacheManager) @@ -49,7 +61,8 @@ async def handler_get(req: web.Request) -> web.Response: data = await req.app[cache_key].get("testkey") if data: return web.Response(text=data) - except asyncio.TimeoutError: + except asyncio.TimeoutError: # pragma: no cover + # This won't be reached if the concurrency tests achieve 100% success rates. return web.Response(status=404) data = str(uuid.uuid4()) @@ -57,9 +70,14 @@ async def handler_get(req: web.Request) -> web.Response: return web.Response(text=str(data)) +async def ctx(app: web.Application, backend: str) -> AsyncIterator[None]: + async with CacheManager(backend) as cm: + app[cache_key] = cm + yield + + def run_server(backend: str) -> None: app = web.Application() - app[cache_key] = CacheManager(backend) - app.on_shutdown.append(app[cache_key].close) + app.cleanup_ctx.append(partial(ctx, backend=backend)) app.router.add_route("GET", "/", handler_get) web.run_app(app) diff --git a/tests/performance/test_concurrency.py b/tests/performance/test_concurrency.py index 4d1803b2..71ebc8f8 100644 --- a/tests/performance/test_concurrency.py +++ b/tests/performance/test_concurrency.py @@ -2,27 +2,27 @@ import re import subprocess import time -from multiprocessing import Process +from multiprocessing import Process, set_start_method import pytest from .server import run_server +# Spawn is needed to avoid potential segfaults in forked processes. 
+set_start_method("spawn") + # TODO: Fix and readd "memcached" (currently fails >98% of requests) -@pytest.fixture(params=("memory", "redis")) +@pytest.fixture(params=("memory", "valkey")) def server(request): p = Process(target=run_server, args=(request.param,)) p.start() - time.sleep(1) + time.sleep(2) yield p.terminate() p.join(timeout=15) -@pytest.mark.xfail(reason="currently fails >85% of requests on GitHub runner, " - "requires several re-runs to pass", - strict=False) @pytest.mark.skipif(platform.python_implementation() == "PyPy", reason="Not working currently.") def test_concurrency_error_rates(server): """Test with Apache benchmark tool.""" diff --git a/tests/performance/test_footprint.py b/tests/performance/test_footprint.py index 9595759f..fe1af2a8 100644 --- a/tests/performance/test_footprint.py +++ b/tests/performance/test_footprint.py @@ -1,77 +1,81 @@ import platform import time -from typing import AsyncIterator, cast +from typing import AsyncIterator import aiomcache import pytest -import redis.asyncio as redis +from glide import GlideClient, GlideClientConfiguration, NodeAddress @pytest.fixture -async def redis_client() -> AsyncIterator["redis.Redis[str]"]: - async with cast("redis.Redis[str]", - redis.Redis(host="127.0.0.1", port=6379, max_connections=1)) as r: - yield r +async def valkey_client() -> AsyncIterator[GlideClient]: + addresses = [NodeAddress("localhost", 6379)] + conf = GlideClientConfiguration(addresses=addresses) + client = await GlideClient.create(conf) + + yield client + + await client.close() @pytest.mark.skipif(platform.python_implementation() == "PyPy", reason="Too slow") -class TestRedis: - async def test_redis_getsetdel(self, redis_client, redis_cache): +class TestValkey: + async def test_valkey_getsetdel(self, valkey_client, valkey_cache): N = 10000 - redis_total_time = 0 + valkey_total_time = 0 for _n in range(N): start = time.time() - await redis_client.set("hi", "value") - await redis_client.get("hi") - await redis_client.delete("hi") - redis_total_time += time.time() - start + await valkey_client.set("hi", "value") + await valkey_client.get("hi") + await valkey_client.delete(["hi"]) + valkey_total_time += time.time() - start aiocache_total_time = 0 for _n in range(N): start = time.time() - await redis_cache.set("hi", "value", timeout=0) - await redis_cache.get("hi", timeout=0) - await redis_cache.delete("hi", timeout=0) + await valkey_cache.set("hi", "value", timeout=0) + await valkey_cache.get("hi", timeout=0) + await valkey_cache.delete("hi", timeout=0) aiocache_total_time += time.time() - start print( "\n{:0.2f}/{:0.2f}: {:0.2f}".format( - aiocache_total_time, redis_total_time, aiocache_total_time / redis_total_time + aiocache_total_time, valkey_total_time, aiocache_total_time / valkey_total_time ) ) print("aiocache avg call: {:0.5f}s".format(aiocache_total_time / N)) - print("redis avg call: {:0.5f}s".format(redis_total_time / N)) - assert aiocache_total_time / redis_total_time < 1.35 + print("valkey avg call: {:0.5f}s".format(valkey_total_time / N)) + assert aiocache_total_time / valkey_total_time < 1.35 - async def test_redis_multigetsetdel(self, redis_client, redis_cache): + async def test_valkey_multigetsetdel(self, valkey_client, valkey_cache): N = 5000 - redis_total_time = 0 + valkey_total_time = 0 values = ["a", "b", "c", "d", "e", "f"] for _n in range(N): start = time.time() - await redis_client.mset({x: x for x in values}) - await redis_client.mget(values) + await valkey_client.mset({x: x for x in values}) + await 
valkey_client.mget(values) for k in values: - await redis_client.delete(k) - redis_total_time += time.time() - start + await valkey_client.delete([k]) + valkey_total_time += time.time() - start aiocache_total_time = 0 for _n in range(N): start = time.time() - await redis_cache.multi_set([(x, x) for x in values], timeout=0) - await redis_cache.multi_get(values, timeout=0) + await valkey_cache.multi_set([(x, x) for x in values], timeout=0) + await valkey_cache.multi_get(values, timeout=0) for k in values: - await redis_cache.delete(k, timeout=0) + await valkey_cache.delete(k, timeout=0) aiocache_total_time += time.time() - start print( "\n{:0.2f}/{:0.2f}: {:0.2f}".format( - aiocache_total_time, redis_total_time, aiocache_total_time / redis_total_time + aiocache_total_time, valkey_total_time, aiocache_total_time / valkey_total_time ) ) print("aiocache avg call: {:0.5f}s".format(aiocache_total_time / N)) - print("redis_client avg call: {:0.5f}s".format(redis_total_time / N)) - assert aiocache_total_time / redis_total_time < 1.35 + print("valkey_client avg call: {:0.5f}s".format(valkey_total_time / N)) + assert aiocache_total_time / valkey_total_time < 1.35 @pytest.fixture diff --git a/tests/ut/backends/test_redis.py b/tests/ut/backends/test_redis.py deleted file mode 100644 index 10e5d2de..00000000 --- a/tests/ut/backends/test_redis.py +++ /dev/null @@ -1,209 +0,0 @@ -from unittest.mock import ANY, AsyncMock, create_autospec, patch - -import pytest -from redis.asyncio.client import Pipeline -from redis.exceptions import ResponseError - -from aiocache.backends.redis import RedisBackend, RedisCache -from aiocache.base import BaseCache -from aiocache.serializers import JsonSerializer -from ...utils import Keys, ensure_key - - -@pytest.fixture -def redis(redis_client): - redis = RedisBackend(client=redis_client) - with patch.object(redis, "client", autospec=True) as m: - # These methods actually return an awaitable. 
- for method in ( - "eval", "expire", "get", "psetex", "setex", "execute_command", "exists", - "incrby", "persist", "delete", "keys", "flushdb", - ): - setattr(m, method, AsyncMock(return_value=None, spec_set=())) - m.mget = AsyncMock(return_value=[None], spec_set=()) - m.set = AsyncMock(return_value=True, spec_set=()) - - m.pipeline.return_value = create_autospec(Pipeline, instance=True) - m.pipeline.return_value.__aenter__.return_value = m.pipeline.return_value - yield redis - - -class TestRedisBackend: - - @pytest.mark.parametrize("decode_responses", [True]) - async def test_redis_backend_requires_client_decode_responses(self, redis_client): - with pytest.raises(ValueError) as ve: - RedisBackend(client=redis_client) - - assert str(ve.value) == ( - "redis client must be constructed with decode_responses set to False" - ) - - async def test_get(self, redis): - redis.client.get.return_value = b"value" - assert await redis._get(Keys.KEY) == "value" - redis.client.get.assert_called_with(Keys.KEY) - - async def test_gets(self, mocker, redis): - mocker.spy(redis, "_get") - await redis._gets(Keys.KEY) - redis._get.assert_called_with(Keys.KEY, encoding="utf-8", _conn=ANY) - - async def test_set(self, redis): - await redis._set(Keys.KEY, "value") - redis.client.set.assert_called_with(Keys.KEY, "value") - - await redis._set(Keys.KEY, "value", ttl=1) - redis.client.setex.assert_called_with(Keys.KEY, 1, "value") - - async def test_set_cas_token(self, mocker, redis): - mocker.spy(redis, "_cas") - await redis._set(Keys.KEY, "value", _cas_token="old_value", _conn=redis.client) - redis._cas.assert_called_with( - Keys.KEY, "value", "old_value", ttl=None, _conn=redis.client - ) - - async def test_cas(self, mocker, redis): - mocker.spy(redis, "_raw") - await redis._cas(Keys.KEY, "value", "old_value", ttl=10, _conn=redis.client) - redis._raw.assert_called_with( - "eval", - redis.CAS_SCRIPT, - 1, - *[Keys.KEY, "value", "old_value", "EX", 10], - _conn=redis.client, - ) - - async def test_cas_float_ttl(self, mocker, redis): - mocker.spy(redis, "_raw") - await redis._cas(Keys.KEY, "value", "old_value", ttl=0.1, _conn=redis.client) - redis._raw.assert_called_with( - "eval", - redis.CAS_SCRIPT, - 1, - *[Keys.KEY, "value", "old_value", "PX", 100], - _conn=redis.client, - ) - - async def test_multi_get(self, redis): - await redis._multi_get([Keys.KEY, Keys.KEY_1]) - redis.client.mget.assert_called_with(Keys.KEY, Keys.KEY_1) - - async def test_multi_set(self, redis): - await redis._multi_set([(Keys.KEY, "value"), (Keys.KEY_1, "random")]) - redis.client.execute_command.assert_called_with( - "MSET", Keys.KEY, "value", Keys.KEY_1, "random" - ) - - async def test_multi_set_with_ttl(self, redis): - await redis._multi_set([(Keys.KEY, "value"), (Keys.KEY_1, "random")], ttl=1) - assert redis.client.pipeline.call_count == 1 - pipeline = redis.client.pipeline.return_value - pipeline.execute_command.assert_called_with( - "MSET", Keys.KEY, "value", Keys.KEY_1, "random" - ) - pipeline.expire.assert_any_call(Keys.KEY, time=1) - pipeline.expire.assert_any_call(Keys.KEY_1, time=1) - assert pipeline.execute.call_count == 1 - - async def test_add(self, redis): - await redis._add(Keys.KEY, "value") - redis.client.set.assert_called_with(Keys.KEY, "value", nx=True, ex=None) - - await redis._add(Keys.KEY, "value", 1) - redis.client.set.assert_called_with(Keys.KEY, "value", nx=True, ex=1) - - async def test_add_existing(self, redis): - redis.client.set.return_value = False - with pytest.raises(ValueError): - await redis._add(Keys.KEY, 
"value") - - async def test_add_float_ttl(self, redis): - await redis._add(Keys.KEY, "value", 0.1) - redis.client.set.assert_called_with(Keys.KEY, "value", nx=True, px=100) - - async def test_exists(self, redis): - redis.client.exists.return_value = 1 - await redis._exists(Keys.KEY) - redis.client.exists.assert_called_with(Keys.KEY) - - async def test_increment(self, redis): - await redis._increment(Keys.KEY, delta=2) - redis.client.incrby.assert_called_with(Keys.KEY, 2) - - async def test_increment_typerror(self, redis): - redis.client.incrby.side_effect = ResponseError("msg") - with pytest.raises(TypeError): - await redis._increment(Keys.KEY, delta=2) - redis.client.incrby.assert_called_with(Keys.KEY, 2) - - async def test_expire(self, redis): - await redis._expire(Keys.KEY, 1) - redis.client.expire.assert_called_with(Keys.KEY, 1) - await redis._increment(Keys.KEY, 2) - - async def test_expire_0_ttl(self, redis): - await redis._expire(Keys.KEY, ttl=0) - redis.client.persist.assert_called_with(Keys.KEY) - - async def test_delete(self, redis): - await redis._delete(Keys.KEY) - redis.client.delete.assert_called_with(Keys.KEY) - - async def test_clear(self, redis): - redis.client.keys.return_value = ["nm:a", "nm:b"] - await redis._clear("nm") - redis.client.delete.assert_called_with("nm:a", "nm:b") - - async def test_clear_no_keys(self, redis): - redis.client.keys.return_value = [] - await redis._clear("nm") - redis.client.delete.assert_not_called() - - async def test_clear_no_namespace(self, redis): - await redis._clear() - assert redis.client.flushdb.call_count == 1 - - async def test_raw(self, redis): - await redis._raw("get", Keys.KEY) - await redis._raw("set", Keys.KEY, 1) - redis.client.get.assert_called_with(Keys.KEY) - redis.client.set.assert_called_with(Keys.KEY, 1) - - async def test_redlock_release(self, mocker, redis): - mocker.spy(redis, "_raw") - await redis._redlock_release(Keys.KEY, "random") - redis._raw.assert_called_with("eval", redis.RELEASE_SCRIPT, 1, Keys.KEY, "random") - - -class TestRedisCache: - @pytest.fixture - def set_test_namespace(self, redis_cache): - redis_cache.namespace = "test" - yield - redis_cache.namespace = None - - def test_name(self): - assert RedisCache.NAME == "redis" - - def test_inheritance(self, redis_client): - assert isinstance(RedisCache(client=redis_client), BaseCache) - - def test_default_serializer(self, redis_client): - assert isinstance(RedisCache(client=redis_client).serializer, JsonSerializer) - - @pytest.mark.parametrize( - "path,expected", [("", {}), ("/", {}), ("/1", {"db": "1"}), ("/1/2/3", {"db": "1"})] - ) - def test_parse_uri_path(self, path, expected, redis_client): - assert RedisCache(client=redis_client).parse_uri_path(path) == expected - - @pytest.mark.parametrize( - "namespace, expected", - ([None, "test:" + ensure_key(Keys.KEY)], ["", ensure_key(Keys.KEY)], ["my_ns", "my_ns:" + ensure_key(Keys.KEY)]), # noqa: B950 - ) - def test_build_key_double_dot(self, set_test_namespace, redis_cache, namespace, expected): - assert redis_cache.build_key(Keys.KEY, namespace) == expected - - def test_build_key_no_namespace(self, redis_cache): - assert redis_cache.build_key(Keys.KEY, namespace=None) == Keys.KEY diff --git a/tests/ut/backends/test_valkey.py b/tests/ut/backends/test_valkey.py new file mode 100644 index 00000000..a6bb1115 --- /dev/null +++ b/tests/ut/backends/test_valkey.py @@ -0,0 +1,244 @@ +from unittest.mock import AsyncMock, patch + +import pytest +from glide import ConditionalChange, ExpirySet, ExpiryType, Transaction 
+from glide.exceptions import RequestError
+
+from aiocache.backends.valkey import ValkeyBackend, ValkeyCache
+from aiocache.base import BaseCache
+from aiocache.serializers import JsonSerializer
+from ...utils import Keys, ensure_key
+
+
+@pytest.fixture
+async def valkey(valkey_config):
+    async with ValkeyBackend(config=valkey_config) as valkey:
+        with patch.object(valkey, "client", autospec=True) as m:
+            # These methods actually return an awaitable.
+            for method in (
+                "eval",
+                "expire",
+                "get",
+                "execute_command",
+                "exists",
+                "incrby",
+                "persist",
+                "delete",
+                "scan",
+                "flushdb",
+            ):
+                setattr(m, method, AsyncMock(return_value=None, spec_set=()))
+            m.mget = AsyncMock(return_value=[None], spec_set=())
+            m.set = AsyncMock(return_value="OK", spec_set=())
+            yield valkey
+
+
+class TestValkeyBackend:
+    async def test_get(self, valkey):
+        valkey.client.get.return_value = b"value"
+        assert await valkey._get(Keys.KEY) == "value"
+        valkey.client.get.assert_called_with(Keys.KEY)
+
+    async def test_gets(self, valkey):
+        await valkey._gets(Keys.KEY)
+        valkey.client.get.assert_called_with(Keys.KEY)
+
+    async def test_set(self, valkey):
+        await valkey._set(Keys.KEY, "value")
+        valkey.client.set.assert_called_with(Keys.KEY, "value", expiry=None)
+
+        await valkey._set(Keys.KEY, "value", ttl=1)
+        valkey.client.set.assert_called_with(
+            Keys.KEY, "value", expiry=ExpirySet(ExpiryType.SEC, 1)
+        )
+
+    async def test_set_cas_token(self, mocker, valkey):
+        mocker.patch.object(valkey, "_cas")
+        await valkey._set(
+            Keys.KEY, "value", _cas_token="old_value", _conn=valkey.client
+        )
+        valkey._cas.assert_called_with(
+            Keys.KEY, "value", "old_value", ttl=None, _conn=valkey.client
+        )
+
+    async def test_set_cas_token_ttl(self, mocker, valkey):
+        mocker.patch.object(valkey, "_cas")
+        await valkey._set(
+            Keys.KEY, "value", ttl=1, _cas_token="old_value", _conn=valkey.client
+        )
+        valkey._cas.assert_called_with(
+            Keys.KEY,
+            "value",
+            "old_value",
+            ttl=ExpirySet(ExpiryType.SEC, 1),
+            _conn=valkey.client,
+        )
+
+    async def test_set_cas_token_float_ttl(self, mocker, valkey):
+        mocker.patch.object(valkey, "_cas")
+        await valkey._set(
+            Keys.KEY, "value", ttl=1.1, _cas_token="old_value", _conn=valkey.client
+        )
+        valkey._cas.assert_called_with(
+            Keys.KEY,
+            "value",
+            "old_value",
+            ttl=ExpirySet(ExpiryType.MILLSEC, 1100),
+            _conn=valkey.client,
+        )
+
+    async def test_cas(self, mocker, valkey):
+        mocker.spy(valkey, "_get")
+        mocker.spy(valkey, "_cas")
+        await valkey._cas(Keys.KEY, "value", "old_value", ttl=10, _conn=valkey.client)
+        valkey._get.assert_called_with(Keys.KEY)
+        assert valkey._cas.spy_return == 0
+
+    async def test_cas_float_ttl(self, mocker, valkey):
+        spy = mocker.spy(valkey, "_get")
+        await valkey._cas(Keys.KEY, "value", "old_value", ttl=0.1, _conn=valkey.client)
+        spy.assert_called_with(Keys.KEY)
+        mocker.stop(spy)
+        mock = mocker.patch.object(valkey, "_get", return_value="old_value")
+        await valkey._cas(Keys.KEY, "value", "old_value", ttl=0.1, _conn=valkey.client)
+        mock.assert_called_once()
+        valkey.client.set.assert_called_with(Keys.KEY, "value", expiry=0.1)
+
+    async def test_multi_get(self, valkey):
+        await valkey._multi_get([Keys.KEY, Keys.KEY_1])
+        valkey.client.mget.assert_called_with([Keys.KEY, Keys.KEY_1])
+
+    async def test_multi_set(self, valkey):
+        await valkey._multi_set([(Keys.KEY, "value"), (Keys.KEY_1, "random")])
+        valkey.client.mset.assert_called_with({Keys.KEY: "value", Keys.KEY_1: "random"})
+
+    async def test_multi_set_with_ttl(self, valkey, mocker):
+        mock_mset = mocker.patch.object(Transaction, "mset")
+        mock_expire = mocker.patch.object(Transaction, "expire")
+        await valkey._multi_set([(Keys.KEY, "value"), (Keys.KEY_1, "random")], ttl=1)
+
+        valkey.client.exec.assert_called()
+
+        assert mock_mset.call_count == 1
+        assert mock_expire.call_count == 2
+        mock_expire.assert_any_call(Keys.KEY, 1)
+        mock_expire.assert_any_call(Keys.KEY_1, 1)
+
+    async def test_add(self, valkey):
+        await valkey._add(Keys.KEY, "value")
+        valkey.client.set.assert_called_with(
+            Keys.KEY, "value", conditional_set=ConditionalChange.ONLY_IF_DOES_NOT_EXIST
+        )
+
+        await valkey._add(Keys.KEY, "value", 1)
+        valkey.client.set.assert_called_with(
+            Keys.KEY,
+            "value",
+            conditional_set=ConditionalChange.ONLY_IF_DOES_NOT_EXIST,
+            expiry=ExpirySet(ExpiryType.SEC, 1),
+        )
+
+    async def test_add_existing(self, valkey):
+        valkey.client.set.return_value = False
+        with pytest.raises(ValueError):
+            await valkey._add(Keys.KEY, "value")
+
+    async def test_add_float_ttl(self, valkey):
+        await valkey._add(Keys.KEY, "value", 0.1)
+        assert valkey.client.set.call_args.args[0] == Keys.KEY
+        assert (
+            valkey.client.set.call_args.kwargs["conditional_set"]
+            == ConditionalChange.ONLY_IF_DOES_NOT_EXIST
+        )
+        assert (
+            valkey.client.set.call_args.kwargs["expiry"].get_cmd_args()
+            == ExpirySet(ExpiryType.MILLSEC, 100).get_cmd_args()
+        )
+
+    async def test_exists(self, valkey):
+        valkey.client.exists.return_value = 1
+        await valkey._exists(Keys.KEY)
+        valkey.client.exists.assert_called_with([Keys.KEY])
+
+    async def test_increment(self, valkey):
+        await valkey._increment(Keys.KEY, delta=2)
+        valkey.client.incrby.assert_called_with(Keys.KEY, 2)
+
+    async def test_increment_typerror(self, valkey):
+        valkey.client.incrby.side_effect = RequestError("msg")
+        with pytest.raises(TypeError):
+            await valkey._increment(Keys.KEY, delta=2)
+        valkey.client.incrby.assert_called_with(Keys.KEY, 2)
+
+    async def test_expire(self, valkey):
+        await valkey._expire(Keys.KEY, 1)
+        valkey.client.expire.assert_called_with(Keys.KEY, 1)
+        await valkey._increment(Keys.KEY, 2)
+
+    async def test_expire_0_ttl(self, valkey):
+        await valkey._expire(Keys.KEY, ttl=0)
+        valkey.client.persist.assert_called_with(Keys.KEY)
+
+    async def test_delete(self, valkey):
+        await valkey._delete(Keys.KEY)
+        valkey.client.delete.assert_called_with([Keys.KEY])
+
+    async def test_clear(self, valkey):
+        valkey.client.scan.return_value = [b"0", ["nm:a", "nm:b"]]
+        await valkey._clear("nm")
+        valkey.client.delete.assert_called_with(["nm:a", "nm:b"])
+
+    async def test_clear_no_keys(self, valkey):
+        valkey.client.scan.return_value = [b"0", []]
+        await valkey._clear("nm")
+        valkey.client.delete.assert_not_called()
+
+    async def test_clear_no_namespace(self, valkey):
+        await valkey._clear()
+        assert valkey.client.flushdb.call_count == 1
+
+    async def test_redlock_release(self, mocker, valkey):
+        mocker.patch.object(valkey, "_get", return_value="random")
+        await valkey._redlock_release(Keys.KEY, "random")
+        valkey._get.assert_called_once_with(Keys.KEY)
+        valkey.client.delete.assert_called_once_with([Keys.KEY])
+
+
+class TestValkeyCache:
+    @pytest.fixture
+    def set_test_namespace(self, valkey_cache):
+        valkey_cache.namespace = "test"
+        yield
+        valkey_cache.namespace = None
+
+    def test_name(self):
+        assert ValkeyCache.NAME == "valkey"
+
+    def test_inheritance(self, valkey_config):
+        assert isinstance(ValkeyCache(config=valkey_config), BaseCache)
+
+    def test_default_serializer(self, valkey_config):
+        assert isinstance(ValkeyCache(config=valkey_config).serializer, JsonSerializer)
+
+    @pytest.mark.parametrize(
+        "path,expected",
+        (("", {}), ("/", {}), ("/1", {"db": "1"}), ("/1/2/3", {"db": "1"})),
+    )
+    def test_parse_uri_path(self, path, expected, valkey_config):
+        assert ValkeyCache(config=valkey_config).parse_uri_path(path) == expected
+
+    @pytest.mark.parametrize(
+        "namespace, expected",
+        (
+            (None, "test:" + ensure_key(Keys.KEY)),
+            ("", ensure_key(Keys.KEY)),
+            ("my_ns", "my_ns:" + ensure_key(Keys.KEY)),
+        ),
+    )
+    def test_build_key_double_dot(
+        self, set_test_namespace, valkey_cache, namespace, expected
+    ):
+        assert valkey_cache.build_key(Keys.KEY, namespace) == expected
+
+    def test_build_key_no_namespace(self, valkey_cache):
+        assert valkey_cache.build_key(Keys.KEY, namespace=None) == Keys.KEY
diff --git a/tests/ut/conftest.py b/tests/ut/conftest.py
index 38dae44a..5196f800 100644
--- a/tests/ut/conftest.py
+++ b/tests/ut/conftest.py
@@ -40,10 +40,10 @@ def base_cache():
 
 
 @pytest.fixture
-async def redis_cache(redis_client):
-    from aiocache.backends.redis import RedisCache
+async def valkey_cache(valkey_config):
+    from aiocache.backends.valkey import ValkeyCache
 
-    async with RedisCache(client=redis_client) as cache:
+    async with ValkeyCache(valkey_config) as cache:
         yield cache