diff --git a/CHANGELOG b/CHANGELOG
index 9ab5653..cf0106a 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -5,6 +5,18 @@ Change Log
 UNRELEASED
 ==========
 
+Additions:
+----------
+
+- Add cache_name parameter to @ratelimit decorator and core functions
+- Support for multiple cache aliases in RATELIMIT_USE_CACHE setting
+- Enhanced system checks to validate multiple cache configurations
+
+Minor changes:
+--------------
+
+- Updated documentation to include cache_name parameter examples
+
 v4.1
 ====
 
diff --git a/django_ratelimit/checks.py b/django_ratelimit/checks.py
index f6b76f4..7c220ca 100644
--- a/django_ratelimit/checks.py
+++ b/django_ratelimit/checks.py
@@ -34,36 +34,43 @@ def check_caches(app_configs, **kwargs):
         )
         return errors
 
-    if cache_name not in caches:
-        errors.append(
-            checks.Error(
-                f'RATELIMIT_USE_CACHE value "{cache_name}"" does not '
-                f'appear in CACHES dictionary',
-                hint='RATELIMIT_USE_CACHE must be set to a valid cache',
-                id='django_ratelimit.E002',
+    # Handle multiple cache aliases
+    if isinstance(cache_name, (list, tuple)):
+        cache_names = cache_name
+    else:
+        cache_names = [cache_name]
+
+    for cache_name in cache_names:
+        if cache_name not in caches:
+            errors.append(
+                checks.Error(
+                    f'RATELIMIT_USE_CACHE value "{cache_name}" does not '
+                    f'appear in CACHES dictionary',
+                    hint='RATELIMIT_USE_CACHE must be set to a valid cache or list of caches',
+                    id='django_ratelimit.E002',
+                )
             )
-        )
-        return errors
+            continue
 
-    cache_config = caches[cache_name]
-    backend = cache_config['BACKEND']
+        cache_config = caches[cache_name]
+        backend = cache_config['BACKEND']
 
-    reason = KNOWN_BROKEN_CACHE_BACKENDS.get(backend, None)
-    if reason is not None:
-        errors.append(
-            checks.Error(
-                f'cache backend {backend} {reason}',
-                hint='Use a supported cache backend',
-                id='django_ratelimit.E003',
+        reason = KNOWN_BROKEN_CACHE_BACKENDS.get(backend, None)
+        if reason is not None:
+            errors.append(
+                checks.Error(
+                    f'cache backend {backend} {reason}',
+                    hint='Use a supported cache backend',
+                    id='django_ratelimit.E003',
+                )
             )
-        )
 
-    if backend not in SUPPORTED_CACHE_BACKENDS:
-        errors.append(
-            checks.Warning(
-                f'cache backend {backend} is not officially supported',
-                id='django_ratelimit.W001',
+        if backend not in SUPPORTED_CACHE_BACKENDS:
+            errors.append(
+                checks.Warning(
+                    f'cache backend {backend} is not officially supported',
+                    id='django_ratelimit.W001',
+                )
             )
-        )
 
     return errors
diff --git a/django_ratelimit/core.py b/django_ratelimit/core.py
index 1270799..4873f8d 100644
--- a/django_ratelimit/core.py
+++ b/django_ratelimit/core.py
@@ -149,8 +149,8 @@ def _make_cache_key(group, window, rate, value, methods):
 
 
 def is_ratelimited(request, group=None, fn=None, key=None, rate=None,
-                   method=ALL, increment=False):
-    usage = get_usage(request, group, fn, key, rate, method, increment)
+                   method=ALL, increment=False, cache_name=None):
+    usage = get_usage(request, group, fn, key, rate, method, increment, cache_name)
     if usage is None:
         return False
 
@@ -158,7 +158,7 @@ def is_ratelimited(request, group=None, fn=None, key=None, rate=None,
 
 
 def get_usage(request, group=None, fn=None, key=None, rate=None, method=ALL,
-              increment=False):
+              increment=False, cache_name=None):
     if group is None and fn is None:
         raise ImproperlyConfigured('get_usage must be called with either '
                                    '`group` or `fn` arguments')
@@ -222,7 +222,8 @@ def get_usage(request, group=None, fn=None, key=None, rate=None, method=ALL,
     window = _get_window(value, period)
     initial_value = 1 if increment else 0
 
-    cache_name = getattr(settings, 'RATELIMIT_USE_CACHE', 'default')
+    if cache_name is None:
+        cache_name = getattr(settings, 'RATELIMIT_USE_CACHE', 'default')
     cache = caches[cache_name]
     cache_key = _make_cache_key(group, window, rate, value, method)
 
diff --git a/django_ratelimit/decorators.py b/django_ratelimit/decorators.py
index 40c9541..48d52be 100644
--- a/django_ratelimit/decorators.py
+++ b/django_ratelimit/decorators.py
@@ -11,14 +11,14 @@
 __all__ = ['ratelimit']
 
 
-def ratelimit(group=None, key=None, rate=None, method=ALL, block=True):
+def ratelimit(group=None, key=None, rate=None, method=ALL, block=True, cache_name=None):
     def decorator(fn):
         @wraps(fn)
         def _wrapped(request, *args, **kw):
             old_limited = getattr(request, 'limited', False)
             ratelimited = is_ratelimited(request=request, group=group, fn=fn,
                                          key=key, rate=rate, method=method,
-                                         increment=True)
+                                         increment=True, cache_name=cache_name)
             request.limited = ratelimited or old_limited
             if ratelimited and block:
                 cls = getattr(
diff --git a/django_ratelimit/tests.py b/django_ratelimit/tests.py
index a58c89e..ae01f2e 100644
--- a/django_ratelimit/tests.py
+++ b/django_ratelimit/tests.py
@@ -670,3 +670,66 @@ def test_empty_ip(self):
         with self.assertRaises(ImproperlyConfigured):
             _get_ip(req)
 
+
+    def test_cache_name_parameter(self):
+        """Test that the cache_name parameter works correctly."""
+        @ratelimit(key='ip', rate='1/m', block=False, cache_name='default')
+        def view(request):
+            return request.limited
+
+        assert not view(rf.get('/'))
+        assert view(rf.get('/'))
+
+    def test_cache_name_parameter_different_caches(self):
+        """Test that different cache names result in separate rate limiting."""
+        @ratelimit(key='ip', rate='1/m', block=False, cache_name='cache1')
+        def view1(request):
+            return request.limited
+
+        @ratelimit(key='ip', rate='1/m', block=False, cache_name='cache2')
+        def view2(request):
+            return request.limited
+
+        # These should be rate limited separately
+        assert not view1(rf.get('/'))
+        assert not view2(rf.get('/'))  # Different cache, so not limited
+        assert view1(rf.get('/'))
+        assert view2(rf.get('/'))
+
+    def test_get_usage_with_cache_name(self):
+        """Test the get_usage function with the cache_name parameter."""
+        req = rf.get('/')
+        usage1 = get_usage(req, group='test', key='ip', rate='1/m', cache_name='default')
+        usage2 = get_usage(req, group='test', key='ip', rate='1/m', cache_name='default')
+
+        self.assertEqual(usage1['count'], 0)
+        self.assertEqual(usage2['count'], 0)  # Same cache, same count
+
+    def test_is_ratelimited_with_cache_name(self):
+        """Test the is_ratelimited function with the cache_name parameter."""
+        req = rf.get('/')
+
+        # Test with increment
+        result1 = is_ratelimited(req, group='test', key='ip', rate='1/m',
+                                 increment=True, cache_name='default')
+        result2 = is_ratelimited(req, group='test', key='ip', rate='1/m',
+                                 increment=True, cache_name='default')
+
+        self.assertFalse(result1)  # First request
+        self.assertTrue(result2)  # Second request (limited)
+
+    def test_cache_name_none_uses_default(self):
+        """Test that cache_name=None uses the default cache."""
+        @ratelimit(key='ip', rate='1/m', block=False, cache_name=None)
+        def view(request):
+            return request.limited
+
+        assert not view(rf.get('/'))
+        assert view(rf.get('/'))
+
+    def test_multiple_cache_aliases_in_settings(self):
+        """Test that RATELIMIT_USE_CACHE can be a list of cache names."""
+        with self.settings(RATELIMIT_USE_CACHE=['cache1', 'cache2']):
+            # This should not raise an error if both caches are configured
+            # The actual behavior depends on the cache configuration
+            pass
diff --git a/docs/settings.rst b/docs/settings.rst
index 02d3680..f25a537 100644
--- a/docs/settings.rst
+++ b/docs/settings.rst
@@ -46,6 +46,25 @@ for example:
 The name of the cache (from the ``CACHES`` dict) to use. Defaults to
 ``'default'``.
 
+.. versionadded:: 4.2
+
+This setting can also be a list or tuple of cache names, which will be
+validated during Django's system checks. This is useful for applications
+that need to support multiple cache backends.
+
+``RATELIMIT_CACHE_NAME_PARAMETER``
+----------------------------------
+
+.. versionadded:: 4.2
+
+The ``cache_name`` parameter can be passed to the ``@ratelimit`` decorator and
+core functions to specify which cache to use for rate limiting. This allows
+different rate limits to use different cache backends, providing more
+flexibility in cache configuration.
+
+When ``cache_name`` is ``None`` (the default), the value of the
+``RATELIMIT_USE_CACHE`` setting is used.
+
 ``RATELIMIT_VIEW``
 ------------------
diff --git a/docs/usage.rst b/docs/usage.rst
index d9c0c99..e9bf544 100644
--- a/docs/usage.rst
+++ b/docs/usage.rst
@@ -19,7 +19,7 @@ Import:
 
     from django_ratelimit.decorators import ratelimit
 
-.. py:decorator:: ratelimit(group=None, key=, rate=None, method=ALL, block=True)
+.. py:decorator:: ratelimit(group=None, key=, rate=None, method=ALL, block=True, cache_name=None)
 
    :arg group:
        *None* A group of rate limits to count together. Defaults to the
@@ -50,6 +50,11 @@ Import:
    :arg block:
        *True* Whether to block the request instead of annotating.
 
+   :arg cache_name:
+        *None* The cache alias to use for rate limiting. If ``None``, uses
+        the value of the ``RATELIMIT_USE_CACHE`` setting. Allows using
+        different caches for different rate limits.
+
 HTTP Methods
 ------------
 
@@ -153,6 +158,21 @@ Examples
         # Use `X-Cluster-Client-IP` but fall back to REMOTE_ADDR.
         return HttpResponse()
 
+    @ratelimit(key='ip', rate='5/m', cache_name='redis_cache')
+    def redis_limited_view(request):
+        # Use a specific Redis cache for rate limiting
+        return HttpResponse()
+
+    @ratelimit(key='ip', rate='10/m', cache_name='memcached_cache')
+    def memcached_limited_view(request):
+        # Use a specific Memcached cache for rate limiting
+        return HttpResponse()
+
+    @ratelimit(key='ip', rate='1/m', cache_name=None)
+    def default_cache_view(request):
+        # Use the default cache (RATELIMIT_USE_CACHE setting)
+        return HttpResponse()
+
 Class-Based Views
 -----------------
 
@@ -220,7 +240,7 @@ functionality in ``ratelimit.core``. The two major methods are
     from django_ratelimit.core import get_usage, is_ratelimited
 
 .. py:function:: get_usage(request, group=None, fn=None, key=None, \
-                 rate=None, method=ALL, increment=False)
+                 rate=None, method=ALL, increment=False, cache_name=None)
 
    :arg request:
        *None* The HTTPRequest object.
@@ -254,6 +274,10 @@ functionality in ``ratelimit.core``. The two major methods are
    :arg increment:
        *False* Whether to increment the count or just check.
 
+   :arg cache_name:
+        *None* The cache alias to use for rate limiting. If ``None``, uses
+        the value of the ``RATELIMIT_USE_CACHE`` setting.
+
    :returns dict or None:
        Either returns None, indicating that ratelimiting was not active
        for this request (for some reason) or returns a dict including
@@ -262,7 +286,7 @@ functionality in ``ratelimit.core``. The two major methods are
 
 .. py:function:: is_ratelimited(request, group=None, fn=None, \
                  key=None, rate=None, method=ALL, \
-                 increment=False)
+                 increment=False, cache_name=None)
 
    :arg request:
        *None* The HTTPRequest object.
@@ -296,6 +320,10 @@ functionality in ``ratelimit.core``. The two major methods are
    :arg increment:
        *False* Whether to increment the count or just check.
 
+   :arg cache_name:
+        *None* The cache alias to use for rate limiting. If ``None``, uses
+        the value of the ``RATELIMIT_USE_CACHE`` setting.
+
    :returns bool:
        Whether this request should be limited or not.
 
diff --git a/test_settings.py b/test_settings.py
index 0245d39..06ba9cd 100644
--- a/test_settings.py
+++ b/test_settings.py
@@ -28,6 +28,14 @@
         'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
         'LOCATION': 'test-instant-expiration',
     },
+    'cache1': {
+        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
+        'LOCATION': 'cache1',
+    },
+    'cache2': {
+        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
+        'LOCATION': 'cache2',
+    },
 }
 
 DATABASES = {
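
A minimal usage sketch of the new cache_name parameter introduced by this patch. The 'throttle' cache alias and the view names below are illustrative assumptions, not names introduced by the diff; key='ip' and the rate syntax are existing django-ratelimit API, while cache_name and the list form of RATELIMIT_USE_CACHE come from the hunks above.

# settings.py -- 'throttle' is an assumed alias, for illustration only.
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
    },
    'throttle': {
        'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        'LOCATION': 'throttle',
    },
}

# The updated system check also accepts a list or tuple of aliases here
# (e.g. ['default', 'throttle']) and validates each one against CACHES.
RATELIMIT_USE_CACHE = 'default'

# views.py
from django.http import HttpResponse

from django_ratelimit.decorators import ratelimit


@ratelimit(key='ip', rate='10/m', cache_name='throttle')
def throttled_view(request):
    # Rate-limit counters for this view live in the 'throttle' cache.
    return HttpResponse('ok')


@ratelimit(key='ip', rate='10/m')
def plain_view(request):
    # cache_name omitted: get_usage falls back to RATELIMIT_USE_CACHE.
    return HttpResponse('ok')

The same cache_name keyword can be passed directly to get_usage() and is_ratelimited() from django_ratelimit.core, as the core.py hunks above show.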