django-cacheops-7.2/.editorconfig
root = true
[*]
indent_style = space
indent_size = 4
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true
[Makefile]
indent_style = tab
[*.{yml,yaml}]
indent_size = 2
django-cacheops-7.2/.github/workflows/ci.yml
name: "CI"
on:
push:
branches:
- master
pull_request:
jobs:
lint:
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v3
- name: Set up Python 3.13
uses: actions/setup-python@v4
with:
python-version: "3.13"
- name: "Install Dependencies"
run: pip install flake8
- name: Lint
run: flake8
test:
runs-on: ubuntu-22.04
continue-on-error: ${{ matrix.experimental }}
strategy:
fail-fast: false
matrix:
python-version: ["3.8", "3.9", "3.10", "3.11", "3.12", "3.13", "pypy3.10"]
experimental: [false]
include:
- python-version: "3.13"
experimental: true
services:
postgres:
image: postgres
env:
POSTGRES_PASSWORD: cacheops
POSTGRES_USER: cacheops
POSTGRES_HOST_AUTH_METHOD: trust
ports:
- 5432:5432
redis:
image: redis
ports:
- 6379:6379
mysql:
image: mysql
env:
MYSQL_ROOT_PASSWORD: cacheops
MYSQL_DATABASE: cacheops
ports:
- 3306:3306
options: --health-cmd="mysqladmin ping" --health-interval=10s --health-timeout=5s --health-retries=10
name: ${{ matrix.experimental && 'Django main [ok to fail]' || format('Python {0}', matrix.python-version) }}
steps:
- uses: actions/checkout@v3
- name: Setup python
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
architecture: x64
- name: "Install Dependencies"
run: pip install tox tox-gh-actions
- name: "Run tests"
run: tox ${{ matrix.experimental && '-e py313-djmain' || '' }}
env:
MYSQL_HOST: 127.0.0.1
django-cacheops-7.2/.gitignore
.idea/
*.pyc
dist
*.egg-info
build
sqlite*.db
.tox
.cache
django-cacheops-7.2/CHANGELOG
7.2
- support and test against Python 3.13 and Django 5.1/5.2 (thx to Stu Tomlinson)
- updated md5 to set usedforsecurity for FIPS compliance (Logan Bibby)
- skip ArrayField in exact conds
7.1
- support and test against Python 3.12 and Django 5.0
- prevent dups in conjs and disjs by using proper data-structures (thx Sergey Prokopiev for a test)
- unpin funcy major version
- fixed conj keys TTL in Redis 7.x
- updated and cleaned up tests
7.0.2
- fixed .aggregate()
- fixed big memory usage during migrations
- fixed INSIDEOUT in older redises
- better handle model families with abstracts in them
- allow funcy 2.0+
7.0.1
- made it work with Redis 6.x and older again
- handle abstract models better
- some docs improvements
7.0
- support Django 4.2b and 5.0a
- added a new insideout mode
- made join invalidation more granular
Backwards incompatible changes:
- dropped Python 3.5, 3.6 and Django 2.1, 2.2, 3.0 and 3.1 support
- removed CACHEOPS_LRU
- removed CacheopsRedis, should inherit redis.Redis instead
6.2
- support Python 3.11 and Django 4.1
- added command to clear stale cacheops keys (Bruno Alla)
- fixed `invalidate_m2o` for polymorphic models (#430) (Andrey Alekseev)
- updated README: TOC, link to the post, some explanations
6.1
- support Django 3.2 and 4.0 (thx to Olivér Kecskeméty)
- do not gulp commit errors (Oleg Yamnikov)
- fixed precall key when a prefix is defined (Peter Baumgartner)
- fixed m2o/m2m queries invalidation on object deletion (thx to Sergey Tikhonov)
6.0
- support and test against Python 3.9 and Django 3.1/3.2
- added custom serializers support (thx to Arcady Usov)
- support callable extra in @cached_as() and friends
- made simple cache obey prefix
- skip JSONFields for purposes of invalidation
- configure skipped fields by internal types, classes still supported
- handle `DatabaseError` on transaction cleanup (Roman Gorbil)
- do not query old object if cacheops is disabled
- do not fetch deferred fields during invalidation, fixes #387
Backwards incompatible changes:
- callable `extra` param, including type, now behaves differently
- simple cache now uses prefix
5.1
- support subqueries in annotations (Jeremy Stretch)
- included tests into distro (John Vandenberg)
- fixed .invalidated_update(), if updated QuerySet had related fields selected (M1hacka)
- fixed possible deadlock in .invalidated_update() (M1hacka)
- fixed filtering with expressions
- fixed queries filtering in Exists (thx to Axel Wegener)
- updated min redis-py to 3.0.0
5.0.1
- fixed reverse o2o invalidation (thx to John Anderson)
- fixed unstable cache key when field validators are used
- guard against non-integer timeouts
5.0
- support Python 3.8 and Django 3.0 (thx to Misha Kalyna)
- improve model fields stamping (Alex Lokhman)
- disabled postponed invalidation when no_invalidation is applied (Vladimir)
- fixed custom manager derived from BaseManager (Eric Plaster)
Backwards incompatible changes:
- dropped Python 2.7 and Djangos before 2.1
- Redis 4.0+ required
4.2
- support Django 2.1 and 2.2
- added keep_fresh option to @cached_as (George Lee)
- pass CACHEOPS_SENTINEL options through (David Fuentes Baldomir)
- made SKIP_FIELDS and LONG_DISJUNCTION configurable (Nic Wolff)
- fixed .aggregate() over .annotate() fields in Django 2.2
- fixed .bulk_create() in Django 2.2 (Grzegorz Szczepanczyk)
- fixed test database to work in environments without /dev/shm (George Lee)
- fixed proxy/abstract model bug
- added test case for issue #312 (Andy Tzeng)
- some warnings and doc-strings improvements
4.1
- use UNLINK instead of DEL when available, use Redis 4.0+ for that
- request m2m objects for invalidation using correct db
- fixed caching counts, aggregates and exists in writes,
dirty transactions and while cacheops is disabled
- fixed various multidb/invalidation issues (Svan70)
- fixed db in .invalidated_update()
- fixed possible key discrepancy in cache_on_save with multidb
- fixed jinja2 support in Python 3
- documented CACHEOPS_CLIENT_CLASS
4.0.7
- fixed RawSQL() and Subquery() (thx to thakryptex)
- made the Redis client class configurable (Nic Wolff)
- package is now also distributed as a universal wheel (Jon Dufresne)
4.0.6
- fixed m2m invalidation issue with certain configs
- fixed catastrophic backtracking in template extensions
4.0.5
- fixed db selection in invalidation fetch and .invalidated_update() when router fails (M1ha Shvn)
- fixed unlikely "_clone() unexpected keyword '_cacheprofile'" error
- fixed LookupError bug
- fixed .meta.concrete_model not set bug
- fixed docs on template tags
4.0.4
- fixed caching while app registry not ready
- fixed random ordered dicts producing varying SQL for same query
4.0.3
- configure via Sentinel (Tapo4ek)
4.0.2
- fixed caching django migrations
4.0.1
- do not prevent fast path deletes on not cached models
- minor optimization
4.0
- added cache key prefix function
- added .aggregate() caching
- support QuerySet.union() and friends
- added cache_invalidated signal (thx to Kimmo Kiiski)
- cache .first() and .last() on 'get' op
- correctly skip or show nice error message on non-django models/managers
- allow cleaning file cache in non default place
- cache keys no longer change on fields reorder
- fixed .invalidated_update() on updated object versions
- fixed template tags in Django 2.0
- fixed deprecation warnings in Python 3
- use module global CACHEOPS_DEBUG to cache evolving code
- minor optimizations
Backwards incompatible changes:
- dropped Django 1.7 support
- dropped write_only flag
- dropped implicit write_only in .get_or_create(), .select_for_update() and friends
- .iterator() is never cached now
- invalidate_all() works immediately even in transaction
- @cached_as(timeout=0) no longer means timeout is ignored/derived from querysets,
use timeout=None instead.
3.2.1
- fixed CACHEOPS_DEGRADE_ON_FAILURE=True
3.2
- support Django 1.11
- support Python 3.6
- preliminary support for Django 2.0
- support multidb nested transactions
- fixed pk with default (leonardo orozco)
3.1.3
- better dirty sql heuristic
- fixed on commit sequence issue (thx to Irae Hueck Costa)
- fixed dup field__eq queries (Michał Ochman)
3.1.2
- fixed querysets with custom .iterator() (like django-polymorphic)
- support no argument @cached_view
- check that sample is passed into @cached_as()
3.1.1
- fixed unexpected dirty transaction
- fixed a bug with destroying manager
- fixed CACHEOPS setting and upper-case app labels
3.1
- added locking to combat dog-pile effect
- handle transactions smarter (ihucos)
- handle functions in @cached_as() and @cached() args
- do not allow unknown kwargs in @cached_as()
3.0.1
- support Django 1.10 (worked before, but now it's official)
- accept empty CACHEOPS_REDIS setting
- fixed ImportError in cleanfilecache command (Roman)
3.0
- support PyPy
- support Python 3.5
- support Django 1.9
- added transparent transaction handling (Joel Hillacre)
- added .invalidated_update()
- added cache_read signal (Joona Pääkkönen)
- added CACHEOPS_ENABLED setting
- invalidate on intermediate tables in long joins
- support settings override
- made CACHEOPS keys case insensitive
- allow redis connection settings to be specified by a URL (Tim Savage)
- fixed router support
- fixed clone cache settings affecting original queryset
- more fixes for non-ascii in str params
- calc func cache keys smarter
- no_invalidation optimizations
Backwards incompatible changes:
- Django 1.6 and earlier no longer supported
- dropped old CACHEOPS setting format
- removed CACHEOPS_FAKE setting
- removed .cached_call() from basic and file caches
- disabled cacheops for fake migration models
2.4.5
- backport: disabled cacheops for fake migration models
- disabled cacheops for south models
- fixed get_queryset() on custom qs rename bug
2.4.3
- fixed .get() on reverse fk related manager
- fixed memory leak on migrations
2.4.2
- fixed .values() and .values_list() in Django 1.9
- stopped sharing cache between proxies and base
2.4.1
- export FileCache and RedisCache from cacheops
- allow non-ascii in str params
- fixed subqueries with different db
2.4
- added @decorator_tag to easily create cached template tags
- create redis client lazily
- let invalidate @cached_view()
- support template responses used by generic CBV
- support argumentless no parentheses form for @cached and @cached_view
- removed unneeded invalidation calls in .bulk_create()
- allow decorating built-in and external functions
- added .cached_call() to simple and file cache
- added key_func argument to @cached_as()
- check that timeout is specified in CACHEOPS setting
- fixed m2m invalidation on reverse changes
- fixed passing kwargs in @cached_as()
- fixed @cached with no parentheses
- fixed .bulk_create() API in Django 1.4
2.3.2
- made cacheops invalidate before other post_* signals (Emil Stenström)
- fixed invalidation on proxy/base model changes
- added no_invalidation to fake cacheops
- test against MySQL
2.3.1
- updated support for Django 1.8 (Andriy Sokolovskiy)
- fixed bulk_create() to return objects instead of None (Ilya Baryshev)
2.3
- Django 1.8 support and preliminary Django 1.9 support
- made 'ops' config option to accept single string
- added invalidate_fragment()
- added a way to get/set/delete function cache easily
- added redis.TimeoutError to degradation handling (George Kappel)
- fixed invalidation on QuerySet.bulk_create(),
worked only from Manager previously
- fixed .bulk_create() API to comply with Django
- minor optimizations
2.2.1
- fixed thread local error
- fixed ops = 'all' both in config and .cache()
2.2
- switched to new CACHEOPS setting style
- added CACHEOPS_DEFAULTS setting
- work as LRU cache
- cache .exists() calls in Django 1.6+
- invalidate on .bulk_create()
- added no_invalidation context manager/decorator
- documented local_get and cache_on_save
- fixed saving objects with F-expression fields (Fedor Nasyrov)
- fixed queries with Django 1.7 transforms
- fixed binary fields in python 3
- stopped using simplejson
- simpler queryset key calculation,
third party fields should be more compatible now
- removed bogus clone param from .nocache() call
2.1.1
- fixed bug in m2m invalidation
- fixed bug with null geometry fields
- fixed unpickling objects with BinaryFields
2.1
- support Django 1.7
- do not fetch from cache when doing .select_for_update() and similar,
but do write to cache if it's enabled
- fixed inherited models exception in admin,
multi-table models are still not really supported!
- fixed fake cacheops
- fixed deprecation warning in Django 1.6 admin
2.0
- conditions on related models will now invalidate queryset
- m2m invalidation is much more granular
- removed requirement that timeout should not be greater than default
- lua scripting is used to save and invalidate cache, making things faster
- better invalidation for complex and custom fields
- silent stringify of unknown objects by default
- support caching django.contrib.gis queries (koodjo)
- cacheops is now thread-safe
- added a way to no-op cacheops
- added @cached_view() and @cached_view_as()
- pass several samples for @cached_as() and @cached_view_as()
- fixed working with querysets created by non-patched constructor (#3 and dups)
- fixed invalidate_model() for proxy models
- fixed deepcopy(queryset) bug
- fixed possible collisions when cached functions passed complex structured arguments
- fixed StringifyError on timedelta (mpyatishev)
Backwards incompatible changes:
- filters on TextFields no longer affect invalidation
- @cached_as() cache key will now depend on function arguments
- @cached_as() and @cached() will now depend on function line in a code,
permitting usage of lambdas and same named methods without passing extra
- @cached_as() and @cached() will now take timeout as first argument and extra as second.
Anyway using them as keyword arguments is recommended
- Django 1.2 no longer supported
- Redis 2.6+ is required
1.3.1
- fixed bug with negating "some"-conditions
- fixed bug with schemes unsync when invalidating model
Backwards incompatible changes:
- reverted .cache(write_only=...) behaviour to enable caching for all ops
- .cache(timeout=...) call will enable caching for all ops
1.3.0
- support generic relations (erthalion)
- support filtering by time equality
- optimizations for python 3
Backwards incompatible changes:
- .cache(write_only=...) doesn't enable caching for all ops anymore (not really intended)
1.2.1
- set six minimum version right (crazyzubr)
1.2
- Python 3 support
1.1.1
- fixed Django 1.5- compatibility (aykutozat)
1.1
- Django 1.6+ support
- added Django template tags
- fixed caching querysets combined with | and & operators
1.0.3
- added db_agnostic option to cache profile
- partial support for Django 1.6+
1.0.2
- fixed cached_on_save
- fixed .inplace() altering cache key
1.0.1
- .delete() method for simple cache
- .invalidate() method for @cached() and file_cache.cached() functions
1.0.0
- defend against model changes corrupting cache (ttyS15)
- support F-expressions (Yuego)
- fixed local_get with unhashable arg TypeError
- fixed caching of raw queries (Yuego)
0.9.9
- fixed file cache md5 reference
0.9.8
- support isnull lookup for better invalidation
- fixed 'Query' has no len()
- dumped django.utils.hashcompat in favor of hashlib
0.9.7
- support for flex models
- support @cached_as(SomeModel)
- file cache default dir changed to /tmp/cacheops_file_cache
- better support for tuples in extra param in @cached and jinja2 tags
0.9.6
- support graceful degradation on redis fail (Beres Botond)
0.9.5
- support for proxy models
0.9.4
- fixed occasional redis 100% cpu use (tumb1er)
0.9.3
- invalidate and cleanfilecache commands added to dist
- django 1.5 compatibility (jhpinson)
0.9.2
- compatibility with latest redis-py
- many other bug fixes
- minor optimizations
- better docs, including PERFORMANCE section
... lost in ancient history ...
django-cacheops-7.2/LICENSE
Copyright (c) 2011-2020, Alexander Schepanovski.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of cacheops nor the names of its contributors may
be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
django-cacheops-7.2/MANIFEST.in
include LICENSE
include README.rst
include CHANGELOG
include cacheops/lua/*
include manage.py bench.py
include requirements-test.txt
include tox.ini
recursive-include tests *.json
recursive-include tests *.py
django-cacheops-7.2/README.rst
Cacheops |Build Status|
=======================
A slick app that supports automatic or manual queryset caching and automatic
granular event-driven invalidation.
It uses `redis <https://redis.io/>`_ as backend for ORM cache and redis or
filesystem for simple time-invalidated one.
And there is more to it:
- decorators to cache any user function or view as a queryset or by time
- extensions for django and jinja2 templates
- transparent transaction support
- dog-pile prevention mechanism
- a couple of hacks to make django faster
.. contents:: Contents
:local:
:backlinks: top
Requirements
++++++++++++
Python 3.8+, Django 3.2+ and Redis 4.0+.
Installation
++++++++++++
Using pip:
.. code:: bash
$ pip install django-cacheops
# Or from github directly
$ pip install git+https://github.com/Suor/django-cacheops.git@master
Setup
+++++
Add ``cacheops`` to your ``INSTALLED_APPS``.
Setup redis connection and enable caching for desired models:
.. code:: python
CACHEOPS_REDIS = {
'host': 'localhost', # redis-server is on same machine
'port': 6379, # default redis port
'db': 1, # SELECT non-default redis database
# using separate redis db or redis instance
# is highly recommended
'socket_timeout': 3, # connection timeout in seconds, optional
'password': '...', # optional
'unix_socket_path': '' # replaces host and port
}
# Alternatively the redis connection can be defined using a URL:
CACHEOPS_REDIS = "redis://localhost:6379/1"
# or
CACHEOPS_REDIS = "unix://path/to/socket?db=1"
# or with password (note a colon)
CACHEOPS_REDIS = "redis://:password@localhost:6379/1"
# If you want to use sentinel, specify this variable
CACHEOPS_SENTINEL = {
'locations': [('localhost', 26379)], # sentinel locations, required
'service_name': 'mymaster', # sentinel service name, required
'socket_timeout': 0.1, # connection timeout in seconds, optional
'db': 0 # redis database, default: 0
... # everything else is passed to Sentinel()
}
# Use your own redis client class, should be compatible or subclass redis.Redis
CACHEOPS_CLIENT_CLASS = 'your.redis.ClientClass'
CACHEOPS = {
# Automatically cache any User.objects.get() calls for 15 minutes
# This also includes .first() and .last() calls,
# as well as request.user or post.author access,
# where Post.author is a foreign key to auth.User
'auth.user': {'ops': 'get', 'timeout': 60*15},
# Automatically cache all gets and queryset fetches
# to other django.contrib.auth models for an hour
'auth.*': {'ops': {'fetch', 'get'}, 'timeout': 60*60},
# Cache all queries to Permission
# 'all' is an alias for {'get', 'fetch', 'count', 'aggregate', 'exists'}
'auth.permission': {'ops': 'all', 'timeout': 60*60},
# Enable manual caching on all other models with default timeout of an hour
# Use Post.objects.cache().get(...)
# or Tags.objects.filter(...).order_by(...).cache()
# to cache particular ORM request.
# Invalidation is still automatic
'*.*': {'ops': (), 'timeout': 60*60},
# And since ops is empty by default you can rewrite last line as:
'*.*': {'timeout': 60*60},
# NOTE: binding signals has its overhead, like preventing fast mass deletes,
# you might want to only register whatever you cache and dependencies.
# Finally you can explicitly forbid even manual caching with:
'some_app.*': None,
}
You can configure default profile settings with ``CACHEOPS_DEFAULTS``. This way you can rewrite the config above:
.. code:: python
CACHEOPS_DEFAULTS = {
'timeout': 60*60
}
CACHEOPS = {
'auth.user': {'ops': 'get', 'timeout': 60*15},
'auth.*': {'ops': ('fetch', 'get')},
'auth.permission': {'ops': 'all'},
'*.*': {},
}
Using ``'*.*'`` with non-empty ``ops`` is **not recommended**
since it will easily cache something you don't intend to or even know about, like migration tables.
A better approach is to restrict by app with ``'app_name.*'``.
Besides ``ops`` and ``timeout`` options you can also use:
``local_get: True``
To cache simple gets for this model in process local memory.
This is very fast, but is not invalidated in any way until process is restarted.
Still could be useful for extremely rarely changed things.
``cache_on_save=True | 'field_name'``
To write an instance to cache upon save.
Cached instance will be retrieved on ``.get(field_name=...)`` request.
Setting to ``True`` causes caching by primary key.
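For illustration, a minimal sketch of a profile entry combining these options (the ``some_app.post`` label is hypothetical):

.. code:: python

    CACHEOPS = {
        # cache gets for 15 minutes, keep simple gets in process-local memory,
        # and write each instance to cache on save, keyed by primary key
        'some_app.post': {'ops': 'get', 'timeout': 60*15,
                          'local_get': True, 'cache_on_save': True},
    }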
Additionally, you can tell cacheops to degrade gracefully on redis fail with:
.. code:: python
CACHEOPS_DEGRADE_ON_FAILURE = True
There is also a possibility to make all cacheops methods and decorators no-op, e.g. for testing:
.. code:: python
from django.test import override_settings
@override_settings(CACHEOPS_ENABLED=False)
def test_something():
# ...
assert cond
Usage
+++++
| **Automatic caching**
It's automatic, you just need to set it up.
| **Manual caching**
You can force any queryset to use cache by calling its ``.cache()`` method:
.. code:: python
Article.objects.filter(tag=2).cache()
Here you can specify which ops should be cached for the queryset, for example, this code:
.. code:: python
qs = Article.objects.filter(tag=2).cache(ops=['count'])
    paginator = Paginator(qs, ipp)
    articles = list(paginator.page(page_num)) # hits database
will cache the count call in ``Paginator`` but not the later articles fetch.
There are five possible actions - ``get``, ``fetch``, ``count``, ``aggregate`` and ``exists``.
You can pass any subset of these ops to the ``.cache()`` method, even an empty one - to turn off caching.
There is, however, a shortcut for the latter:
.. code:: python
qs = Article.objects.filter(visible=True).nocache()
qs1 = qs.filter(tag=2) # hits database
qs2 = qs.filter(category=3) # hits it once more
It is useful when you want to disable automatic caching on a particular queryset.
You can also override the default timeout for a particular queryset with ``.cache(timeout=...)``.
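A one-line sketch of the timeout override (the timeout value is illustrative):

.. code:: python

    articles = list(Article.objects.filter(tag=2).cache(timeout=30))  # cached for 30 seconds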
| **Function caching**
You can cache and invalidate result of a function the same way as a queryset.
Cached results of the next function will be invalidated on any ``Article`` change,
addition or deletion:
.. code:: python
from cacheops import cached_as
@cached_as(Article, timeout=120)
def article_stats():
return {
        'tags': list(Article.objects.values('tag').annotate(Count('id'))),
'categories': list(Article.objects.values('category').annotate(Count('id')))
}
Note that we are using ``list`` on both querysets here: that's because we don't want
to cache queryset objects but their results.
Also note that if you want to filter queryset based on arguments,
e.g. to make invalidation more granular, you can use a local function:
.. code:: python
def articles_block(category, count=5):
qs = Article.objects.filter(category=category)
@cached_as(qs, extra=count)
def _articles_block():
articles = list(qs.filter(photo=True)[:count])
if len(articles) < count:
articles += list(qs.filter(photo=False)[:count-len(articles)])
return articles
return _articles_block()
We added ``extra`` here to make different keys for calls with same ``category`` but different
``count``. Cache key will also depend on function arguments, so we could just pass ``count`` as
an argument to inner function. We also omitted ``timeout`` here, so a default for the model
will be used.
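For instance, a trimmed sketch of that alternative, passing ``count`` into the inner function instead of using ``extra``:

.. code:: python

    def articles_block(category, count=5):
        qs = Article.objects.filter(category=category)

        @cached_as(qs)
        def _articles_block(count):
            # count becomes part of the cache key since it is a function argument
            return list(qs.filter(photo=True)[:count])

        return _articles_block(count)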
Another possibility is to make function cache invalidate on changes to any one of several models:
.. code:: python
@cached_as(Article.objects.filter(public=True), Tag)
def article_stats():
return {...}
As you can see, we can mix querysets and models here.
| **View caching**
You can also cache and invalidate a view as a queryset. This works mostly the same way as function
caching, but only the path of the request is used to construct the cache key:
.. code:: python
from cacheops import cached_view_as
@cached_view_as(News)
def news_index(request):
# ...
return render(...)
You can pass ``timeout``, ``extra`` and several samples the same way as to ``@cached_as()``. Note that you can pass a function as ``extra``:
.. code:: python
@cached_view_as(News, extra=lambda req: req.user.is_staff)
def news_index(request):
# ... add extra things for staff
return render(...)
A function passed as ``extra`` receives the same arguments as the cached function.
Class based views can also be cached:
.. code:: python
class NewsIndex(ListView):
model = News
news_index = cached_view_as(News, ...)(NewsIndex.as_view())
Invalidation
++++++++++++
Cacheops uses both time and event-driven invalidation. The event-driven one
listens on model signals and invalidates appropriate caches on ``Model.save()``, ``.delete()``
and m2m changes.
Invalidation tries to be granular which means it won't invalidate a queryset
that cannot be influenced by added/updated/deleted object judging by query
conditions. Most of the time this will do what you want, if it won't you can use
one of the following:
.. code:: python
from cacheops import invalidate_obj, invalidate_model, invalidate_all
invalidate_obj(some_article) # invalidates queries affected by some_article
invalidate_model(Article) # invalidates all queries for model
invalidate_all() # flush redis cache database
And last there is ``invalidate`` command::
./manage.py invalidate articles.Article.34 # same as invalidate_obj
./manage.py invalidate articles.Article # same as invalidate_model
./manage.py invalidate articles # invalidate all models in articles
And the one that FLUSHES cacheops redis database::
./manage.py invalidate all
Don't use that if you share redis database for both cache and something else.
| **Turning off and postponing invalidation**
There is also a way to turn off invalidation for a while:
.. code:: python
from cacheops import no_invalidation
with no_invalidation:
# ... do some changes
obj.save()
Also works as decorator:
.. code:: python
@no_invalidation
def some_work(...):
# ... do some changes
obj.save()
Combined with ``try ... finally`` it could be used to postpone invalidation:
.. code:: python
try:
with no_invalidation:
# ...
finally:
invalidate_obj(...)
# ... or
invalidate_model(...)
Postponing invalidation can speed up batch jobs.
| **Mass updates**
Normally `qs.update(...)` doesn't emit any events and thus doesn't trigger invalidation.
And there is no transparent and efficient way to do that: trying to act on conditions will
invalidate too much if update conditions are orthogonal to many queries conditions,
and to act on specific objects we will need to fetch all of them,
which `QuerySet.update()` users generally try to avoid.
In the case you actually want to perform the latter cacheops provides a shortcut:
.. code:: python
qs.invalidated_update(...)
Note that all the updated objects are fetched twice, prior and post the update.
Components
++++++++++
Simple time-invalidated cache
-----------------------------
To cache result of a function call or a view for some time use:
.. code:: python
from cacheops import cached, cached_view
@cached(timeout=number_of_seconds)
def top_articles(category):
return ... # Some costly queries
@cached_view(timeout=number_of_seconds)
def top_articles(request, category=None):
# Some costly queries
return HttpResponse(...)
``@cached()`` will generate separate entry for each combination of decorated function and its
arguments. Also you can use ``extra`` same way as in ``@cached_as()``, most useful for nested
functions:
.. code:: python
@property
def articles_json(self):
@cached(timeout=10*60, extra=self.category_id)
def _articles_json():
...
return json.dumps(...)
return _articles_json()
You can manually invalidate or update a result of a cached function:
.. code:: python
top_articles.invalidate(some_category)
top_articles.key(some_category).set(new_value)
To invalidate a cached view you can pass an absolute URI instead of the request:
.. code:: python
top_articles.invalidate('http://example.com/page', some_category)
Cacheops also provides get/set primitives for simple cache:
.. code:: python
from cacheops import cache
cache.set(cache_key, data, timeout=None)
cache.get(cache_key)
cache.delete(cache_key)
``cache.get`` will raise ``CacheMiss`` if nothing is stored for given key:
.. code:: python
from cacheops import cache, CacheMiss
try:
result = cache.get(key)
except CacheMiss:
... # deal with it
File Cache
----------
File based cache can be used the same way as simple time-invalidated one:
.. code:: python
from cacheops import file_cache
@file_cache.cached(timeout=number_of_seconds)
def top_articles(category):
return ... # Some costly queries
@file_cache.cached_view(timeout=number_of_seconds)
def top_articles(request, category):
# Some costly queries
return HttpResponse(...)
# later, on appropriate event
top_articles.invalidate(some_category)
# or
top_articles.key(some_category).set(some_value)
# primitives
file_cache.set(cache_key, data, timeout=None)
file_cache.get(cache_key)
file_cache.delete(cache_key)
It has several improvements upon django built-in file cache, both related to high load.
First, it's safe against concurrent writes. Second, its invalidation is done as a separate task;
you'll need to call it from crontab for that to work::
/path/manage.py cleanfilecache
/path/manage.py cleanfilecache /path/to/non-default/cache/dir
Django templates integration
----------------------------
Cacheops provides tags to cache template fragments. They mimic ``@cached_as``
and ``@cached`` decorators, however, they require explicit naming of each fragment:
.. code:: django
{% load cacheops %}
    {% cached_as <queryset> <timeout> <fragment_name> [<extra1> <extra2> ...] %}
... some template code ...
{% endcached_as %}
    {% cached <timeout> <fragment_name> [<extra1> <extra2> ...] %}
... some template code ...
{% endcached %}
You can use ``None`` for timeout in ``@cached_as`` to use its default value for the model.
To invalidate cached fragment use:
.. code:: python
from cacheops import invalidate_fragment
invalidate_fragment(fragment_name, extra1, ...)
If you have more complex fragment caching needs, cacheops provides a helper to
make your own template tags which decorate a template fragment in a way
analogous to decorating a function with ``@cached`` or ``@cached_as``.
This is **experimental** feature for now.
To use it create ``myapp/templatetags/mycachetags.py`` and add something like this there:
.. code:: python
from cacheops import cached_as, CacheopsLibrary
register = CacheopsLibrary()
@register.decorator_tag(takes_context=True)
def cache_menu(context, menu_name):
from django.utils import translation
from myapp.models import Flag, MenuItem
request = context.get('request')
if request and request.user.is_staff():
# Use noop decorator to bypass caching for staff
return lambda func: func
return cached_as(
# Invalidate cache if any menu item or a flag for menu changes
MenuItem,
Flag.objects.filter(name='menu'),
# Vary for menu name and language, also stamp it as "menu" to be safe
extra=("menu", menu_name, translation.get_language()),
timeout=24 * 60 * 60
)
``@decorator_tag`` here creates a template tag behaving the same as returned decorator
upon wrapped template fragment. Resulting template tag could be used as follows:
.. code:: django
{% load mycachetags %}
{% cache_menu "top" %}
... the top menu template code ...
{% endcache_menu %}
... some template code ..
{% cache_menu "bottom" %}
... the bottom menu template code ...
{% endcache_menu %}
Jinja2 extension
----------------
Add ``cacheops.jinja2.cache`` to your extensions and use:
.. code:: jinja
    {% cached_as <queryset> [, timeout=<timeout>] [, extra=<extra>] %}
... some template code ...
{% endcached_as %}
or
.. code:: jinja
    {% cached [timeout=<timeout>] [, extra=<extra>] %}
...
{% endcached %}
Tags work the same way as corresponding decorators.
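One way to enable the extension with Django's built-in Jinja2 backend is a standard ``TEMPLATES`` entry, trimmed here to the relevant part:

.. code:: python

    TEMPLATES = [
        {
            'BACKEND': 'django.template.backends.jinja2.Jinja2',
            'APP_DIRS': True,
            'OPTIONS': {
                'extensions': ['cacheops.jinja2.cache'],
            },
        },
    ]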
Special topics
++++++++++++++
Transactions
------------
Cacheops transparently supports transactions. This is implemented by following simple rules:
1. Once transaction is dirty (has changes) caching turns off. The reason is that the state of database at this point is only visible to current transaction and should not affect other users and vice versa.
2. Any invalidating calls are scheduled to run on the outer commit of transaction.
3. Savepoints and rollbacks are also handled appropriately.
Mind that simple and file cache don't turn themselves off in transactions but work as usual.
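A sketch of how these rules play out with a standard ``transaction.atomic()`` block (an existing ``Article`` instance is assumed):

.. code:: python

    from django.db import transaction

    with transaction.atomic():
        article.save()                      # invalidation is scheduled, not run yet
        Article.objects.cache().get(pk=1)   # transaction is dirty, so cache is bypassed
    # scheduled invalidation for article runs here, on the outer commit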
Dog-pile effect prevention
--------------------------
There is an optional locking mechanism to prevent several threads or processes from simultaneously performing the same heavy task. It works with ``@cached_as()`` and querysets:
.. code:: python
@cached_as(qs, lock=True)
def heavy_func(...):
# ...
for item in qs.cache(lock=True):
# ...
It is also possible to specify ``lock: True`` in ``CACHEOPS`` setting but that would probably be a waste. Locking has no overhead on cache hit though.
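For completeness, a sketch of the per-model form of the same option (the model label is hypothetical):

.. code:: python

    CACHEOPS = {
        'some_app.report': {'ops': 'all', 'timeout': 60*60, 'lock': True},
    }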
Multiple database support
-------------------------
By default cacheops considers the query result to be the same for the same query, regardless of
which database was queried. That could be changed with the ``db_agnostic`` cache profile option:
.. code:: python
CACHEOPS = {
'some.model': {'ops': 'get', 'db_agnostic': False, 'timeout': ...}
}
Sharing redis instance
----------------------
Cacheops provides a way to share a redis instance by adding prefix to cache keys:
.. code:: python
CACHEOPS_PREFIX = lambda query: ...
# or
CACHEOPS_PREFIX = 'some.module.cacheops_prefix'
The most common usage would probably be a prefix by host name:
.. code:: python
# get_request() returns current request saved to threadlocal by some middleware
cacheops_prefix = lambda _: get_request().get_host()
A ``query`` object passed to callback also enables reflection on used databases and tables:
.. code:: python
def cacheops_prefix(query):
query.dbs # A list of databases queried
query.tables # A list of tables query is invalidated on
if set(query.tables) <= HELPER_TABLES:
return 'helper:'
if query.tables == ['blog_post']:
return 'blog:'
Custom serialization
--------------------
Cacheops uses ``pickle`` by default, employing its default protocol. But you can specify your own:
it might be any module or a class having ``.dumps()`` and ``.loads()`` functions. For example you can use ``dill`` instead, which can serialize more things like anonymous functions:
.. code:: python
CACHEOPS_SERIALIZER = 'dill'
One less obvious use is to fix pickle protocol, to use cacheops cache across python versions:
.. code:: python
import pickle
class CACHEOPS_SERIALIZER:
dumps = lambda data: pickle.dumps(data, 3)
loads = pickle.loads
Using memory limit
------------------
Cacheops offers an "insideout" mode: instead of conj sets containing cache keys, cache values carry a checksum of the random stamps stored in conj keys, and those stamps are checked on each read to make sure they stayed the same. To use that add to settings:
.. code:: python
CACHEOPS_INSIDEOUT = True # Might become default in future
And set up ``maxmemory`` and ``maxmemory-policy`` in redis config::
maxmemory 4gb
maxmemory-policy volatile-lru # or other volatile-*
Note that using any of ``allkeys-*`` policies might drop important invalidation structures of cacheops and lead to stale cache.
Memory usage cleanup
--------------------
**This does not apply to "insideout" mode. This issue doesn't happen there.**
In some cases, cacheops may leave some conjunction keys of expired cache keys in redis without being able to invalidate them. Those will still expire with age, but in the meantime may cause issues like slow invalidation (even "BUSY Redis ...") and extra memory usage. To prevent that it is advised to not cache complex queries, see `Performance tips <#performance-tips>`_, 5.
Cacheops ships with a ``cacheops.reap_conjs`` function that can clean up these keys,
ignoring conjunction sets with some reasonable size. It can be called using the ``reapconjs`` management command::
./manage.py reapconjs --chunk-size=100 --min-conj-set-size=10000 # with custom values
./manage.py reapconjs # with default values (chunks=1000, min size=1000)
The command is a small wrapper that calls a function with the main logic. You can also call it from your code, for example from a Celery task:
.. code:: python
from cacheops import reap_conjs
@app.task
def reap_conjs_task():
reap_conjs(
chunk_size=2000,
min_conj_set_size=100,
)
Keeping stats
-------------
Cacheops provides ``cache_read`` and ``cache_invalidated`` signals for you to keep track.
Cache read signal is emitted immediately after each cache lookup. Passed arguments are: ``sender`` - model class if queryset cache is fetched,
``func`` - decorated function and ``hit`` - fetch success as boolean value.
Here is a simple stats implementation:
.. code:: python
from cacheops.signals import cache_read
from statsd.defaults.django import statsd
def stats_collector(sender, func, hit, **kwargs):
event = 'hit' if hit else 'miss'
statsd.incr('cacheops.%s' % event)
cache_read.connect(stats_collector)
Cache invalidation signal is emitted after object, model or global invalidation passing ``sender`` and ``obj_dict`` args. Note that during normal operation cacheops only uses object invalidation, calling it once for each model create/delete and twice for update: passing old and new object dictionary.
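A minimal sketch of a handler for this signal, mirroring the statsd-based example above:

.. code:: python

    from cacheops.signals import cache_invalidated
    from statsd.defaults.django import statsd

    def invalidation_collector(sender, obj_dict, **kwargs):
        # sender is a model class, or None for invalidate_all()
        statsd.incr('cacheops.invalidated')

    cache_invalidated.connect(invalidation_collector)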
Troubleshooting
+++++++++++++++
CAVEATS
-------
1. Conditions other than ``__exact``, ``__in`` and ``__isnull=True`` don't make invalidation
more granular.
2. Conditions on TextFields, FileFields and BinaryFields don't make it either.
One should not test on their equality anyway. See `CACHEOPS_SKIP_FIELDS` though.
3. Update of "select_related" object does not invalidate cache for queryset.
Use ``.prefetch_related()`` instead.
4. Mass updates don't trigger invalidation by default. But see ``.invalidated_update()``.
5. Sliced queries are invalidated as non-sliced ones.
6. Doesn't work with ``.raw()`` and other sql queries.
7. Conditions on subqueries don't affect invalidation.
8. Doesn't work right with multi-table inheritance.
Here 1, 2, 3, 5 are part of the design compromise, trying to solve them will make
things complicated and slow. 7 can be implemented if needed, but it's
probably counter-productive since one can just break queries into simpler ones,
which cache better. 4 is a deliberate choice, making it "right" will flush
cache too much when update conditions are orthogonal to most queries conditions,
see, however, `.invalidated_update()`. 8 is postponed until it will gain
more interest or a champion willing to implement it emerges.
All unsupported things could still be used easily enough with the help of ``@cached_as()``.
Performance tips
----------------
Here come some performance tips to make cacheops and Django ORM faster.
1. When you use cache you pickle and unpickle lots of django model instances, which could be slow. You can optimize django models serialization with `django-pickling <https://github.com/Suor/django-pickling>`_.
2. Constructing querysets is rather slow in django, mainly because most of ``QuerySet`` methods clone self, then change it and return the clone. Original queryset is usually thrown away. Cacheops adds ``.inplace()`` method, which makes queryset mutating, preventing useless cloning:
.. code:: python
items = Item.objects.inplace().filter(category=12).order_by('-date')[:20]
You can revert queryset to cloning state using ``.cloning()`` call. Note that this is a micro-optimization technique. Using it is only desirable in the hottest places, not everywhere.
3. Use template fragment caching when possible, it's way more fast because you don't need to generate anything. Also pickling/unpickling a string is much faster than a list of model instances.
4. Run a separate redis instance for cache with disabled `persistence <https://redis.io/topics/persistence>`_. You can manually call `SAVE <https://redis.io/commands/save>`_ or `BGSAVE <https://redis.io/commands/bgsave>`_ to stay hot upon server restart.
5. If you filter a queryset on many different or complex conditions, cache could degrade performance (compared to uncached db calls) as a consequence of frequent cache misses. Disable cache in such cases entirely or on some heuristic which detects whether this request will probably be a hit, e.g. enable cache only when the filter uses just some primary fields (see the sketch after this list).
Caching querysets with a large number of filters also slows down all subsequent invalidation on that model (negligible for "insideout" mode). You can disable caching if more than some number of fields is used in the filter simultaneously.
6. Split database queries into smaller ones when you cache them. This goes against usual approach, but this allows invalidation to be more granular: smaller parts will be invalidated independently and each part will invalidate more precisely.
.. code:: python
Post.objects.filter(category__slug="foo")
# A single database query, but will be invalidated not only on
# any Category with .slug == "foo" change, but also for any Post change
Post.objects.filter(category=Category.objects.get(slug="foo"))
# Two queries, each invalidates only on a granular event:
    # either a Category with .slug == "foo" changes or a Post with a matching .category_id does
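A sketch of the heuristic mentioned in tip 5; the field set and the check are purely illustrative:

.. code:: python

    PRIMARY_FILTER_FIELDS = {'pk', 'id', 'slug', 'category_id'}

    def smart_articles(**filters):
        qs = Article.objects.filter(**filters)
        # only use cache when the filter looks selective enough to be hit again
        if not set(filters) <= PRIMARY_FILTER_FIELDS:
            qs = qs.nocache()
        return list(qs)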
Writing a test
--------------
Writing a test for an issue you are experiencing can speed up its resolution a lot.
Here is how you do that. I suppose you have some application code causing it.
1. Make a fork.
2. Install all from ``requirements-test.txt``.
3. Ensure you can run tests with ``pytest``.
4. Copy relevant models code to ``tests/models.py``.
5. Go to ``tests/tests.py`` and paste code causing exception to ``IssueTests.test_{issue_number}``.
6. Execute ``pytest -k {issue_number}`` and see it failing.
7. Cut down model and test code until error disappears and make a step back.
8. Commit changes and make a pull request.
TODO
++++
- faster .get() handling for simple cases such as get by pk/id, with simple key calculation
- integrate previous one with prefetch_related()
- shard cache between multiple redises
- respect subqueries?
- respect headers in @cached_view*?
- group invalidate_obj() calls?
- a postpone invalidation context manager/decorator?
- fast mode: store cache in local memory, but check in with redis if it's valid
- an interface for complex fields to extract exact on parts or transforms: ArrayField.len => field__len=?, ArrayField[0] => field__0=?, JSONField['some_key'] => field__some_key=?
- custom cache eviction strategy in lua
- cache a string directly (no pickle) for direct serving (custom key function?)
.. |Build Status| image:: https://github.com/Suor/django-cacheops/actions/workflows/ci.yml/badge.svg
:target: https://github.com/Suor/django-cacheops/actions/workflows/ci.yml?query=branch%3Amaster
django-cacheops-7.2/bench.py
#!/usr/bin/env python3
import os, time, gc, sys, shutil
from funcy import re_tester
os.environ['DJANGO_SETTINGS_MODULE'] = 'tests.settings'
verbosity = 1
interactive = False
fixtures = ['basic']
HEADER_TEMPLATE = '==================== %-20s ===================='
def run_benchmarks(tests):
for name, test in tests:
if 'h' in flags:
print(HEADER_TEMPLATE % name)
time = bench_test(test)
print('%-18s time: %.3fms' % (name, time * 1000))
def bench_test(test):
prepared = None
if 'prepare_once' in test:
prepared = test['prepare_once']()
if 'h' in flags:
print('-' * 62)
if 'p' in flags:
test['run'] = profile(test['run'])
total = 0
n = 1
while total < 2:
gc.disable()
durations = [bench_once(test, prepared) for _ in range(n)]
gc.enable()
if '1' in flags:
break
total = sum(d for _, d in durations)
n *= 2
return min(d for d, _ in durations)
def bench_once(test, prepared=None):
zero_start = time.time()
if 'prepare' in test:
prepared = test['prepare']()
if 'h' in flags:
print('-' * 62)
start = time.time()
if prepared is None:
test['run']()
else:
test['run'](prepared)
now = time.time()
return now - start, now - zero_start
import django
from django.db import connection
from django.core.management import call_command
django.setup()
# Parse command line arguments
flags = ''.join(arg[1:] for arg in sys.argv[1:] if arg.startswith('-'))
args = [arg for arg in sys.argv[1:] if not arg.startswith('-')]
selector = args[0] if args else ''
select = selector[1:].__eq__ if selector.startswith('=') else re_tester(selector)
if 'p' in flags:
from profilehooks import profile
db_name = None
try:
shutil.rmtree('tests/migrations', True)
call_command('makemigrations', 'tests', verbosity=0)
db_name = connection.creation.create_test_db(verbosity=verbosity, autoclobber=not interactive)
call_command('loaddata', *fixtures, **{'verbosity': verbosity})
from cacheops.redis import redis_client
redis_client.flushdb()
from tests.bench import TESTS # import is here because it executes queries
if selector:
tests = [(name, test) for name, test in TESTS if select(name)]
else:
tests = TESTS
run_benchmarks(tests)
except KeyboardInterrupt:
pass
finally:
if db_name:
connection.creation.destroy_test_db(db_name, verbosity=verbosity)
shutil.rmtree('tests/migrations')
django-cacheops-7.2/cacheops/__init__.py
__version__ = '7.2'
VERSION = tuple(map(int, __version__.split('.')))
from .simple import * # noqa
from .query import * # noqa
from .invalidation import * # noqa
from .reaper import * # noqa
from .templatetags.cacheops import * # noqa
django-cacheops-7.2/cacheops/apps.py
from django.apps import AppConfig
from cacheops.query import install_cacheops
from cacheops.transaction import install_cacheops_transaction_support
class CacheopsConfig(AppConfig):
name = 'cacheops'
def ready(self):
install_cacheops()
install_cacheops_transaction_support()
django-cacheops-7.2/cacheops/conf.py
from importlib import import_module
from funcy import memoize, merge
from django.conf import settings as base_settings
from django.core.exceptions import ImproperlyConfigured
from django.core.signals import setting_changed
ALL_OPS = {'get', 'fetch', 'count', 'aggregate', 'exists'}
class Defaults:
CACHEOPS_ENABLED = True
CACHEOPS_REDIS = {}
CACHEOPS_DEFAULTS = {}
CACHEOPS = {}
CACHEOPS_PREFIX = lambda query: ''
CACHEOPS_INSIDEOUT = False
CACHEOPS_CLIENT_CLASS = None
CACHEOPS_DEGRADE_ON_FAILURE = False
CACHEOPS_SENTINEL = {}
# NOTE: we don't use this fields in invalidator conditions since their values could be very long
# and one should not filter by their equality anyway.
CACHEOPS_SKIP_FIELDS = "FileField", "TextField", "BinaryField", "JSONField", "ArrayField"
CACHEOPS_LONG_DISJUNCTION = 8
CACHEOPS_SERIALIZER = 'pickle'
FILE_CACHE_DIR = '/tmp/cacheops_file_cache'
FILE_CACHE_TIMEOUT = 60*60*24*30
class Settings(object):
def __getattr__(self, name):
res = getattr(base_settings, name, getattr(Defaults, name))
if name in ['CACHEOPS_PREFIX', 'CACHEOPS_SERIALIZER']:
res = import_string(res) if isinstance(res, str) else res
# Convert old list of classes to list of strings
if name == 'CACHEOPS_SKIP_FIELDS':
res = [f if isinstance(f, str) else f.get_internal_type(res) for f in res]
# Save to dict to speed up next access, __getattr__ won't be called
self.__dict__[name] = res
return res
settings = Settings()
setting_changed.connect(lambda setting, **kw: settings.__dict__.pop(setting, None), weak=False)
def import_string(path):
if "." in path:
module, attr = path.rsplit(".", 1)
return getattr(import_module(module), attr)
else:
return import_module(path)
@memoize
def prepare_profiles():
"""
Prepares a dict 'app.model' -> profile, for use in model_profile()
"""
profile_defaults = {
'ops': (),
'local_get': False,
'db_agnostic': True,
'lock': False,
}
profile_defaults.update(settings.CACHEOPS_DEFAULTS)
model_profiles = {}
for app_model, profile in settings.CACHEOPS.items():
if profile is None:
model_profiles[app_model.lower()] = None
continue
model_profiles[app_model.lower()] = mp = merge(profile_defaults, profile)
if mp['ops'] == 'all':
mp['ops'] = ALL_OPS
# People will do that anyway :)
if isinstance(mp['ops'], str):
mp['ops'] = {mp['ops']}
mp['ops'] = set(mp['ops'])
if 'timeout' not in mp:
raise ImproperlyConfigured(
'You must specify "timeout" option in "%s" CACHEOPS profile' % app_model)
if not isinstance(mp['timeout'], int):
raise ImproperlyConfigured(
'"timeout" option in "%s" CACHEOPS profile should be an integer' % app_model)
return model_profiles
def model_profile(model):
"""
Returns cacheops profile for a model
"""
assert not model._meta.abstract, "Can't get profile for %s" % model
# Django migrations create lots of fake models, just skip them
if model.__module__ == '__fake__':
return None
model_profiles = prepare_profiles()
app = model._meta.app_label.lower()
model_name = model._meta.model_name
for guess in ('%s.%s' % (app, model_name), '%s.*' % app, '*.*'):
if guess in model_profiles:
return model_profiles[guess]
else:
return None
django-cacheops-7.2/cacheops/getset.py
from contextlib import contextmanager
import hashlib
import json
import random
from .conf import settings
from .redis import redis_client, handle_connection_failure, load_script
from .transaction import transaction_states
LOCK_TIMEOUT = 60
@handle_connection_failure
def cache_thing(prefix, cache_key, data, cond_dnfs, timeout, dbs=(), precall_key='',
expected_checksum=''):
"""
Writes data to cache and creates appropriate invalidators.
If precall_key is not the empty string, the data will only be cached if the
precall_key is set to avoid caching stale data.
If expected_checksum is set and does not match the actual one then cache won't be written.
"""
# Could have changed after last check, sometimes superficially
if transaction_states.is_dirty(dbs):
return
if settings.CACHEOPS_INSIDEOUT:
schemes = dnfs_to_schemes(cond_dnfs)
conj_keys = dnfs_to_conj_keys(prefix, cond_dnfs)
return load_script('cache_thing_insideout')(
keys=[prefix, cache_key],
args=[
settings.CACHEOPS_SERIALIZER.dumps(data),
json.dumps(schemes),
json.dumps(conj_keys),
timeout,
# Need to pass it from here since random inside is not seeded in Redis pre 7.0
random.random(),
expected_checksum,
]
)
else:
if prefix and precall_key == "":
precall_key = prefix
load_script('cache_thing')(
keys=[prefix, cache_key, precall_key],
args=[
settings.CACHEOPS_SERIALIZER.dumps(data),
json.dumps(cond_dnfs, default=str),
timeout
]
)
@contextmanager
def getting(key, cond_dnfs, prefix, lock=False):
if not lock:
yield _read(key, cond_dnfs, prefix)
else:
locked = False
try:
data = _get_or_lock(key, cond_dnfs, prefix)
locked = data is None
yield data
finally:
if locked:
_release_lock(key)
@handle_connection_failure
def _read(key, cond_dnfs, prefix):
if not settings.CACHEOPS_INSIDEOUT:
return redis_client.get(key)
conj_keys = dnfs_to_conj_keys(prefix, cond_dnfs)
coded, *stamps = redis_client.mget(key, *conj_keys)
if coded is None or coded == b'LOCK':
return coded
if None in stamps:
redis_client.unlink(key)
return None
stamp_checksum, data = coded.split(b':', 1)
if stamp_checksum.decode() != join_stamps(stamps):
redis_client.unlink(key)
return None
return data
@handle_connection_failure
def _get_or_lock(key, cond_dnfs, prefix):
_lock = redis_client.register_script("""
local locked = redis.call('set', KEYS[1], 'LOCK', 'nx', 'ex', ARGV[1])
if locked then
redis.call('del', KEYS[2])
end
return locked
""")
signal_key = key + ':signal'
while True:
data = _read(key, cond_dnfs, prefix)
if data is None:
if _lock(keys=[key, signal_key], args=[LOCK_TIMEOUT]):
return None
elif data != b'LOCK':
return data
# No data and not locked, wait
redis_client.brpoplpush(signal_key, signal_key, timeout=LOCK_TIMEOUT)
@handle_connection_failure
def _release_lock(key):
_unlock = redis_client.register_script("""
if redis.call('get', KEYS[1]) == 'LOCK' then
redis.call('del', KEYS[1])
end
redis.call('lpush', KEYS[2], 1)
redis.call('expire', KEYS[2], 1)
""")
signal_key = key + ':signal'
_unlock(keys=[key, signal_key])
# Key manipulation helpers
def join_stamps(stamps):
return hashlib.sha1(b' '.join(stamps)).hexdigest()
def dnfs_to_conj_keys(prefix, cond_dnfs):
def _conj_cache_key(table, conj):
conj_str = '&'.join(f'{field}={val}' for field, val in sorted(conj.items()))
return f'{prefix}conj:{table}:{conj_str}'
return [_conj_cache_key(table, conj) for table, disj in cond_dnfs.items()
for conj in disj]
def dnfs_to_schemes(cond_dnfs):
return {table: list({",".join(sorted(conj)) for conj in disj})
for table, disj in cond_dnfs.items() if disj}
django-cacheops-7.2/cacheops/invalidation.py
import json
import threading
from funcy import memoize, post_processing, ContextDecorator, decorator, walk_values
from django.db import DEFAULT_DB_ALIAS
from django.db.models.expressions import F, Expression
from .conf import settings
from .sharding import get_prefix
from .redis import redis_client, handle_connection_failure, load_script
from .signals import cache_invalidated
from .transaction import queue_when_in_transaction
__all__ = ('invalidate_obj', 'invalidate_model', 'invalidate_all', 'no_invalidation')
@decorator
def skip_on_no_invalidation(call):
if not settings.CACHEOPS_ENABLED or no_invalidation.active:
return
return call()
@skip_on_no_invalidation
@queue_when_in_transaction
@handle_connection_failure
def invalidate_dict(model, obj_dict, using=DEFAULT_DB_ALIAS):
if no_invalidation.active or not settings.CACHEOPS_ENABLED:
return
model = model._meta.concrete_model
prefix = get_prefix(_cond_dnfs=[(model._meta.db_table, list(obj_dict.items()))], dbs=[using])
if settings.CACHEOPS_INSIDEOUT:
script = 'invalidate_insideout'
serialized_dict = json.dumps(walk_values(str, obj_dict))
else:
script = 'invalidate'
serialized_dict = json.dumps(obj_dict, default=str)
load_script(script)(keys=[prefix], args=[model._meta.db_table, serialized_dict])
cache_invalidated.send(sender=model, obj_dict=obj_dict)
@skip_on_no_invalidation
def invalidate_obj(obj, using=DEFAULT_DB_ALIAS):
"""
Invalidates caches that can possibly be influenced by object
"""
model = obj.__class__._meta.concrete_model
invalidate_dict(model, get_obj_dict(model, obj), using=using)
@skip_on_no_invalidation
@queue_when_in_transaction
@handle_connection_failure
def invalidate_model(model, using=DEFAULT_DB_ALIAS):
"""
Invalidates all caches for given model.
NOTE: This is a heavy artillery which uses redis KEYS request,
which could be relatively slow on large datasets.
"""
model = model._meta.concrete_model
# NOTE: if we use sharding dependent on DNF then this will fail,
# which is ok, since it's hard/impossible to predict all the shards
prefix = get_prefix(tables=[model._meta.db_table], dbs=[using])
conjs_keys = redis_client.keys('%sconj:%s:*' % (prefix, model._meta.db_table))
if conjs_keys:
if settings.CACHEOPS_INSIDEOUT:
redis_client.unlink(*conjs_keys)
else:
cache_keys = redis_client.sunion(conjs_keys)
keys = list(cache_keys) + conjs_keys
redis_client.unlink(*keys)
cache_invalidated.send(sender=model, obj_dict=None)
@skip_on_no_invalidation
@handle_connection_failure
def invalidate_all():
redis_client.flushdb()
cache_invalidated.send(sender=None, obj_dict=None)
class InvalidationState(threading.local):
def __init__(self):
self.depth = 0
class _no_invalidation(ContextDecorator):
state = InvalidationState()
def __enter__(self):
self.state.depth += 1
def __exit__(self, type, value, traceback):
self.state.depth -= 1
@property
def active(self):
return self.state.depth
no_invalidation = _no_invalidation()
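# Usage sketch (illustrative; assumes an `Article` model with a cacheops profile):
#
#   from cacheops import invalidate_obj, invalidate_model, invalidate_all, no_invalidation
#
#   invalidate_obj(article)        # drop caches that may contain this row
#   invalidate_model(Article)      # heavy: drops every cache for the model
#   invalidate_all()               # flushes the whole cacheops redis database
#
#   with no_invalidation:          # e.g. around a mass import
#       Article.objects.bulk_create(articles)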
### ORM instance serialization
@memoize
def serializable_fields(model):
return {f for f in model._meta.fields
if f.get_internal_type() not in settings.CACHEOPS_SKIP_FIELDS}
@post_processing(dict)
def get_obj_dict(model, obj):
for field in serializable_fields(model):
# Skip deferred fields, in post_delete trying to fetch them results in error anyway.
# In post_save we rely on deferred values be the same as in pre_save.
if field.attname not in obj.__dict__:
continue
value = getattr(obj, field.attname)
if value is None:
yield field.attname, None
elif isinstance(value, (F, Expression)):
continue
else:
yield field.attname, field.get_prep_value(value)
django-cacheops-7.2/cacheops/jinja2.py 0000664 0000000 0000000 00000004364 15001074010 0017716 0 ustar 00root root 0000000 0000000 from jinja2 import nodes
from jinja2.ext import Extension
import cacheops
from cacheops.utils import carefully_strip_whitespace
__all__ = ['cache']
class CacheopsExtension(Extension):
tags = ['cached_as', 'cached']
def parse(self, parser):
lineno = parser.stream.current.lineno
tag_name = parser.stream.current.value
tag_location = '%s:%s' % (parser.name, lineno)
next(parser.stream)
args, kwargs = self.parse_args(parser)
args = [nodes.Const(tag_name), nodes.Const(tag_location)] + args
block_call = self.call_method('handle_tag', args, kwargs)
body = parser.parse_statements(['name:end%s' % tag_name], drop_needle=True)
return nodes.CallBlock(block_call, [], [], body).set_lineno(lineno)
def handle_tag(self, tag_name, tag_location, *args, **kwargs):
caller = kwargs.pop('caller')
cacheops_decorator = getattr(cacheops, tag_name)
kwargs.setdefault('extra', '')
if isinstance(kwargs['extra'], tuple):
kwargs['extra'] += (tag_location,)
else:
kwargs['extra'] = str(kwargs['extra']) + tag_location
@cacheops_decorator(*args, **kwargs)
def _handle_tag():
content = caller()
# TODO: make this cache preparation configurable
return carefully_strip_whitespace(content)
return _handle_tag()
def parse_args(self, parser):
args = []
kwargs = []
require_comma = False
while parser.stream.current.type != 'block_end':
if require_comma:
parser.stream.expect('comma')
if parser.stream.current.type == 'name' and parser.stream.look().type == 'assign':
key = parser.stream.current.value
parser.stream.skip(2)
value = parser.parse_expression()
kwargs.append(nodes.Keyword(key, value, lineno=value.lineno))
else:
if kwargs:
parser.fail('Invalid argument syntax for CacheopsExtension tag',
parser.stream.current.lineno)
args.append(parser.parse_expression())
require_comma = True
return args, kwargs
cache = CacheopsExtension
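# Template usage sketch (illustrative; assumes 'cacheops.jinja2.cache' is added to the
# Jinja2 environment extensions and `article` is a cacheops-enabled model instance):
#
#   {% cached_as article, timeout=600 %}
#       ... expensive fragment ...
#   {% endcached_as %}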
django-cacheops-7.2/cacheops/lua/ 0000775 0000000 0000000 00000000000 15001074010 0016741 5 ustar 00root root 0000000 0000000 django-cacheops-7.2/cacheops/lua/cache_thing.lua 0000664 0000000 0000000 00000003574 15001074010 0021711 0 ustar 00root root 0000000 0000000 local prefix = KEYS[1]
local key = KEYS[2]
local precall_key = KEYS[3]
local data = ARGV[1]
local dnfs = cjson.decode(ARGV[2])
local timeout = tonumber(ARGV[3])
if precall_key ~= prefix and redis.call('exists', precall_key) == 0 then
-- Cached data was invalidated during the function call. The data is
-- stale and should not be cached.
return
end
-- Write data to cache
redis.call('setex', key, timeout, data)
-- A pair of funcs
-- NOTE: we depend here on keys order being stable
local conj_schema = function (conj)
local parts = {}
for field, _ in pairs(conj) do
table.insert(parts, field)
end
return table.concat(parts, ',')
end
local conj_cache_key = function (db_table, conj)
local parts = {}
for field, val in pairs(conj) do
table.insert(parts, field .. '=' .. tostring(val))
end
return prefix .. 'conj:' .. db_table .. ':' .. table.concat(parts, '&')
end
-- Update schemes and invalidators
for db_table, disj in pairs(dnfs) do
for _, conj in ipairs(disj) do
-- Ensure scheme is known
redis.call('sadd', prefix .. 'schemes:' .. db_table, conj_schema(conj))
-- Add new cache_key to list of dependencies
local conj_key = conj_cache_key(db_table, conj)
redis.call('sadd', conj_key, key)
-- NOTE: an invalidator should live longer than any key it references.
-- So we update its ttl on every key if needed.
-- NOTE: we also can't use "EXPIRE conj_key timeout GT" because it will have no effect on
-- newly created and thus non-volatile conj keys.
local conj_ttl = redis.call('ttl', conj_key)
if conj_ttl < timeout then
-- We set conj_key life with a margin over key life to call expire more rarely
-- And add a few extra seconds to be extra safe
redis.call('expire', conj_key, timeout * 2 + 10)
end
end
end
django-cacheops-7.2/cacheops/lua/cache_thing_insideout.lua 0000664 0000000 0000000 00000003563 15001074010 0023772 0 ustar 00root root 0000000 0000000 local prefix = KEYS[1]
local key = KEYS[2]
local data = ARGV[1]
local schemes = cjson.decode(ARGV[2])
local conj_keys = cjson.decode(ARGV[3])
local timeout = tonumber(ARGV[4])
local rnd = ARGV[5] -- A new value for empty stamps
local expected_checksum = ARGV[6]
-- Ensure schemes are known
for db_table, _schemes in pairs(schemes) do
redis.call('sadd', prefix .. 'schemes:' .. db_table, unpack(_schemes))
end
-- Fill in invalidators and collect stamps
local stamps = {}
for _, conj_key in ipairs(conj_keys) do
-- REDIS_7
local stamp = redis.call('set', conj_key, rnd, 'nx', 'get') or rnd
-- /REDIS_7
-- REDIS_4
local stamp = redis.call('get', conj_key)
if not stamp then
stamp = rnd
redis.call('set', conj_key, rnd)
end
-- /REDIS_4
table.insert(stamps, stamp)
-- NOTE: an invalidator should live longer than any key it references.
-- So we update its ttl on every key if needed.
-- NOTE: we also can't use "EXPIRE conj_key timeout GT" because it will have no effect on
-- newly created and thus non-volatile conj keys.
local conj_ttl = redis.call('ttl', conj_key)
if conj_ttl < timeout then
-- We set conj_key life with a margin over key life to call expire more rarely
-- And add a few extra seconds to be extra safe
redis.call('expire', conj_key, timeout * 2 + 10)
end
end
-- Write data to cache along with a checksum of the stamps to see if any of them changed
local all_stamps = table.concat(stamps, ' ')
local stamp_checksum = redis.sha1hex(all_stamps)
if expected_checksum ~= '' and stamp_checksum ~= expected_checksum then
-- Cached data was invalidated during the function call. The data is
-- stale and should not be cached.
return stamp_checksum -- This one is used for keep_fresh implementation
end
redis.call('set', key, stamp_checksum .. ':' .. data, 'ex', timeout)
django-cacheops-7.2/cacheops/lua/invalidate.lua 0000664 0000000 0000000 00000002450 15001074010 0021565 0 ustar 00root root 0000000 0000000 local prefix = KEYS[1]
local db_table = ARGV[1]
local obj = cjson.decode(ARGV[2])
-- Utility functions
local conj_cache_key = function (db_table, scheme, obj)
local parts = {}
for field in string.gmatch(scheme, "[^,]+") do
table.insert(parts, field .. '=' .. tostring(obj[field]))
end
return prefix .. 'conj:' .. db_table .. ':' .. table.concat(parts, '&')
end
local call_in_chunks = function (command, args)
local step = 1000
for i = 1, #args, step do
redis.call(command, unpack(args, i, math.min(i + step - 1, #args)))
end
end
-- Calculate conj keys
local conj_keys = {}
local schemes = redis.call('smembers', prefix .. 'schemes:' .. db_table)
for _, scheme in ipairs(schemes) do
table.insert(conj_keys, conj_cache_key(db_table, scheme, obj))
end
-- Delete cache keys and referring conj keys
if next(conj_keys) ~= nil then
local cache_keys = redis.call('sunion', unpack(conj_keys))
-- we delete cache keys since they are invalid
-- and conj keys as they will refer only to deleted keys
redis.call("unlink", unpack(conj_keys))
if next(cache_keys) ~= nil then
-- NOTE: can't just do redis.call('del', unpack(...)) because there is a limit on the number
-- of return values in lua.
call_in_chunks('del', cache_keys)
end
end
django-cacheops-7.2/cacheops/lua/invalidate_insideout.lua 0000664 0000000 0000000 00000001251 15001074010 0023646 0 ustar 00root root 0000000 0000000 local prefix = KEYS[1]
local db_table = ARGV[1]
local obj = cjson.decode(ARGV[2])
local conj_cache_key = function (db_table, scheme, obj)
local parts = {}
for field in string.gmatch(scheme, "[^,]+") do
-- All obj values are strings, we still use tostring() in case obj does not contain field
table.insert(parts, field .. '=' .. tostring(obj[field]))
end
return prefix .. 'conj:' .. db_table .. ':' .. table.concat(parts, '&')
end
-- Drop conj keys
local conj_keys = {}
local schemes = redis.call('smembers', prefix .. 'schemes:' .. db_table)
for _, scheme in ipairs(schemes) do
redis.call('unlink', conj_cache_key(db_table, scheme, obj))
end
django-cacheops-7.2/cacheops/management/ 0000775 0000000 0000000 00000000000 15001074010 0020274 5 ustar 00root root 0000000 0000000 django-cacheops-7.2/cacheops/management/__init__.py 0000664 0000000 0000000 00000000000 15001074010 0022373 0 ustar 00root root 0000000 0000000 django-cacheops-7.2/cacheops/management/commands/ 0000775 0000000 0000000 00000000000 15001074010 0022075 5 ustar 00root root 0000000 0000000 django-cacheops-7.2/cacheops/management/commands/__init__.py 0000664 0000000 0000000 00000000000 15001074010 0024174 0 ustar 00root root 0000000 0000000 django-cacheops-7.2/cacheops/management/commands/cleanfilecache.py 0000664 0000000 0000000 00000000767 15001074010 0025367 0 ustar 00root root 0000000 0000000 import os
from django.core.management.base import BaseCommand
from cacheops.conf import settings
class Command(BaseCommand):
help = 'Clean filebased cache'
def add_arguments(self, parser):
parser.add_argument('path', nargs='*', default=['default'])
def handle(self, **options):
for path in options['path']:
if path == 'default':
path = settings.FILE_CACHE_DIR
os.system(r'find %s -type f \! -iname "\." -mmin +0 -delete' % path)
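# Usage sketch (illustrative), typically run from cron:
#
#   python manage.py cleanfilecache             # cleans settings.FILE_CACHE_DIR
#   python manage.py cleanfilecache /tmp/cache  # cleans an explicit path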
django-cacheops-7.2/cacheops/management/commands/invalidate.py 0000664 0000000 0000000 00000003634 15001074010 0024575 0 ustar 00root root 0000000 0000000 from django.core.management.base import LabelCommand, CommandError
from django.apps import apps
from cacheops.invalidation import *
class Command(LabelCommand):
help = 'Invalidates cache for entire app, model or particular instance'
args = '(all | <app> | <app>.<model> | <app>.<model>.<object_pk>) +'
label = 'app or model or object'
def handle_label(self, label, **options):
if label == 'all':
self.handle_all()
else:
app_n_model = label.split('.')
if len(app_n_model) == 1:
self.handle_app(app_n_model[0])
elif len(app_n_model) == 2:
self.handle_model(*app_n_model)
elif len(app_n_model) == 3:
self.handle_obj(*app_n_model)
else:
raise CommandError('Wrong model/app name syntax: %s\n'
'Type <app_name> or <app_name>.<model_name>.' % label)
def handle_all(self):
invalidate_all()
def handle_app(self, app_name):
for model in self.get_app(app_name).get_models(include_auto_created=True):
invalidate_model(model)
def handle_model(self, app_name, model_name):
invalidate_model(self.get_model(app_name, model_name))
def handle_obj(self, app_name, model_name, obj_pk):
model = self.get_model(app_name, model_name)
try:
obj = model.objects.get(pk=obj_pk)
except model.DoesNotExist:
raise CommandError('No %s.%s with pk = %s' % (app_name, model_name, obj_pk))
invalidate_obj(obj)
def get_app(self, app_name):
try:
return apps.get_app_config(app_name)
except LookupError as e:
raise CommandError(e)
def get_model(self, app_name, model_name):
try:
return apps.get_app_config(app_name).get_model(model_name)
except LookupError as e:
raise CommandError(e)
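# Usage sketch (illustrative app/model/pk names):
#
#   python manage.py invalidate articles.article.45   # a single object
#   python manage.py invalidate articles.article      # all caches for a model
#   python manage.py invalidate articles              # all models of an app
#   python manage.py invalidate all                    # flush everything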
django-cacheops-7.2/cacheops/management/commands/reapconjs.py 0000664 0000000 0000000 00000001307 15001074010 0024434 0 ustar 00root root 0000000 0000000 from argparse import ArgumentParser
from django.core.management.base import BaseCommand
from cacheops.reaper import reap_conjs
class Command(BaseCommand):
help = 'Removes expired conjunction keys from cacheops.'
def add_arguments(self, parser: ArgumentParser):
parser.add_argument('--chunk-size', type=int, default=1000)
parser.add_argument('--min-conj-set-size', type=int, default=1000)
parser.add_argument('--dry-run', action='store_true')
def handle(self, chunk_size: int, min_conj_set_size: int, dry_run: bool, **kwargs):
reap_conjs(
chunk_size=chunk_size,
min_conj_set_size=min_conj_set_size,
dry_run=dry_run,
)
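# Usage sketch (illustrative values), e.g. from a periodic job:
#
#   python manage.py reapconjs --chunk-size=5000 --min-conj-set-size=1000 --dry-run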
django-cacheops-7.2/cacheops/query.py 0000664 0000000 0000000 00000062443 15001074010 0017710 0 ustar 00root root 0000000 0000000 import sys
import threading
from random import random
from funcy import select_keys, cached_property, once, once_per, monkey, wraps, walk, chain
from funcy import lmap, lcat, join_with
from django.utils.encoding import force_str
from django.core.exceptions import ImproperlyConfigured, EmptyResultSet
from django.db import DEFAULT_DB_ALIAS, connections, models
from django.db.models.manager import BaseManager
from django.db.models.query import MAX_GET_RESULTS
from django.db.models.signals import pre_save, post_save, post_delete, m2m_changed
from django.db.transaction import atomic
from .conf import model_profile, settings, ALL_OPS
from .utils import monkey_mix, stamp_fields, get_cache_key, cached_view_fab, family_has_profile
from .utils import md5
from .getset import cache_thing, getting
from .sharding import get_prefix
from .tree import dnfs
from .invalidation import invalidate_obj, invalidate_dict, skip_on_no_invalidation
from .transaction import transaction_states
from .signals import cache_read
__all__ = ('cached_as', 'cached_view_as', 'install_cacheops')
_local_get_cache = {}
def cached_as(*samples, timeout=None, extra=None, lock=None, keep_fresh=False):
"""
Caches results of a function and invalidates them the same way as the given queryset(s).
NOTE: Ignores queryset cached ops settings, always caches.
If keep_fresh is True, this will prevent caching if the given querysets are
invalidated during the function call. This prevents prolonged caching of
stale data.
"""
if not samples:
raise TypeError('Pass a queryset, a model or an object to cache like')
# If we unexpectedly get list instead of queryset return identity decorator.
# Paginator could do this when page.object_list is empty.
if len(samples) == 1 and isinstance(samples[0], list):
return lambda func: func
def _get_queryset(sample):
if isinstance(sample, models.Model):
queryset = sample.__class__.objects.filter(pk=sample.pk)
elif isinstance(sample, type) and issubclass(sample, models.Model):
queryset = sample.objects.all()
else:
queryset = sample
queryset._require_cacheprofile()
return queryset
querysets = lmap(_get_queryset, samples)
dbs = list({qs.db for qs in querysets})
cond_dnfs = join_with(lcat, map(dnfs, querysets)) # TODO: use cached version?
qs_keys = [qs._cache_key(prefix=False) for qs in querysets]
if timeout is None:
timeout = min(qs._cacheprofile['timeout'] for qs in querysets)
if lock is None:
lock = any(qs._cacheprofile['lock'] for qs in querysets)
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
if not settings.CACHEOPS_ENABLED or transaction_states.is_dirty(dbs):
return func(*args, **kwargs)
prefix = get_prefix(func=func, _cond_dnfs=cond_dnfs, dbs=dbs)
extra_val = extra(*args, **kwargs) if callable(extra) else extra
cache_key = prefix + 'as:' + get_cache_key(func, args, kwargs, qs_keys, extra_val)
with getting(cache_key, cond_dnfs, prefix, lock=lock) as cache_data:
cache_read.send(sender=None, func=func, hit=cache_data is not None)
if cache_data is not None:
return settings.CACHEOPS_SERIALIZER.loads(cache_data)
else:
precall_key = ''
expected_checksum = ''
if keep_fresh and settings.CACHEOPS_INSIDEOUT:
# The conj stamps should not be dropped while we calculate the function.
# But being filled in concurrently is a normal concurrent cache write.
# However, if they are filled in and then dropped, we cannot detect that.
# Unless we fill them ourselves and get expected checksum now. We also need
# to fill in schemes, so we just reuse the cache_thing().
expected_checksum = cache_thing(prefix, cache_key, '', cond_dnfs, timeout,
dbs=dbs, expected_checksum='never match')
elif keep_fresh:
# We call this "asp" for "as precall" because this key is
# cached before the actual function is called. We randomize
# the key to prevent falsely thinking the key was not
# invalidated when in fact it was invalidated and the
# function was called again in another process.
suffix = get_cache_key(func, args, kwargs, qs_keys, extra_val, random())
precall_key = prefix + 'asp:' + suffix
# Cache a precall_key to watch for invalidation during
# the function call. Its value does not matter. If and
# only if it remains valid before, during, and after the
# call, the result can be cached and returned.
cache_thing(prefix, precall_key, 'PRECALL', cond_dnfs, timeout, dbs=dbs)
result = func(*args, **kwargs)
cache_thing(prefix, cache_key, result, cond_dnfs, timeout, dbs=dbs,
precall_key=precall_key, expected_checksum=expected_checksum)
return result
return wrapper
return decorator
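# Usage sketch (illustrative; assumes an `Article` model with a cacheops profile):
#
#   @cached_as(Article.objects.filter(published=True), timeout=120)
#   def published_titles():
#       return list(Article.objects.filter(published=True).values_list('title', flat=True))
#
# The cached result is invalidated the same way the sample queryset would be,
# i.e. whenever a matching Article row is saved or deleted.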
def cached_view_as(*samples, **kwargs):
return cached_view_fab(cached_as)(*samples, **kwargs)
class QuerySetMixin(object):
@cached_property
def _cacheprofile(self):
profile = model_profile(self.model)
return profile.copy() if profile else None
@cached_property
def _cloning(self):
return 1000
def _require_cacheprofile(self):
if self._cacheprofile is None:
raise ImproperlyConfigured(
'Cacheops is not enabled for %s.%s model.\n'
'If you don\'t want to cache anything by default '
'you can configure it with empty ops.'
% (self.model._meta.app_label, self.model._meta.model_name))
def _cache_key(self, prefix=True):
"""
Compute a cache key for this queryset
"""
md = md5()
md.update('%s.%s' % (self.__class__.__module__, self.__class__.__name__))
# Vary cache key for proxy models
md.update('%s.%s' % (self.model.__module__, self.model.__name__))
# Protect from field list changes in model
md.update(stamp_fields(self.model))
# Use query SQL as part of a key
try:
sql, params = self.query.get_compiler(self.db).as_sql()
try:
sql_str = sql % params
except UnicodeDecodeError:
sql_str = sql % walk(force_str, params)
md.update(force_str(sql_str))
except EmptyResultSet:
pass
# If query results differ depending on database
if self._cacheprofile and not self._cacheprofile['db_agnostic']:
md.update(self.db)
# Iterable class pack results differently
it_class = self._iterable_class
md.update('%s.%s' % (it_class.__module__, it_class.__name__))
cache_key = 'q:%s' % md.hexdigest()
return self._prefix + cache_key if prefix else cache_key
@cached_property
def _prefix(self):
return get_prefix(_queryset=self)
@cached_property
def _cond_dnfs(self):
return dnfs(self)
def _cache_results(self, cache_key, results):
cache_thing(self._prefix, cache_key, results,
self._cond_dnfs, self._cacheprofile['timeout'], dbs=[self.db])
def _should_cache(self, op):
# If cache and op are enabled and not within write or dirty transaction
return settings.CACHEOPS_ENABLED \
and self._cacheprofile and op in self._cacheprofile['ops'] \
and not self._for_write \
and not transaction_states[self.db].is_dirty()
def cache(self, ops=None, timeout=None, lock=None):
"""
Enables caching for given ops
ops - a subset of {'get', 'fetch', 'count', 'exists', 'aggregate'},
ops caching to be turned on, all enabled by default
timeout - override default cache timeout
lock - use lock to prevent dog-pile effect
NOTE: you actually can disable caching by omitting corresponding ops,
.cache(ops=[]) disables caching for this queryset.
"""
self._require_cacheprofile()
if ops is None or ops == 'all':
ops = ALL_OPS
if isinstance(ops, str):
ops = {ops}
self._cacheprofile['ops'] = set(ops)
if timeout is not None:
self._cacheprofile['timeout'] = timeout
if lock is not None:
self._cacheprofile['lock'] = lock
return self
def nocache(self):
"""
Convenience method, turns off caching for this queryset
"""
# cache profile not present means caching is not enabled for this model
if self._cacheprofile is None:
return self
else:
return self.cache(ops=[])
def cloning(self, cloning=1000):
self._cloning = cloning
return self
def inplace(self):
return self.cloning(0)
def _clone(self, **kwargs):
if self._cloning:
return self.clone(**kwargs)
else:
self.__dict__.update(kwargs)
return self
def clone(self, **kwargs):
clone = self._no_monkey._clone(self, **kwargs)
clone._cloning = self._cloning - 1 if self._cloning else 0
# NOTE: need to copy profile so that clone changes won't affect this queryset
if self.__dict__.get('_cacheprofile'):
clone._cacheprofile = self._cacheprofile.copy()
return clone
def _fetch_all(self):
# If already fetched or should pass by then fall back
if self._result_cache is not None or not self._should_cache('fetch'):
return self._no_monkey._fetch_all(self)
cache_key = self._cache_key()
lock = self._cacheprofile['lock']
with getting(cache_key, self._cond_dnfs, self._prefix, lock=lock) as cache_data:
cache_read.send(sender=self.model, func=None, hit=cache_data is not None)
if cache_data is not None:
self._result_cache = settings.CACHEOPS_SERIALIZER.loads(cache_data)
else:
self._result_cache = list(self._iterable_class(self))
self._cache_results(cache_key, self._result_cache)
return self._no_monkey._fetch_all(self)
def count(self):
if self._should_cache('count'):
# Optimization borrowed from the overridden method:
# if queryset cache is already filled just return its len
if self._result_cache is not None:
return len(self._result_cache)
return cached_as(self)(lambda: self._no_monkey.count(self))()
else:
return self._no_monkey.count(self)
def aggregate(self, *args, **kwargs):
if self._should_cache('aggregate'):
# Apply all aggregates the same way original .aggregate() does, but do not perform sql.
# This code is mostly taken from QuerySet.aggregate().
normalized_kwargs = kwargs.copy()
for arg in args:
try:
normalized_kwargs[arg.default_alias] = arg
except (AttributeError, TypeError):
# Let Django raise a proper error
return self._no_monkey.aggregate(*args, **kwargs)
# Simulate Query.get_aggregation() preparations, this adds proper joins to qs.query
if not normalized_kwargs:
return {}
qs = self._clone()
aggregates = {}
for alias, aggregate_expr in normalized_kwargs.items():
aggregate = aggregate_expr.resolve_expression(
qs.query, allow_joins=True, reuse=None, summarize=True
)
if not aggregate.contains_aggregate:
raise TypeError("%s is not an aggregate expression" % alias)
aggregates[alias] = aggregate
# Use resulting qs as a ref, aggregates still contain names, etc
func = lambda: self._no_monkey.aggregate(self, *args, **kwargs)
return cached_as(qs, extra=aggregates)(func)()
else:
return self._no_monkey.aggregate(self, *args, **kwargs)
def get(self, *args, **kwargs):
# .get() uses the same ._fetch_all() method to fetch data,
# so here we add 'fetch' to ops
if self._should_cache('get'):
# NOTE: local_get=True enables caching of simple gets in local memory,
# which is very fast, but not invalidated.
# Don't bother with Q-objects, select_related and previous filters,
# simple gets - that's what we are really after here.
#
# TODO: these checks are far from adequate, at least these are missed:
# - self._fields (values, values_list)
# - annotations
# - ...
# TODO: don't distinguish between pk, pk__exact, id, id__exact
# TODO: work with .filter(**kwargs).get() ?
if self._cacheprofile['local_get'] \
and not args \
and not self.query.select_related \
and not self.query.where.children:
# NOTE: We use a simpler way to generate a cache key to cut costs.
# Some day it could produce the same key for different requests.
key = (self.__class__, self.model) + tuple(sorted(kwargs.items()))
try:
return _local_get_cache[key]
except KeyError:
_local_get_cache[key] = self._no_monkey.get(self, *args, **kwargs)
return _local_get_cache[key]
except TypeError:
# If some arg is unhashable we can't save it to dict key,
# we just skip local cache in that case
pass
if 'fetch' in self._cacheprofile['ops']:
qs = self
else:
qs = self._clone().cache()
else:
qs = self
return qs._no_monkey.get(qs, *args, **kwargs)
def first(self):
if self._should_cache('get'):
return self._no_monkey.first(self._clone().cache())
return self._no_monkey.first(self)
def last(self):
if self._should_cache('get'):
return self._no_monkey.last(self._clone().cache())
return self._no_monkey.last(self)
def exists(self):
if self._should_cache('exists'):
if self._result_cache is not None:
return bool(self._result_cache)
return cached_as(self)(lambda: self._no_monkey.exists(self))()
else:
return self._no_monkey.exists(self)
def bulk_create(self, objs, *args, **kwargs):
objs = self._no_monkey.bulk_create(self, objs, *args, **kwargs)
if family_has_profile(self.model):
for obj in objs:
invalidate_obj(obj, using=self.db)
return objs
def invalidated_update(self, **kwargs):
clone = self._clone().nocache().select_related(None)
clone._for_write = True # affects routing
with atomic(using=clone.db):
objects = list(clone.select_for_update())
rows = clone.update(**kwargs)
# TODO: do not refetch objects but update with kwargs in simple cases?
# We use clone database to fetch new states, as this is the db they were written to.
# Using router with new_objects may fail, using self may return slave during lag.
pks = {obj.pk for obj in objects}
new_objects = self.model.objects.filter(pk__in=pks).using(clone.db)
for obj in chain(objects, new_objects):
invalidate_obj(obj, using=clone.db)
return rows
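# Usage sketch (illustrative): unlike plain .update(), invalidated_update() refetches
# the affected rows and invalidates caches for both their old and new state:
#
#   Article.objects.filter(category_id=7).invalidated_update(published=False)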
def connect_first(signal, receiver, sender):
old_receivers = signal.receivers
signal.receivers = []
signal.connect(receiver, sender=sender, weak=False)
signal.receivers += old_receivers
# We need to stash old object before Model.save() to invalidate on its properties
_old_objs = threading.local()
class ManagerMixin(object):
@once_per('cls')
def _install_cacheops(self, cls):
# Set up signals
connect_first(pre_save, self._pre_save, sender=cls)
connect_first(post_save, self._post_save, sender=cls)
connect_first(post_delete, self._post_delete, sender=cls)
# Install auto-created models as their module attributes to make them picklable
module = sys.modules[cls.__module__]
if not hasattr(module, cls.__name__):
setattr(module, cls.__name__, cls)
# This is probably still needed if models are created dynamically or imported late
def contribute_to_class(self, cls, name):
self._no_monkey.contribute_to_class(self, cls, name)
# NOTE: we check it here rather than inside _install_cacheops()
# because we don't want @once_per() and family_has_profile() memory to hold refs.
# Otherwise, temporary classes made for migrations might hoard lots of memory.
if cls.__module__ != '__fake__' and family_has_profile(cls):
self._install_cacheops(cls)
@skip_on_no_invalidation
def _pre_save(self, sender, instance, using, **kwargs):
if instance.pk is not None and not instance._state.adding:
try:
# TODO: do not fetch non-serializable fields
_old_objs.__dict__[sender, instance.pk] \
= sender.objects.using(using).get(pk=instance.pk)
except sender.DoesNotExist:
pass
@skip_on_no_invalidation
def _post_save(self, sender, instance, using, **kwargs):
# Invoke invalidations for both old and new versions of saved object
old = _old_objs.__dict__.pop((sender, instance.pk), None)
if old:
invalidate_obj(old, using=using)
invalidate_obj(instance, using=using)
invalidate_o2o(sender, old, instance, using=using)
# We run invalidations but skip caching if we are dirty
if transaction_states[using].is_dirty():
return
# NOTE: it's possible for this to be a subclass, e.g. proxy, without cacheprofile,
# but its base having one. Or vice versa.
# We still need to invalidate in this case, but cache on save better be skipped.
cacheprofile = model_profile(instance.__class__)
if not cacheprofile:
return
# Enabled cache_on_save makes us write saved object to cache.
# Later it can be retrieved with .get(<cache_on_save_field>=<value>)
# <cache_on_save_field> is pk unless specified.
# This sweet trick saves a db request and helps with slave lag.
cache_on_save = cacheprofile.get('cache_on_save')
if cache_on_save:
# HACK: We get this object "from field" so it can contain
# some undesirable attributes or other objects attached.
# RelatedField accessors do that, for example.
#
# So we strip down any _*_cache attrs before saving
# and later reassign them
unwanted_dict = select_keys(r'^_.*_cache$', instance.__dict__)
for k in unwanted_dict:
del instance.__dict__[k]
key = 'pk' if cache_on_save is True else cache_on_save
cond = {key: getattr(instance, key)}
qs = sender.objects.inplace().using(using).filter(**cond).order_by()
# Mimic Django .get() logic
if MAX_GET_RESULTS and (
not qs.query.select_for_update
or connections[qs.db].features.supports_select_for_update_with_limit):
qs.query.set_limits(high=MAX_GET_RESULTS)
qs._cache_results(qs._cache_key(), [instance])
# Reverting stripped attributes
instance.__dict__.update(unwanted_dict)
def _post_delete(self, sender, instance, using, **kwargs):
"""
Invalidation upon object deletion.
"""
# NOTE: this will behave wrong if someone changed object fields
# before deletion (why would anyone do that?)
invalidate_obj(instance, using=using)
# NOTE: this is needed because m2m_changed is not sent on such deletion:
# https://code.djangoproject.com/ticket/17688
invalidate_m2o(sender, instance, using)
def inplace(self):
return self.get_queryset().inplace()
def cache(self, *args, **kwargs):
return self.get_queryset().cache(*args, **kwargs)
def nocache(self):
return self.get_queryset().nocache()
def invalidated_update(self, **kwargs):
return self.get_queryset().inplace().invalidated_update(**kwargs)
def invalidate_o2o(sender, old, instance, using=DEFAULT_DB_ALIAS):
"""Invoke invalidation for o2o reverse queries"""
o2o_fields = [f for f in sender._meta.fields if isinstance(f, models.OneToOneField)]
for f in o2o_fields:
old_value = getattr(old, f.attname, None)
value = getattr(instance, f.attname)
if old_value != value:
rmodel, rfield = f.related_model, f.remote_field.field_name
if old:
invalidate_dict(rmodel, {rfield: old_value}, using=using)
invalidate_dict(rmodel, {rfield: value}, using=using)
def invalidate_m2o(sender, instance, using=DEFAULT_DB_ALIAS):
"""Invoke invalidation for m2o and m2m queries to a deleted instance"""
all_fields = sender._meta.get_fields(include_hidden=True, include_parents=True)
m2o_fields = [f for f in all_fields if isinstance(f, models.ManyToOneRel)]
fk_fields_names_map = {
f.name: f.attname
for f in all_fields if isinstance(f, models.ForeignKey)
}
for f in m2o_fields:
attr = fk_fields_names_map.get(f.field_name, f.field_name)
value = getattr(instance, attr)
rmodel, rfield = f.related_model, f.remote_field.attname
invalidate_dict(rmodel, {rfield: value}, using=using)
def invalidate_m2m(sender=None, instance=None, model=None, action=None, pk_set=None, reverse=None,
using=DEFAULT_DB_ALIAS, **kwargs):
"""
Invoke invalidation on m2m changes.
"""
# Skip this machinery for explicit through tables,
# since post_save and post_delete events are triggered for them
if not sender._meta.auto_created:
return
if action not in ('pre_clear', 'post_add', 'pre_remove'):
return
m2m = next(m2m for m2m in instance._meta.many_to_many + model._meta.many_to_many
if m2m.remote_field.through == sender)
instance_column, model_column = m2m.m2m_column_name(), m2m.m2m_reverse_name()
if reverse:
instance_column, model_column = model_column, instance_column
# TODO: optimize several invalidate_objs/dicts at once
if action == 'pre_clear':
objects = sender.objects.using(using).filter(**{instance_column: instance.pk})
for obj in objects:
invalidate_obj(obj, using=using)
elif action in ('post_add', 'pre_remove'):
# NOTE: we don't need to query the through objects here,
# because we already know all their meaningful attributes.
for pk in pk_set:
invalidate_dict(sender, {
instance_column: instance.pk,
model_column: pk
}, using=using)
@once
def install_cacheops():
"""
Installs cacheops by numerous monkey patches
"""
monkey_mix(BaseManager, ManagerMixin)
monkey_mix(models.QuerySet, QuerySetMixin)
# Use app registry to introspect used apps
from django.apps import apps
# Install profile and signal handlers for any earlier created models
for model in apps.get_models(include_auto_created=True):
if family_has_profile(model):
if not isinstance(model._default_manager, BaseManager):
raise ImproperlyConfigured("Can't install cacheops for %s.%s model:"
" non-django model class or manager is used."
% (model._meta.app_label, model._meta.model_name))
model._default_manager._install_cacheops(model)
# Bind m2m changed handlers
m2ms = (f for f in model._meta.get_fields(include_hidden=True) if f.many_to_many)
for m2m in m2ms:
rel = m2m if hasattr(m2m, 'through') else m2m.remote_field
opts = rel.through._meta
m2m_changed.connect(invalidate_m2m, sender=rel.through,
dispatch_uid=(opts.app_label, opts.model_name))
# Turn off caching in admin
if apps.is_installed('django.contrib.admin'):
from django.contrib.admin.options import ModelAdmin
@monkey(ModelAdmin)
def get_queryset(self, request):
return get_queryset.original(self, request).nocache()
# Make buffers/memoryviews pickleable to serialize binary field data
import copyreg
copyreg.pickle(memoryview, lambda b: (memoryview, (bytes(b),)))
django-cacheops-7.2/cacheops/reaper.py 0000664 0000000 0000000 00000004565 15001074010 0020022 0 ustar 00root root 0000000 0000000 import logging
from django.db import DEFAULT_DB_ALIAS
from .redis import redis_client
from .sharding import get_prefix
logger = logging.getLogger(__name__)
def reap_conjs(
chunk_size: int = 1000,
min_conj_set_size: int = 1000,
using=DEFAULT_DB_ALIAS,
dry_run: bool = False,
):
"""
Remove expired cache keys from invalidation sets.
Cacheops saves each DB resultset cache key in a "conj set" so it can delete it later if it
thinks it should be invalidated due to a saved record with matching values. But the resultset
caches time out after 30 minutes, and their cache keys live in those conj sets forever!
So conj sets for frequent queries on tables that aren't updated often end up containing
millions of already-expired cache keys and maybe a few thousand actually useful ones,
and block Redis for multiple - or many - seconds when cacheops finally decides
to invalidate them.
This function scans cacheops' conj keys for already-expired cache keys and removes them.
"""
logger.info('Starting scan for large conj sets')
prefix = get_prefix(dbs=[using])
for conj_key in redis_client.scan_iter(prefix + 'conj:*', count=chunk_size):
total = redis_client.scard(conj_key)
if total < min_conj_set_size:
continue
logger.info('Found %s cache keys in %s, scanning for expired keys', total, conj_key)
_clear_conj_key(conj_key, chunk_size, dry_run)
logger.info('Done scan for large conj sets')
def _clear_conj_key(conj_key: bytes, chunk_size: int, dry_run: bool):
"""Scan the cache keys in a conj set in batches and remove any that have expired."""
count, removed = 0, 0
for keys in _iter_keys_chunk(chunk_size, conj_key):
count += len(keys)
values = redis_client.mget(keys)
expired = [k for k, v in zip(keys, values) if not v]
if expired:
if not dry_run:
redis_client.srem(conj_key, *expired)
removed += len(expired)
logger.info('Removed %s/%s cache keys from %s', removed, count, conj_key)
if removed and not dry_run:
redis_client.execute_command('MEMORY PURGE')
def _iter_keys_chunk(chunk_size, key):
cursor = 0
while True:
cursor, items = redis_client.sscan(key, cursor, count=chunk_size)
if items:
yield items
if cursor == 0:
break
django-cacheops-7.2/cacheops/redis.py 0000664 0000000 0000000 00000004614 15001074010 0017645 0 ustar 00root root 0000000 0000000 import warnings
from django.core.exceptions import ImproperlyConfigured
from django.utils.module_loading import import_string
from funcy import decorator, identity, memoize, omit, LazyObject
import redis
from redis.sentinel import Sentinel
from .conf import settings
if settings.CACHEOPS_DEGRADE_ON_FAILURE:
@decorator
def handle_connection_failure(call):
try:
return call()
except redis.ConnectionError as e:
warnings.warn("The cacheops cache is unreachable! Error: %s" % e, RuntimeWarning)
except redis.TimeoutError as e:
warnings.warn("The cacheops cache timed out! Error: %s" % e, RuntimeWarning)
else:
handle_connection_failure = identity
@LazyObject
def redis_client():
if settings.CACHEOPS_REDIS and settings.CACHEOPS_SENTINEL:
raise ImproperlyConfigured("CACHEOPS_REDIS and CACHEOPS_SENTINEL are mutually exclusive")
client_class = redis.Redis
if settings.CACHEOPS_CLIENT_CLASS:
client_class = import_string(settings.CACHEOPS_CLIENT_CLASS)
if settings.CACHEOPS_SENTINEL:
if not {'locations', 'service_name'} <= set(settings.CACHEOPS_SENTINEL):
raise ImproperlyConfigured("Specify locations and service_name for CACHEOPS_SENTINEL")
sentinel = Sentinel(
settings.CACHEOPS_SENTINEL['locations'],
**omit(settings.CACHEOPS_SENTINEL, ('locations', 'service_name', 'db')))
return sentinel.master_for(
settings.CACHEOPS_SENTINEL['service_name'],
redis_class=client_class,
db=settings.CACHEOPS_SENTINEL.get('db', 0)
)
# Allow client connection settings to be specified by a URL.
if isinstance(settings.CACHEOPS_REDIS, str):
return client_class.from_url(settings.CACHEOPS_REDIS)
else:
return client_class(**settings.CACHEOPS_REDIS)
### Lua script loader
import os.path
import re
@memoize
def load_script(name):
filename = os.path.join(os.path.dirname(__file__), 'lua/%s.lua' % name)
with open(filename) as f:
code = f.read()
if is_redis_7():
code = re.sub(r'REDIS_4.*?/REDIS_4', '', code, flags=re.S)
else:
code = re.sub(r'REDIS_7.*?/REDIS_7', '', code, flags=re.S)
return redis_client.register_script(code)
@memoize
def is_redis_7():
redis_version = redis_client.info('server')['redis_version']
return int(redis_version.split('.')[0]) >= 7
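# Settings sketch (illustrative values): redis_client is built either from CACHEOPS_REDIS
# (a URL or a dict of redis.Redis kwargs) or from CACHEOPS_SENTINEL, never both:
#
#   CACHEOPS_REDIS = "redis://localhost:6379/1"
#   # CACHEOPS_SENTINEL = {
#   #     'locations': [('localhost', 26379)],
#   #     'service_name': 'mymaster',
#   #     'db': 0,
#   # }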
django-cacheops-7.2/cacheops/serializers.py 0000664 0000000 0000000 00000000316 15001074010 0021066 0 ustar 00root root 0000000 0000000 import pickle
class PickleSerializer:
# properties
PickleError = pickle.PickleError
HIGHEST_PROTOCOL = pickle.HIGHEST_PROTOCOL
# methods
dumps = pickle.dumps
loads = pickle.loads
django-cacheops-7.2/cacheops/sharding.py 0000664 0000000 0000000 00000002307 15001074010 0020333 0 ustar 00root root 0000000 0000000 from funcy import cached_property
from django.core.exceptions import ImproperlyConfigured
from .conf import settings
def get_prefix(**kwargs):
return settings.CACHEOPS_PREFIX(PrefixQuery(**kwargs))
class PrefixQuery(object):
def __init__(self, **kwargs):
assert set(kwargs) <= {'func', '_queryset', '_cond_dnfs', 'dbs', 'tables'}
kwargs.setdefault('func', None)
self.__dict__.update(kwargs)
@cached_property
def dbs(self):
return [self._queryset.db]
@cached_property
def db(self):
if len(self.dbs) > 1:
dbs_str = ', '.join(self.dbs)
raise ImproperlyConfigured('Single db required, but several used: ' + dbs_str)
return self.dbs[0]
# TODO: think if I should expose it and how. Same for queryset.
@cached_property
def _cond_dnfs(self):
return self._queryset._cond_dnfs
@cached_property
def tables(self):
return list(self._cond_dnfs)
@cached_property
def table(self):
if len(self.tables) > 1:
tables_str = ', '.join(self.tables)
raise ImproperlyConfigured('Single table required, but several used: ' + tables_str)
return self.tables[0]
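# Sharding sketch (illustrative): CACHEOPS_PREFIX receives a PrefixQuery and may route
# keys by database alias or table; note that plain cached() calls only provide .func:
#
#   CACHEOPS_PREFIX = lambda query: query.db + ':'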
django-cacheops-7.2/cacheops/signals.py 0000664 0000000 0000000 00000000220 15001074010 0020164 0 ustar 00root root 0000000 0000000 import django.dispatch
cache_read = django.dispatch.Signal() # args: func, hit
cache_invalidated = django.dispatch.Signal() # args: obj_dict
django-cacheops-7.2/cacheops/simple.py 0000664 0000000 0000000 00000012502 15001074010 0020023 0 ustar 00root root 0000000 0000000 import os
import time
from funcy import wraps
from .conf import settings
from .utils import get_cache_key, cached_view_fab, md5hex
from .redis import redis_client, handle_connection_failure
from .sharding import get_prefix
__all__ = ('cache', 'cached', 'cached_view', 'file_cache', 'CacheMiss', 'FileCache', 'RedisCache')
class CacheMiss(Exception):
pass
class CacheKey(str):
@classmethod
def make(cls, value, cache=None, timeout=None):
self = CacheKey(value)
self.cache = cache
self.timeout = timeout
return self
def get(self):
return self.cache._get(self)
def set(self, value):
self.cache._set(self, value, self.timeout)
def delete(self):
self.cache._delete(self)
class BaseCache(object):
"""
Simple cache with time-based invalidation
"""
def cached(self, timeout=None, extra=None):
"""
A decorator for caching function calls
"""
# Support @cached (without parentheses) form
if callable(timeout):
return self.cached()(timeout)
def _get_key(func, args, kwargs):
extra_val = extra(*args, **kwargs) if callable(extra) else extra
return get_prefix(func=func) + 'c:' + get_cache_key(func, args, kwargs, extra_val)
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
if not settings.CACHEOPS_ENABLED:
return func(*args, **kwargs)
cache_key = _get_key(func, args, kwargs)
try:
result = self._get(cache_key)
except CacheMiss:
result = func(*args, **kwargs)
self._set(cache_key, result, timeout)
return result
def invalidate(*args, **kwargs):
self._delete(_get_key(func, args, kwargs))
wrapper.invalidate = invalidate
def key(*args, **kwargs):
return CacheKey.make(_get_key(func, args, kwargs), cache=self, timeout=timeout)
wrapper.key = key
return wrapper
return decorator
def cached_view(self, timeout=None, extra=None):
if callable(timeout):
return self.cached_view()(timeout)
return cached_view_fab(self.cached)(timeout=timeout, extra=extra)
def get(self, cache_key):
return self._get(get_prefix() + cache_key)
def set(self, cache_key, data, timeout=None):
self._set(get_prefix() + cache_key, data, timeout)
def delete(self, cache_key):
self._delete(get_prefix() + cache_key)
class RedisCache(BaseCache):
def __init__(self, conn):
self.conn = conn
def _get(self, cache_key):
data = self.conn.get(cache_key)
if data is None:
raise CacheMiss
return settings.CACHEOPS_SERIALIZER.loads(data)
@handle_connection_failure
def _set(self, cache_key, data, timeout=None):
pickled_data = settings.CACHEOPS_SERIALIZER.dumps(data)
if timeout is not None:
self.conn.setex(cache_key, timeout, pickled_data)
else:
self.conn.set(cache_key, pickled_data)
@handle_connection_failure
def _delete(self, cache_key):
self.conn.delete(cache_key)
cache = RedisCache(redis_client)
cached = cache.cached
cached_view = cache.cached_view
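# Usage sketch (illustrative): time-based caching of an arbitrary function with
# manual invalidation:
#
#   from cacheops import cached
#
#   @cached(timeout=60 * 10)
#   def top_articles(category):
#       ...  # some expensive computation
#
#   top_articles.invalidate(category)   # drop the cached value for these exact args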
class FileCache(BaseCache):
"""
A file cache which fixes bugs and misdesign in the Django default one.
Uses mtimes in the future to designate expire time. This makes it unnecessary
to read stale files.
"""
def __init__(self, path, timeout=settings.FILE_CACHE_TIMEOUT):
self._dir = path
self._default_timeout = timeout
def _key_to_filename(self, key):
"""
Returns a filename corresponding to cache key
"""
digest = md5hex(key)
return os.path.join(self._dir, digest[-2:], digest[:-2])
def _get(self, key):
filename = self._key_to_filename(key)
try:
# Remove file if it's stale
if time.time() >= os.stat(filename).st_mtime:
self._delete(filename)
raise CacheMiss
with open(filename, 'rb') as f:
return settings.CACHEOPS_SERIALIZER.load(f)
except (IOError, OSError, EOFError):
raise CacheMiss
def _set(self, key, data, timeout=None):
filename = self._key_to_filename(key)
dirname = os.path.dirname(filename)
if timeout is None:
timeout = self._default_timeout
try:
if not os.path.exists(dirname):
os.makedirs(dirname)
# Use open with exclusive rights to prevent data corruption
f = os.open(filename, os.O_EXCL | os.O_WRONLY | os.O_CREAT)
try:
os.write(f, settings.CACHEOPS_SERIALIZER.dumps(data))
finally:
os.close(f)
# Set mtime to expire time
os.utime(filename, (0, time.time() + timeout))
except (IOError, OSError):
pass
def _delete(self, fname):
try:
os.remove(fname)
# Trying to remove directory in case it's empty
dirname = os.path.dirname(fname)
os.rmdir(dirname)
except (IOError, OSError):
pass
file_cache = FileCache(settings.FILE_CACHE_DIR)
django-cacheops-7.2/cacheops/templatetags/ 0000775 0000000 0000000 00000000000 15001074010 0020652 5 ustar 00root root 0000000 0000000 django-cacheops-7.2/cacheops/templatetags/__init__.py 0000664 0000000 0000000 00000000000 15001074010 0022751 0 ustar 00root root 0000000 0000000 django-cacheops-7.2/cacheops/templatetags/cacheops.py 0000664 0000000 0000000 00000004407 15001074010 0023016 0 ustar 00root root 0000000 0000000 from inspect import getfullargspec, unwrap
from functools import partial
from django.template import Library
from django.template.library import TagHelperNode, parse_bits
import cacheops
from cacheops.utils import carefully_strip_whitespace
__all__ = ['CacheopsLibrary', 'invalidate_fragment']
class CacheopsLibrary(Library):
def decorator_tag(self, func=None, takes_context=False):
if func is None:
return partial(self.decorator_tag, takes_context=takes_context)
name = func.__name__
params, varargs, varkw, defaults, kwonly, kwonly_defaults, _ = getfullargspec(unwrap(func))
def _compile(parser, token):
# content
nodelist = parser.parse(('end' + name,))
parser.delete_first_token()
# args
bits = token.split_contents()[1:]
args, kwargs = parse_bits(
parser, bits, params, varargs, varkw, defaults,
kwonly, kwonly_defaults, takes_context, name,
)
return CachedNode(func, takes_context, args, kwargs, nodelist)
self.tag(name=name, compile_function=_compile)
return func
register = CacheopsLibrary()
class CachedNode(TagHelperNode):
def __init__(self, func, takes_context, args, kwargs, nodelist):
super(CachedNode, self).__init__(func, takes_context, args, kwargs)
self.nodelist = nodelist
def render(self, context):
args, kwargs = self.get_resolved_arguments(context)
decorator = self.func(*args, **kwargs)
render = _make_render(context, self.nodelist)
return decorator(render)()
def _make_render(context, nodelist):
def render():
# TODO: make this cache preparation configurable
return carefully_strip_whitespace(nodelist.render(context))
return render
@register.decorator_tag
def cached(timeout, fragment_name, *extra):
return cacheops.cached(timeout=timeout, extra=(fragment_name,) + extra)
def invalidate_fragment(fragment_name, *extra):
render = _make_render(None, None)
cached(None, fragment_name, *extra)(render).invalidate()
@register.decorator_tag
def cached_as(queryset, timeout, fragment_name, *extra):
return cacheops.cached_as(queryset, timeout=timeout, extra=(fragment_name,) + extra)
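# Django template usage sketch (illustrative fragment names and querysets):
#
#   {% load cacheops %}
#
#   {% cached 600 'footer-links' request.user.is_staff %}
#       ... fragment cached purely by timeout ...
#   {% endcached %}
#
#   {% cached_as category.products 600 'product-list' category.pk %}
#       ... fragment invalidated together with the sample queryset ...
#   {% endcached_as %}
#
# invalidate_fragment('footer-links', is_staff) drops a {% cached %} fragment from code.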
django-cacheops-7.2/cacheops/transaction.py 0000664 0000000 0000000 00000010056 15001074010 0021061 0 ustar 00root root 0000000 0000000 import threading
from collections import defaultdict
from funcy import once, decorator
from django.db import DEFAULT_DB_ALIAS, DatabaseError
from django.db.backends.utils import CursorWrapper
from django.db.transaction import Atomic, get_connection, on_commit
from .utils import monkey_mix
__all__ = ('queue_when_in_transaction', 'install_cacheops_transaction_support',
'transaction_states')
class TransactionState(list):
def begin(self):
self.append({'cbs': [], 'dirty': False})
def commit(self):
context = self.pop()
if self:
# savepoint
self[-1]['cbs'].extend(context['cbs'])
self[-1]['dirty'] = self[-1]['dirty'] or context['dirty']
else:
# transaction
for func, args, kwargs in context['cbs']:
func(*args, **kwargs)
def rollback(self):
self.pop()
def push(self, item):
self[-1]['cbs'].append(item)
def mark_dirty(self):
self[-1]['dirty'] = True
def is_dirty(self):
return any(context['dirty'] for context in self)
class TransactionStates(threading.local):
def __init__(self):
super(TransactionStates, self).__init__()
self._states = defaultdict(TransactionState)
def __getitem__(self, key):
return self._states[key or DEFAULT_DB_ALIAS]
def is_dirty(self, dbs):
return any(self[db].is_dirty() for db in dbs)
transaction_states = TransactionStates()
@decorator
def queue_when_in_transaction(call):
if transaction_states[call.using]:
transaction_states[call.using].push((call, (), {}))
else:
return call()
class AtomicMixIn(object):
def __enter__(self):
entering = not transaction_states[self.using]
transaction_states[self.using].begin()
self._no_monkey.__enter__(self)
if entering:
on_commit(transaction_states[self.using].commit, self.using)
def __exit__(self, exc_type, exc_value, traceback):
connection = get_connection(self.using)
try:
self._no_monkey.__exit__(self, exc_type, exc_value, traceback)
except DatabaseError:
transaction_states[self.using].rollback()
raise
else:
if not connection.closed_in_transaction and exc_type is None and \
not connection.needs_rollback:
if transaction_states[self.using]:
transaction_states[self.using].commit()
else:
transaction_states[self.using].rollback()
class CursorWrapperMixin(object):
def callproc(self, procname, params=None):
result = self._no_monkey.callproc(self, procname, params)
if transaction_states[self.db.alias]:
transaction_states[self.db.alias].mark_dirty()
return result
def execute(self, sql, params=None):
result = self._no_monkey.execute(self, sql, params)
if transaction_states[self.db.alias] and is_sql_dirty(sql):
transaction_states[self.db.alias].mark_dirty()
return result
def executemany(self, sql, param_list):
result = self._no_monkey.executemany(self, sql, param_list)
if transaction_states[self.db.alias] and is_sql_dirty(sql):
transaction_states[self.db.alias].mark_dirty()
return result
CHARS = set('abcdefghijklmnoprqstuvwxyz_')
def is_sql_dirty(sql):
# This should not happen as using bytes in Python 3 is against db protocol,
# but some people will pass it anyway
if isinstance(sql, bytes):
sql = sql.decode()
# NOTE: not using regex here for speed
sql = sql.lower()
for action in ('update', 'insert', 'delete'):
p = sql.find(action)
if p == -1:
continue
start, end = p - 1, p + len(action)
if (start < 0 or sql[start] not in CHARS) and (end >= len(sql) or sql[end] not in CHARS):
return True
else:
return False
@once
def install_cacheops_transaction_support():
monkey_mix(Atomic, AtomicMixIn)
monkey_mix(CursorWrapper, CursorWrapperMixin)
django-cacheops-7.2/cacheops/tree.py 0000664 0000000 0000000 00000015020 15001074010 0017467 0 ustar 00root root 0000000 0000000 from itertools import product
from funcy import group_by, join_with, lcat, lmap, cat
from django.db.models import Subquery
from django.db.models.query import QuerySet
from django.db.models.sql import OR
from django.db.models.sql.datastructures import Join
from django.db.models.sql.query import Query, ExtraWhere
from django.db.models.sql.where import NothingNode
from django.db.models.lookups import Lookup, Exact, In, IsNull
from django.db.models.expressions import BaseExpression, Exists
from .conf import settings
from .invalidation import serializable_fields
# This existed prior to Django 5.2
try:
from django.db.models.sql.where import SubqueryConstraint
except ImportError:
class SubqueryConstraint(object):
pass
def dnfs(qs):
"""
Converts query condition tree into a DNF of eq conds.
Separately for each alias.
Any negations, conditions with lookups other than __exact or __in,
conditions on joined models and subrequests are ignored.
__in is expanded into a disjunction of separate = conditions, one per value.
"""
SOME = Some()
SOME_TREE = {frozenset({(None, None, SOME, True)})}
def negate(term):
return (term[0], term[1], term[2], not term[3])
def _dnf(where):
"""
Constructs DNF of where tree consisting of terms in form:
(alias, attribute, value, negation)
meaning `alias.attribute = value`
or `not alias.attribute = value` if negation is False
Any conditions other than eq are dropped.
"""
if isinstance(where, Lookup):
# If where.lhs don't refer to a field then don't bother
if not hasattr(where.lhs, 'target'):
return SOME_TREE
# Don't bother with complex right hand side either
if isinstance(where.rhs, (QuerySet, Query, BaseExpression)):
return SOME_TREE
# Skip conditions on non-serialized fields
if where.lhs.target not in serializable_fields(where.lhs.target.model):
return SOME_TREE
attname = where.lhs.target.attname
if isinstance(where, Exact):
return {frozenset({(where.lhs.alias, attname, where.rhs, True)})}
elif isinstance(where, IsNull):
return {frozenset({(where.lhs.alias, attname, None, where.rhs)})}
elif isinstance(where, In) and len(where.rhs) < settings.CACHEOPS_LONG_DISJUNCTION:
return {frozenset({(where.lhs.alias, attname, v, True)}) for v in where.rhs}
else:
return SOME_TREE
elif isinstance(where, NothingNode):
return set()
elif isinstance(where, (ExtraWhere, SubqueryConstraint, Exists)):
return SOME_TREE
elif len(where) == 0:
return {frozenset()}
else:
children_dnfs = lmap(_dnf, where.children)
if len(children_dnfs) == 0:
return {frozenset()}
elif len(children_dnfs) == 1:
result = children_dnfs[0]
else:
# Just unite children joined with OR
if where.connector == OR:
result = set(cat(children_dnfs))
# Use Cartesian product to AND children
else:
result = {frozenset(cat(conjs)) for conjs in product(*children_dnfs)}
# Negating and expanding brackets
if where.negated:
result = {frozenset(map(negate, conjs)) for conjs in product(*result)}
return result
def clean_conj(conj, for_alias):
conds = {}
for alias, attname, value, negation in conj:
# "SOME" conds, negated conds and conds for other aliases should be stripped
if value is not SOME and negation and alias == for_alias:
# Conjs where a field equals 2 different values will never cause invalidation
if attname in conds and conds[attname] != value:
return None
conds[attname] = value
return conds
def clean_dnf(tree, aliases):
cleaned = [clean_conj(conj, alias) for conj in tree for alias in aliases]
# Remove deleted conjunctions
cleaned = [conj for conj in cleaned if conj is not None]
# Any empty conjunction eats up the rest
# NOTE: a more elaborate DNF reduction is not really needed,
# just keep your querysets sane.
if not all(cleaned):
return [{}]
return cleaned
def add_join_conds(dnf, query):
from collections import defaultdict
# A cond on parent (alias, col) means the same cond applies to target and vice versa
join_exts = defaultdict(list)
for alias, join in query.alias_map.items():
if query.alias_refcount[alias] and isinstance(join, Join):
for parent_col, target_col in join.join_cols:
join_exts[join.parent_alias, parent_col].append((join.table_alias, target_col))
join_exts[join.table_alias, target_col].append((join.parent_alias, parent_col))
if not join_exts:
return dnf
return {
conj | {
(join_alias, join_col, v, negation)
for alias, col, v, negation in conj
for join_alias, join_col in join_exts[alias, col]
}
for conj in dnf
}
def query_dnf(query):
def table_for(alias):
return alias if alias == main_alias else query.alias_map[alias].table_name
dnf = _dnf(query.where)
dnf = add_join_conds(dnf, query)
# NOTE: we exclude content_type as it never changes and will hold dead invalidation info
main_alias = query.model._meta.db_table
aliases = {alias for alias, join in query.alias_map.items()
if query.alias_refcount[alias]} \
| {main_alias} - {'django_content_type'}
tables = group_by(table_for, aliases)
return {table: clean_dnf(dnf, table_aliases) for table, table_aliases in tables.items()}
if qs.query.combined_queries:
dnfs_ = join_with(lcat, (query_dnf(q) for q in qs.query.combined_queries))
else:
dnfs_ = query_dnf(qs.query)
# Add any subqueries used for annotation
if qs.query.annotations:
subqueries = (query_dnf(getattr(q, 'query', None))
for q in qs.query.annotations.values() if isinstance(q, Subquery))
dnfs_.update(join_with(lcat, subqueries))
return dnfs_
class Some:
def __str__(self):
return 'SOME'
__repr__ = __str__
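# Illustrative note (hypothetical model/table names): for a queryset like
#   Article.objects.filter(category_id=7, published=True)
# dnfs() returns roughly
#   {'app_article': [{'category_id': 7, 'published': True}]}
# i.e. per table, a disjunction of conjunctions of exact conditions. Conditions it
# cannot express (negations, non-eq lookups, subqueries) are treated as matching
# anything, which widens invalidation for that table.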
django-cacheops-7.2/cacheops/utils.py 0000664 0000000 0000000 00000011620 15001074010 0017672 0 ustar 00root root 0000000 0000000 import re
import json
import inspect
import sys
from funcy import memoize, compose, wraps, any, any_fn, select_values, mapcat
from django.db import models
from django.http import HttpRequest
from .conf import model_profile
def model_family(model):
"""
The family is the set of models sharing a database table; events on one should affect the others.
We simply collect a list of all proxy models, including subclasses, superclasses and siblings.
Two descendants of an abstract model are not family - they cannot affect each other.
"""
if model._meta.abstract: # No table - no family
return set()
@memoize
def class_tree(cls):
# NOTE: we also list multitable submodels here, we just don't care.
# Cacheops doesn't support them anyway.
return {cls} | set(mapcat(class_tree, cls.__subclasses__()))
table_bases = {b for b in model.__mro__ if issubclass(b, models.Model) and b is not models.Model
and not b._meta.proxy and not b._meta.abstract}
family = set(mapcat(class_tree, table_bases))
return {cls for cls in family if not cls._meta.abstract}
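# Illustrative example based on models from the test suite: model_family(VideoProxy)
# would be {Video, VideoProxy, NonCachedVideoProxy}, so a save on any of them
# invalidates cached querysets of the whole family sharing that table.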
@memoize
def family_has_profile(cls):
return any(model_profile, model_family(cls))
class MonkeyProxy(object):
pass
def monkey_mix(cls, mixin):
"""
Mixes a mixin into existing class.
Does not use actual multi-inheritance mixins, just monkey patches methods.
Mixin methods can call copies of original ones stored in `_no_monkey` proxy:
class SomeMixin(object):
def do_smth(self, arg):
... do smth else before
self._no_monkey.do_smth(self, arg)
... do smth else after
"""
assert not hasattr(cls, '_no_monkey'), 'Multiple monkey mix not supported'
cls._no_monkey = MonkeyProxy()
test = any_fn(inspect.isfunction, inspect.ismethoddescriptor)
methods = select_values(test, mixin.__dict__)
for name, method in methods.items():
if hasattr(cls, name):
setattr(cls._no_monkey, name, getattr(cls, name))
setattr(cls, name, method)
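# Hypothetical usage sketch (names are made up for illustration only):
#
#     class CountingMixin(object):
#         def count(self):
#             # do something extra here, then call the original, un-patched method
#             return self._no_monkey.count(self)
#
#     monkey_mix(SomeQuerySetClass, CountingMixin)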
@memoize
def stamp_fields(model):
"""
Returns serialized description of model fields.
"""
def _stamp(field):
name, class_name, *_ = field.deconstruct()
return name, class_name, field.attname, field.column
stamp = str(sorted(map(_stamp, model._meta.fields)))
return md5hex(stamp)
### Cache keys calculation
def obj_key(obj):
if isinstance(obj, models.Model):
return '%s.%s.%s' % (obj._meta.app_label, obj._meta.model_name, obj.pk)
elif hasattr(obj, 'build_absolute_uri'):
return obj.build_absolute_uri() # Only vary HttpRequest by uri
elif inspect.isfunction(obj):
factors = [obj.__module__, obj.__name__]
# Really useful to ignore this while the code is still in development
if hasattr(obj, '__code__') and not obj.__globals__.get('CACHEOPS_DEBUG'):
factors.append(obj.__code__.co_firstlineno)
return factors
else:
return str(obj)
def get_cache_key(*factors):
return md5hex(json.dumps(factors, sort_keys=True, default=obj_key))
def cached_view_fab(_cached):
def force_render(response):
if hasattr(response, 'render') and callable(response.render):
response.render()
return response
def cached_view(*dargs, **dkwargs):
def decorator(func):
cached_func = _cached(*dargs, **dkwargs)(compose(force_render, func))
@wraps(func)
def wrapper(request, *args, **kwargs):
assert isinstance(request, HttpRequest), \
"A view should be passed with HttpRequest as first argument"
if request.method not in ('GET', 'HEAD'):
return func(request, *args, **kwargs)
return cached_func(request, *args, **kwargs)
if hasattr(cached_func, 'invalidate'):
wrapper.invalidate = cached_func.invalidate
wrapper.key = cached_func.key
return wrapper
return decorator
return cached_view
### Whitespace handling for template tags
from django.utils.safestring import mark_safe
NEWLINE_BETWEEN_TAGS = mark_safe('>\n<')
SPACE_BETWEEN_TAGS = mark_safe('> <')
def carefully_strip_whitespace(text):
def repl(m):
return NEWLINE_BETWEEN_TAGS if '\n' in m.group(0) else SPACE_BETWEEN_TAGS
text = re.sub(r'>\s{2,}<', repl, text)
return text
### hashing helpers
import hashlib
class md5:
def __init__(self, s=None):
# set usedforsecurity for FIPS compliance
kwargs = {'usedforsecurity': False} if sys.version_info >= (3, 9) else {}
self.md5 = hashlib.md5(**kwargs)
if s is not None:
self.update(s)
def update(self, s):
return self.md5.update(s.encode('utf-8'))
def hexdigest(self):
return self.md5.hexdigest()
def md5hex(s):
return md5(s).hexdigest()
django-cacheops-7.2/manage.py 0000775 0000000 0000000 00000000371 15001074010 0016201 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tests.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
django-cacheops-7.2/publish.sh 0000775 0000000 0000000 00000000446 15001074010 0016404 0 ustar 00root root 0000000 0000000 #!/usr/bin/bash
set -ex
NAME=django-cacheops
VERSION=`awk '/__version__ = /{gsub(/'\''/, "", $3); print $3}' cacheops/__init__.py`
echo "Publishing $NAME-$VERSION..."
python setup.py sdist bdist_wheel
twine check dist/$NAME-$VERSION*
twine upload --skip-existing -uSuor dist/$NAME-$VERSION*
django-cacheops-7.2/pytest.ini 0000664 0000000 0000000 00000000141 15001074010 0016420 0 ustar 00root root 0000000 0000000 [pytest]
DJANGO_SETTINGS_MODULE=tests.settings
python_files = test*.py
addopts = --no-migrations
django-cacheops-7.2/requirements-test.txt 0000664 0000000 0000000 00000000156 15001074010 0020636 0 ustar 00root root 0000000 0000000 pytest==8.3.5
pytest-django==4.11.1
django>=3.2
redis>=3.0.0
funcy>=1.8
before_after==1.0.0
jinja2>=2.10
dill
django-cacheops-7.2/setup.cfg 0000664 0000000 0000000 00000000077 15001074010 0016220 0 ustar 00root root 0000000 0000000 [bdist_wheel]
universal = 1
[metadata]
license_file = LICENSE
django-cacheops-7.2/setup.py 0000664 0000000 0000000 00000003377 15001074010 0016117 0 ustar 00root root 0000000 0000000 from setuptools import setup
# Remove build status
README = open('README.rst').read().replace('|Build Status|', '', 1)
setup(
name='django-cacheops',
version='7.2',
author='Alexander Schepanovski',
author_email='suor.web@gmail.com',
description='A slick ORM cache with automatic granular event-driven invalidation for Django.',
long_description=README,
url='http://github.com/Suor/django-cacheops',
license='BSD',
packages=[
'cacheops',
'cacheops.management',
'cacheops.management.commands',
'cacheops.templatetags'
],
python_requires='>=3.7',
install_requires=[
'django>=3.2',
'redis>=3.0.0',
'funcy>=1.8',
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3.11',
'Programming Language :: Python :: 3.12',
'Programming Language :: Python :: 3.13',
'Framework :: Django',
'Framework :: Django :: 3.2',
'Framework :: Django :: 4.0',
'Framework :: Django :: 4.1',
'Framework :: Django :: 4.2',
'Framework :: Django :: 5.0',
'Framework :: Django :: 5.1',
'Framework :: Django :: 5.2',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Python Modules',
],
zip_safe=False,
include_package_data=True,
)
django-cacheops-7.2/tests/ 0000775 0000000 0000000 00000000000 15001074010 0015535 5 ustar 00root root 0000000 0000000 django-cacheops-7.2/tests/__init__.py 0000664 0000000 0000000 00000000000 15001074010 0017634 0 ustar 00root root 0000000 0000000 django-cacheops-7.2/tests/bench.py 0000664 0000000 0000000 00000013075 15001074010 0017174 0 ustar 00root root 0000000 0000000 from cacheops import invalidate_obj, invalidate_model
from cacheops.conf import settings
from cacheops.redis import redis_client
from cacheops.tree import dnfs
from .models import Category, Post, Extra
posts = list(Post.objects.cache().all())
posts_pickle = settings.CACHEOPS_SERIALIZER.dumps(posts)
def do_pickle():
settings.CACHEOPS_SERIALIZER.dumps(posts)
def do_unpickle():
settings.CACHEOPS_SERIALIZER.loads(posts_pickle)
get_key = Category.objects.filter(pk=1).order_by()._cache_key()
def invalidate_get():
redis_client.delete(get_key)
def do_get():
Category.objects.cache().get(pk=1)
def do_get_nocache():
Category.objects.nocache().get(pk=1)
c = Category.objects.first()
def invalidate_count():
invalidate_obj(c)
def do_count():
Category.objects.cache().count()
def do_count_nocache():
Category.objects.nocache().count()
fetch_qs = Category.objects.all()
fetch_key = fetch_qs._cache_key()
def invalidate_fetch():
redis_client.delete(fetch_key)
def do_fetch():
list(Category.objects.cache().all())
def do_fetch_nocache():
list(Category.objects.nocache().all())
def do_fetch_construct():
Category.objects.all()
def do_fetch_cache_key():
fetch_qs._cache_key()
filter_qs = Category.objects.filter(pk=1)
def do_filter_cache_key():
filter_qs._cache_key()
def do_common_construct():
return Category.objects.filter(pk=1).exclude(title__contains='Hi').order_by('title')[:20]
def do_common_inplace():
return Category.objects.inplace() \
.filter(pk=1).exclude(title__contains='Hi').order_by('title')[:20]
common_qs = do_common_construct()
common_key = common_qs._cache_key()
def do_common_cache_key():
common_qs._cache_key()
def do_common_dnfs():
dnfs(common_qs)
def do_common():
qs = Category.objects.filter(pk=1).exclude(title__contains='Hi').order_by('title').cache()[:20]
list(qs)
def do_common_nocache():
qs = Category.objects.filter(pk=1).exclude(title__contains='Hi').order_by('title') \
.nocache()[:20]
list(qs)
def invalidate_common():
redis_client.delete(common_key)
def prepare_obj():
return Category.objects.cache().get(pk=1)
def do_invalidate_obj(obj):
invalidate_obj(obj)
def do_save_obj(obj):
obj.save()
### Complex queryset
from django.db.models import Q
def do_complex_construct():
return Post.objects.filter(id__gt=1, title='Hi').exclude(category__in=[10, 20]) \
.filter(Q(id__range=(10, 20)) | ~Q(title__contains='abc')) \
.select_related('category').prefetch_related('category') \
.order_by('title')[:10]
def do_complex_inplace():
return Post.objects.inplace() \
.filter(id__gt=1, title='Hi').exclude(category__in=[10, 20]) \
.filter(Q(id__range=(10, 20)) | ~Q(title__contains='abc')) \
.select_related('category').prefetch_related('category') \
.order_by('title')[:10]
complex_qs = do_complex_construct()
def do_complex_cache_key():
complex_qs._cache_key()
def do_complex_dnfs():
dnfs(complex_qs)
### More invalidation
def prepare_cache():
def _variants(*args, **kwargs):
qs = Extra.objects.cache().filter(*args, **kwargs)
qs.count()
list(qs)
list(qs[:2])
list(qs.values())
_variants(pk=1)
_variants(post=1)
_variants(tag=5)
_variants(to_tag=10)
_variants(pk=1, post=1)
_variants(pk=1, tag=5)
_variants(post=1, tag=5)
_variants(pk=1, post=1, tag=5)
_variants(pk=1, post=1, to_tag=10)
_variants(Q(pk=1) | Q(tag=5))
_variants(Q(pk=1) | Q(tag=1))
_variants(Q(pk=1) | Q(tag=2))
_variants(Q(pk=1) | Q(tag=3))
_variants(Q(pk=1) | Q(tag=4))
return Extra.objects.cache().get(pk=1)
def do_invalidate_model(obj):
invalidate_model(obj.__class__)
TESTS = [
('pickle', {'run': do_pickle}),
('unpickle', {'run': do_unpickle}),
('get_nocache', {'run': do_get_nocache}),
('get_hit', {'prepare_once': do_get, 'run': do_get}),
('get_miss', {'prepare': invalidate_get, 'run': do_get}),
('count_nocache', {'run': do_count_nocache}),
('count_hit', {'prepare_once': do_count, 'run': do_count}),
('count_miss', {'prepare': invalidate_count, 'run': do_count}),
('fetch_construct', {'run': do_fetch_construct}),
('fetch_nocache', {'run': do_fetch_nocache}),
('fetch_hit', {'prepare_once': do_fetch, 'run': do_fetch}),
('fetch_miss', {'prepare': invalidate_fetch, 'run': do_fetch}),
('fetch_cache_key', {'run': do_fetch_cache_key}),
('filter_cache_key', {'run': do_filter_cache_key}),
('common_construct', {'run': do_common_construct}),
('common_inplace', {'run': do_common_inplace}),
('common_cache_key', {'run': do_common_cache_key}),
('common_dnfs', {'run': do_common_dnfs}),
('common_nocache', {'run': do_common_nocache}),
('common_hit', {'prepare_once': do_common, 'run': do_common}),
('common_miss', {'prepare': invalidate_common, 'run': do_common}),
('invalidate_obj', {'prepare': prepare_obj, 'run': do_invalidate_obj}),
('save_obj', {'prepare': prepare_obj, 'run': do_save_obj}),
('complex_construct', {'run': do_complex_construct}),
('complex_inplace', {'run': do_complex_inplace}),
('complex_cache_key', {'run': do_complex_cache_key}),
('complex_dnfs', {'run': do_complex_dnfs}),
('big_invalidate', {'prepare': prepare_cache, 'run': do_invalidate_obj}),
('model_invalidate', {'prepare': prepare_cache, 'run': do_invalidate_model}),
]
django-cacheops-7.2/tests/fixtures/ 0000775 0000000 0000000 00000000000 15001074010 0017406 5 ustar 00root root 0000000 0000000 django-cacheops-7.2/tests/fixtures/basic.json 0000664 0000000 0000000 00000002262 15001074010 0021364 0 ustar 00root root 0000000 0000000 [
{
"model": "tests.category",
"pk": 1,
"fields": {
"title": "Django"
}
},
{
"model": "tests.category",
"pk": 2,
"fields": {
"title": "Rails"
}
},
{
"model": "tests.category",
"pk": 3,
"fields": {
"title": "Perl"
}
},
{
"model": "tests.category",
"pk": 4,
"fields": {
"title": "Python"
}
},
{
"model": "tests.category",
"pk": 5,
"fields": {
"title": "Node.js"
}
},
{
"model": "tests.post",
"pk": 1,
"fields": {
"title": "Cacheops",
"category": 1,
"visible": true
}
},
{
"model": "tests.post",
"pk": 2,
"fields": {
"title": "Implicit variable as pronoun",
"category": 3,
"visible": true
}
},
{
"model": "tests.post",
"pk": 3,
"fields": {
"title": "Perl 6 hyperoperator",
"category": 3,
"visible": false
}
},
{
"model": "tests.extra",
"pk": 1,
"fields": {
"post": 1,
"tag": 5,
"to_tag": 10
}
},
{
"model": "tests.extra",
"pk": 2,
"fields": {
"post": 2,
"tag": 10,
"to_tag": 5
}
}
]
django-cacheops-7.2/tests/models.py 0000664 0000000 0000000 00000021005 15001074010 0017370 0 ustar 00root root 0000000 0000000 import os
import uuid
from datetime import date, time
from django.db import models
from django.db.models.query import QuerySet
from django.db.models import sql, manager
from django.contrib.auth.models import User
from django.utils import timezone
### For basic tests and bench
class Category(models.Model):
title = models.CharField(max_length=128)
def __unicode__(self):
return self.title
class Post(models.Model):
title = models.CharField(max_length=128)
category = models.ForeignKey(Category, on_delete=models.CASCADE, related_name='posts')
visible = models.BooleanField(default=True)
def __unicode__(self):
return self.title
class Extra(models.Model):
post = models.OneToOneField(Post, on_delete=models.CASCADE)
tag = models.IntegerField(db_column='custom_column_name', unique=True)
to_tag = models.ForeignKey('self', on_delete=models.CASCADE, to_field='tag', null=True)
def __unicode__(self):
return 'Extra(post_id=%s, tag=%s)' % (self.post_id, self.tag)
### Specific and custom fields
class CustomValue(object):
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
def __eq__(self, other):
return isinstance(other, CustomValue) and self.value == other.value
class CustomField(models.Field):
def db_type(self, connection):
return 'text'
def to_python(self, value):
if isinstance(value, CustomValue):
return value
return CustomValue(value)
def from_db_value(self, value, expression, conn):
return self.to_python(value)
def get_prep_value(self, value):
return value.value
class CustomWhere(sql.where.WhereNode):
pass
class CustomQuery(sql.Query):
pass
class CustomManager(models.Manager):
def get_query_set(self):
q = CustomQuery(self.model, CustomWhere)
return QuerySet(self.model, q)
get_queryset = get_query_set
class IntegerArrayField(models.Field):
def db_type(self, connection):
return 'text'
def to_python(self, value):
if value in (None, ''):
return None
if isinstance(value, list):
return value
return [int(v) for v in value.split(',')]
def from_db_value(self, value, expression, conn):
return self.to_python(value)
def get_prep_value(self, value):
return ','.join(map(str, value))
def custom_value_default():
return CustomValue('default')
class Weird(models.Model):
date_field = models.DateField(default=date(2000, 1, 1))
datetime_field = models.DateTimeField(default=timezone.now)
time_field = models.TimeField(default=time(10, 10))
list_field = IntegerArrayField(default=list, blank=True)
custom_field = CustomField(default=custom_value_default)
binary_field = models.BinaryField()
objects = models.Manager()
customs = CustomManager()
# TODO: check other new fields:
# - PostgreSQL ones: HStoreField, RangeFields, unaccent
# - Other: DurationField
if os.environ.get('CACHEOPS_DB') in {'postgresql', 'postgis'}:
from django.contrib.postgres.fields import ArrayField
try:
from django.db.models import JSONField
except ImportError:
try:
from django.contrib.postgres.fields import JSONField # Used before Django 3.1
except ImportError:
JSONField = None
class TaggedPost(models.Model):
name = models.CharField(max_length=200)
tags = ArrayField(models.IntegerField())
if JSONField:
meta = JSONField()
# 16
class Profile(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
tag = models.IntegerField()
# Proxy model
class Video(models.Model):
title = models.CharField(max_length=128)
class VideoProxy(Video):
class Meta:
proxy = True
class NonCachedVideoProxy(Video):
class Meta:
proxy = True
class NonCachedMedia(models.Model):
title = models.CharField(max_length=128)
class MediaProxy(NonCachedMedia):
class Meta:
proxy = True
class MediaType(models.Model):
name = models.CharField(max_length=50)
# Multi-table inheritance
class Media(models.Model):
name = models.CharField(max_length=128)
media_type = models.ForeignKey(
MediaType,
on_delete=models.CASCADE,
)
def __str__(self):
return str(self.media_type)
class Movie(Media):
year = models.IntegerField()
class Scene(models.Model):
"""Model with FK to submodel."""
name = models.CharField(max_length=50)
movie = models.ForeignKey(
Movie,
on_delete=models.CASCADE,
related_name="scenes",
)
# M2M models
class Label(models.Model):
text = models.CharField(max_length=127, blank=True, default='')
class Brand(models.Model):
labels = models.ManyToManyField(Label, related_name='brands')
# M2M with explicit through models
class LabelT(models.Model):
text = models.CharField(max_length=127, blank=True, default='')
class BrandT(models.Model):
labels = models.ManyToManyField(LabelT, related_name='brands', through='Labeling')
class Labeling(models.Model):
label = models.ForeignKey(LabelT, on_delete=models.CASCADE)
brand = models.ForeignKey(BrandT, on_delete=models.CASCADE)
tag = models.IntegerField()
class PremiumBrand(Brand):
extra = models.CharField(max_length=127, blank=True, default='')
# local_get
class Local(models.Model):
tag = models.IntegerField(null=True)
# 45
class CacheOnSaveModel(models.Model):
title = models.CharField(max_length=32)
# 47
class DbAgnostic(models.Model):
pass
class DbBinded(models.Model):
pass
# contrib.postgis
if os.environ.get('CACHEOPS_DB') == 'postgis':
from django.contrib.gis.db import models as gis_models
class Geometry(gis_models.Model):
point = gis_models.PointField(geography=True, dim=3, blank=True, null=True, default=None)
# 145
class One(models.Model):
boolean = models.BooleanField(default=False)
def set_boolean_true(sender, instance, created, **kwargs):
if created:
return
dialog = One.objects.cache().get(id=instance.id)
assert dialog.boolean is True
from django.db.models.signals import post_save
post_save.connect(set_boolean_true, sender=One)
# 312
class Device(models.Model):
uid = models.UUIDField(default=uuid.uuid4)
model = models.CharField(max_length=64)
# 333
class CustomQuerySet(QuerySet):
pass
class CustomFromQSManager(manager.BaseManager.from_queryset(CustomQuerySet)):
use_for_related_fields = True
class CustomFromQSModel(models.Model):
boolean = models.BooleanField(default=False)
objects = CustomFromQSManager()
# 352
class CombinedField(models.CharField):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.another_field = models.CharField(*args, **kwargs)
def contribute_to_class(self, cls, name, **kwargs):
super().contribute_to_class(cls, name, private_only=True)
self.another_field.contribute_to_class(cls, name, **kwargs)
class CombinedFieldModel(models.Model):
text = CombinedField(max_length=8, default='example')
# 353
class Foo(models.Model):
pass
class Bar(models.Model):
foo = models.OneToOneField(
to="Foo",
on_delete=models.SET_NULL,
related_name='bar',
blank=True,
null=True
)
# 385
class Client(models.Model):
def __init__(self, *args, **kwargs):
# copied from Django 2.1.5 (does not exist in Django 3.1.5 installed by current requirements)
def curry(_curried_func, *args, **kwargs):
def _curried(*moreargs, **morekwargs):
return _curried_func(*args, *moreargs, **{**kwargs, **morekwargs})
return _curried
super().__init__(*args, **kwargs)
setattr(self, '_get_private_data', curry(sum, [1, 2, 3, 4]))
name = models.CharField(max_length=255)
# Abstract models
class Abs(models.Model):
class Meta:
abstract = True
class Concrete1(Abs):
pass
class AbsChild(Abs):
class Meta:
abstract = True
class Concrete2(AbsChild):
pass
class NoProfile(models.Model):
title = models.CharField(max_length=128)
class NoProfileProxy(NoProfile):
class Meta:
proxy = True
class AbsNoProfile(NoProfile):
class Meta:
abstract = True
class NoProfileChild(AbsNoProfile):
pass
class ParentId(models.Model):
pass
class ParentStr(models.Model):
name = models.CharField(max_length=128, primary_key=True)
class Mess(ParentId, ParentStr):
pass
class MessChild(Mess):
pass
django-cacheops-7.2/tests/settings.py 0000664 0000000 0000000 00000007055 15001074010 0017756 0 ustar 00root root 0000000 0000000 import os
INSTALLED_APPS = [
'cacheops',
'django.contrib.contenttypes',
'django.contrib.auth',
'tests',
]
ROOT_URLCONF = 'tests.urls'
MIDDLEWARE_CLASSES = []
AUTH_PROFILE_MODULE = 'tests.UserProfile'
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
USE_TZ = True
# Django replaces this, but it still wants it. *shrugs*
DATABASE_ENGINE = 'django.db.backends.sqlite3'
if os.environ.get('CACHEOPS_DB') == 'postgresql':
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'cacheops',
'USER': 'cacheops',
'PASSWORD': 'cacheops',
'HOST': os.getenv('POSTGRES_HOST') or '127.0.0.1',
},
'slave': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'cacheops_slave',
'USER': 'cacheops',
'PASSWORD': 'cacheops',
'HOST': os.getenv('POSTGRES_HOST') or '127.0.0.1',
},
}
# Use psycopg2cffi for PyPy
try:
import psycopg2 # noqa
except ImportError:
from psycopg2cffi import compat
compat.register()
elif os.environ.get('CACHEOPS_DB') == 'postgis':
POSTGIS_VERSION = (2, 1, 1)
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'cacheops',
'USER': 'cacheops',
'PASSWORD': 'cacheops',
'HOST': os.getenv('POSTGRES_HOST') or '127.0.0.1',
},
'slave': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'cacheops_slave',
'USER': 'cacheops',
'PASSWORD': 'cacheops',
'HOST': os.getenv('POSTGRES_HOST') or '127.0.0.1',
},
}
elif os.environ.get('CACHEOPS_DB') == 'mysql':
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'cacheops',
'USER': 'root',
'PASSWORD': 'cacheops',
'HOST': os.getenv('MYSQL_HOST') or '127.0.0.1',
},
'slave': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'cacheops_slave',
'USER': 'root',
'PASSWORD': 'cacheops',
'HOST': os.getenv('MYSQL_HOST') or '127.0.0.1',
},
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'sqlite.db',
# Make the in-memory sqlite test db work with threads
# See https://code.djangoproject.com/ticket/12118
'TEST': {
'NAME': ':memory:cache=shared'
}
},
'slave': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'sqlite_slave.db',
}
}
CACHEOPS_REDIS = {
'host': os.getenv('REDIS_HOST') or '127.0.0.1',
'port': 6379,
'db': 13,
'socket_timeout': 3,
}
CACHEOPS_DEFAULTS = {
'timeout': 60*60
}
CACHEOPS = {
'tests.local': {'local_get': True},
'tests.cacheonsavemodel': {'cache_on_save': True},
'tests.dbbinded': {'db_agnostic': False},
'tests.*': {},
'tests.noncachedvideoproxy': None,
'tests.noncachedmedia': None,
'tests.noprofile': None,
'auth.*': {},
}
if os.environ.get('CACHEOPS_PREFIX'):
CACHEOPS_PREFIX = lambda q: 'p:'
CACHEOPS_INSIDEOUT = bool(os.environ.get('CACHEOPS_INSIDEOUT'))
CACHEOPS_DEGRADE_ON_FAILURE = bool(os.environ.get('CACHEOPS_DEGRADE_ON_FAILURE'))
ALLOWED_HOSTS = ['testserver']
SECRET_KEY = 'abc'
TEMPLATES = [{'BACKEND': 'django.template.backends.django.DjangoTemplates'}]
django-cacheops-7.2/tests/test_extras.py 0000664 0000000 0000000 00000015104 15001074010 0020455 0 ustar 00root root 0000000 0000000 from django.db import transaction
from django.test import TestCase, override_settings
from cacheops import cached_as, no_invalidation, invalidate_obj, invalidate_model, invalidate_all
from cacheops.conf import settings
from cacheops.signals import cache_read, cache_invalidated
from .utils import BaseTestCase, make_inc
from .models import Post, Category, Local, DbAgnostic, DbBinded
class SettingsTests(TestCase):
def test_context_manager(self):
self.assertTrue(settings.CACHEOPS_ENABLED)
with self.settings(CACHEOPS_ENABLED=False):
self.assertFalse(settings.CACHEOPS_ENABLED)
@override_settings(CACHEOPS_ENABLED=False)
def test_decorator(self):
self.assertFalse(settings.CACHEOPS_ENABLED)
@override_settings(CACHEOPS_ENABLED=False)
class ClassOverrideSettingsTests(TestCase):
def test_class(self):
self.assertFalse(settings.CACHEOPS_ENABLED)
class SignalsTests(BaseTestCase):
def setUp(self):
super(SignalsTests, self).setUp()
def set_signal(signal=None, **kwargs):
self.signal_calls.append(kwargs)
self.signal_calls = []
cache_read.connect(set_signal, dispatch_uid=1, weak=False)
def tearDown(self):
super(SignalsTests, self).tearDown()
cache_read.disconnect(dispatch_uid=1)
def test_queryset(self):
# Miss
test_model = Category.objects.create(title="foo")
Category.objects.cache().get(id=test_model.id)
self.assertEqual(self.signal_calls, [{'sender': Category, 'func': None, 'hit': False}])
# Hit
self.signal_calls = []
Category.objects.cache().get(id=test_model.id) # hit
self.assertEqual(self.signal_calls, [{'sender': Category, 'func': None, 'hit': True}])
def test_queryset_empty(self):
list(Category.objects.cache().filter(pk__in=[]))
self.assertEqual(self.signal_calls, [{'sender': Category, 'func': None, 'hit': False}])
def test_cached_as(self):
get_calls = make_inc(cached_as(Category.objects.filter(title='test')))
func = get_calls.__wrapped__
# Miss
self.assertEqual(get_calls(), 1)
self.assertEqual(self.signal_calls, [{'sender': None, 'func': func, 'hit': False}])
# Hit
self.signal_calls = []
self.assertEqual(get_calls(), 1)
self.assertEqual(self.signal_calls, [{'sender': None, 'func': func, 'hit': True}])
def test_invalidation_signal(self):
def set_signal(signal=None, **kwargs):
signal_calls.append(kwargs)
signal_calls = []
cache_invalidated.connect(set_signal, dispatch_uid=1, weak=False)
invalidate_all()
invalidate_model(Post)
c = Category.objects.create(title='Hey')
self.assertEqual(signal_calls, [
{'sender': None, 'obj_dict': None},
{'sender': Post, 'obj_dict': None},
{'sender': Category, 'obj_dict': {'id': c.pk, 'title': 'Hey'}},
])
class LockingTests(BaseTestCase):
def test_lock(self):
import random
import threading
from .utils import ThreadWithReturnValue
from before_after import before
@cached_as(Post, lock=True, timeout=60)
def func():
return random.random()
results = []
locked = threading.Event()
thread = [None]
def second_thread():
def _target():
try:
with before('redis.Redis.brpoplpush', lambda *a, **kw: locked.set()):
results.append(func())
except Exception:
locked.set()
raise
thread[0] = ThreadWithReturnValue(target=_target)
thread[0].start()
assert locked.wait(1) # Wait until right before the block
with before('random.random', second_thread):
results.append(func())
thread[0].join()
self.assertEqual(results[0], results[1])
class NoInvalidationTests(BaseTestCase):
fixtures = ['basic']
def _template(self, invalidate):
post = Post.objects.cache().get(pk=1)
invalidate(post)
with self.assertNumQueries(0):
Post.objects.cache().get(pk=1)
def test_context_manager(self):
def invalidate(post):
with no_invalidation:
invalidate_obj(post)
self._template(invalidate)
def test_decorator(self):
self._template(no_invalidation(invalidate_obj))
def test_nested(self):
def invalidate(post):
with no_invalidation:
with no_invalidation:
pass
invalidate_obj(post)
self._template(invalidate)
def test_in_transaction(self):
with transaction.atomic():
post = Post.objects.cache().get(pk=1)
with no_invalidation:
post.save()
with self.assertNumQueries(0):
Post.objects.cache().get(pk=1)
class LocalGetTests(BaseTestCase):
def setUp(self):
Local.objects.create(pk=1)
super(LocalGetTests, self).setUp()
def test_unhashable_args(self):
Local.objects.cache().get(pk__in=[1, 2])
class DbAgnosticTests(BaseTestCase):
databases = ('default', 'slave')
def test_db_agnostic_by_default(self):
list(DbAgnostic.objects.cache())
with self.assertNumQueries(0, using='slave'):
list(DbAgnostic.objects.cache().using('slave'))
def test_db_agnostic_disabled(self):
list(DbBinded.objects.cache())
with self.assertNumQueries(1, using='slave'):
list(DbBinded.objects.cache().using('slave'))
def test_model_family():
from cacheops.utils import model_family
from .models import Abs, Concrete1, AbsChild, Concrete2
from .models import NoProfile, NoProfileProxy, AbsNoProfile, NoProfileChild
from .models import ParentId, ParentStr, Mess, MessChild
# Abstract models do not have a family; children of an abstract model are not a family either
assert model_family(Abs) == set()
assert model_family(Concrete1) == {Concrete1}
assert model_family(AbsChild) == set()
assert model_family(Concrete2) == {Concrete2}
# Everything is in the family except the abstract model
assert model_family(NoProfile) == {NoProfile, NoProfileProxy, NoProfileChild}
assert model_family(NoProfileProxy) == {NoProfile, NoProfileProxy, NoProfileChild}
assert model_family(AbsNoProfile) == set()
assert model_family(NoProfileChild) == {NoProfile, NoProfileProxy, NoProfileChild}
# The worst of multiple inheritance
assert model_family(Mess) == {Mess, MessChild, ParentId, ParentStr}
django-cacheops-7.2/tests/test_low_level.py 0000664 0000000 0000000 00000000763 15001074010 0021144 0 ustar 00root root 0000000 0000000 import pytest
from cacheops.redis import redis_client
from .models import User
from .utils import BaseTestCase
@pytest.fixture()
def base(db):
case = BaseTestCase()
case.setUp()
yield
case.tearDown()
def test_ttl(base):
user = User.objects.create(username='Suor')
qs = User.objects.cache(timeout=100).filter(pk=user.pk)
list(qs)
assert 90 <= redis_client.ttl(qs._cache_key()) <= 100
assert redis_client.ttl(f'{qs._prefix}conj:auth_user:id={user.id}') > 100
django-cacheops-7.2/tests/tests.py 0000664 0000000 0000000 00000115426 15001074010 0017262 0 ustar 00root root 0000000 0000000 from contextlib import contextmanager
from functools import reduce
import operator
import re
import platform
import unittest
from unittest import mock
import django
from django.db import connection
from django.db import DEFAULT_DB_ALIAS
from django.test import override_settings
from django.test.client import RequestFactory
from django.template import Context, Template
from django.db.models import F, Count, Max, OuterRef, Sum, Subquery, Exists, Q
from django.db.models.expressions import RawSQL
from cacheops import invalidate_model, invalidate_obj, \
cached, cached_view, cached_as, cached_view_as
from cacheops import invalidate_fragment
from cacheops.query import invalidate_m2o
from cacheops.templatetags.cacheops import register
decorator_tag = register.decorator_tag
from .models import * # noqa
from .utils import BaseTestCase, make_inc
class BasicTests(BaseTestCase):
fixtures = ['basic']
def test_it_works(self):
with self.assertNumQueries(1):
cnt1 = Category.objects.cache().count()
cnt2 = Category.objects.cache().count()
self.assertEqual(cnt1, cnt2)
def test_empty(self):
with self.assertNumQueries(0):
list(Category.objects.cache().filter(id__in=[]))
def test_exact(self):
list(Category.objects.filter(pk=1).cache())
with self.assertNumQueries(0):
list(Category.objects.filter(pk__exact=1).cache())
def test_exists(self):
with self.assertNumQueries(1):
Category.objects.cache(ops='exists').exists()
Category.objects.cache(ops='exists').exists()
def test_some(self):
# Ignoring the SOME condition led to a wrong DNF for this queryset,
# which resulted in no invalidation
list(Category.objects.exclude(pk__in=range(10), pk__isnull=False).cache())
c = Category.objects.get(pk=1)
c.save()
with self.assertNumQueries(1):
list(Category.objects.exclude(pk__in=range(10), pk__isnull=False).cache())
def test_invalidation(self):
post = Post.objects.cache().get(pk=1)
post.title += ' changed'
post.save()
with self.assertNumQueries(1):
changed_post = Post.objects.cache().get(pk=1)
self.assertEqual(post.title, changed_post.title)
def test_granular(self):
Post.objects.cache().get(pk=1)
Post.objects.get(pk=2).save()
with self.assertNumQueries(0):
Post.objects.cache().get(pk=1)
def test_invalidate_by_foreign_key(self):
posts = list(Post.objects.cache().filter(category=1))
Post.objects.create(title='New Post', category_id=1)
with self.assertNumQueries(1):
changed_posts = list(Post.objects.cache().filter(category=1))
self.assertEqual(len(changed_posts), len(posts) + 1)
def test_invalidate_by_one_to_one(self):
extras = list(Extra.objects.cache().filter(post=3))
Extra.objects.create(post_id=3, tag=0)
with self.assertNumQueries(1):
changed_extras = list(Extra.objects.cache().filter(post=3))
self.assertEqual(len(changed_extras), len(extras) + 1)
def test_invalidate_by_boolean(self):
count = Post.objects.cache().filter(visible=True).count()
with self.assertNumQueries(0):
Post.objects.cache().filter(visible=True).count()
post = Post.objects.get(pk=1, visible=True)
post.visible = False
post.save()
with self.assertNumQueries(1):
new_count = Post.objects.cache().filter(visible=True).count()
self.assertEqual(new_count, count - 1)
def test_bulk_create(self):
cnt = Category.objects.cache().count()
Category.objects.bulk_create([Category(title='hi'), Category(title='there')])
with self.assertNumQueries(1):
cnt2 = Category.objects.cache().count()
self.assertEqual(cnt2, cnt + 2)
def test_db_column(self):
e = Extra.objects.cache().get(tag=5)
e.save()
def test_fk_to_db_column(self):
e = Extra.objects.cache().get(to_tag__tag=5)
e.save()
with self.assertNumQueries(1):
Extra.objects.cache().get(to_tag=5)
def test_expressions(self):
qs = Extra.objects.cache().filter(tag=F('to_tag') + 1, to_tag=F('tag').bitor(5))
qs.count()
with self.assertNumQueries(0):
qs.count()
def test_expressions_save(self):
# Check saving F
extra = Extra.objects.get(pk=1)
extra.tag = F('tag')
extra.save()
# Check saving ExpressionNode
Extra.objects.create(post_id=3, tag=7)
extra = Extra.objects.get(pk=3)
extra.tag = F('tag') + 1
extra.save()
def test_combine(self):
qs = Post.objects.filter(pk__in=[1, 2]) & Post.objects.all()
self.assertEqual(list(qs.cache()), list(qs))
qs = Post.objects.filter(pk__in=[1, 2]) | Post.objects.none()
self.assertEqual(list(qs.cache()), list(qs))
def test_first_and_last(self):
qs = Category.objects.cache(ops='get')
qs.first()
qs.last()
with self.assertNumQueries(0):
qs.first()
qs.last()
def test_union(self):
qs = Post.objects.filter(category=1).values('id', 'title').union(
Category.objects.filter(title='Perl').values('id', 'title')).cache()
list(qs.clone())
# Invalidated
Category.objects.create(title='Perl')
with self.assertNumQueries(1):
list(qs.clone())
# Not invalidated
Category.objects.create(title='Ruby')
with self.assertNumQueries(0):
list(qs.clone())
def test_invalidated_update(self):
list(Post.objects.filter(category=1).cache())
list(Post.objects.filter(category=2).cache())
# Should invalidate both queries
Post.objects.filter(category=1).invalidated_update(category=2)
with self.assertNumQueries(2):
list(Post.objects.filter(category=1).cache())
list(Post.objects.filter(category=2).cache())
def test_subquery(self):
categories = Category.objects.cache().filter(title='Django').only('id')
Post.objects.cache().filter(category__in=Subquery(categories)).count()
def test_rawsql(self):
Post.objects.cache().filter(category__in=RawSQL("select 1", ())).count()
class ValuesTests(BaseTestCase):
fixtures = ['basic']
def test_it_works(self):
with self.assertNumQueries(1):
len(Category.objects.cache().values())
len(Category.objects.cache().values())
def test_it_varies_on_class(self):
with self.assertNumQueries(2):
len(Category.objects.cache())
len(Category.objects.cache().values())
def test_it_varies_on_flat(self):
with self.assertNumQueries(2):
len(Category.objects.cache().values_list())
len(Category.objects.cache().values_list(flat=True))
class DecoratorTests(BaseTestCase):
def test_cached_as_model(self):
get_calls = make_inc(cached_as(Category))
self.assertEqual(get_calls(), 1) # miss
self.assertEqual(get_calls(), 1) # hit
Category.objects.create(title='test') # invalidate
self.assertEqual(get_calls(), 2) # miss
def test_cached_as_cond(self):
get_calls = make_inc(cached_as(Category.objects.filter(title='test')))
self.assertEqual(get_calls(), 1) # cache
Category.objects.create(title='miss') # don't invalidate
self.assertEqual(get_calls(), 1) # hit
Category.objects.create(title='test') # invalidate
self.assertEqual(get_calls(), 2) # miss
def test_cached_as_obj(self):
c = Category.objects.create(title='test')
get_calls = make_inc(cached_as(c))
self.assertEqual(get_calls(), 1) # cache
Category.objects.create(title='miss') # don't invalidate
self.assertEqual(get_calls(), 1) # hit
c.title = 'new'
c.save() # invalidate
self.assertEqual(get_calls(), 2) # miss
def test_cached_as_depends_on_args(self):
get_calls = make_inc(cached_as(Category))
self.assertEqual(get_calls(1), 1) # cache
self.assertEqual(get_calls(1), 1) # hit
self.assertEqual(get_calls(2), 2) # miss
def test_cached_as_depends_on_two_models(self):
get_calls = make_inc(cached_as(Category, Post))
c = Category.objects.create(title='miss')
p = Post.objects.create(title='New Post', category=c)
self.assertEqual(get_calls(1), 1) # cache
c.title = 'new title'
c.save() # invalidate by Category
self.assertEqual(get_calls(1), 2) # miss and cache
p.title = 'new title'
p.save() # invalidate by Post
self.assertEqual(get_calls(1), 3) # miss and cache
def test_cached_as_keep_fresh(self):
c = Category.objects.create(title='test')
calls = [0]
@cached_as(c, keep_fresh=True)
def get_calls(_=None, **kw):
# Invalidate during first run
if calls[0] < 1:
invalidate_obj(c)
calls[0] += 1
return calls[0]
self.assertEqual(get_calls(), 1) # miss, stale result not cached.
self.assertEqual(get_calls(), 2) # miss and cache
self.assertEqual(get_calls(), 2) # hit
def test_cached_view_as(self):
get_calls = make_inc(cached_view_as(Category))
factory = RequestFactory()
r1 = factory.get('/hi')
r2 = factory.get('/hi')
r2.META['REMOTE_ADDR'] = '10.10.10.10'
r3 = factory.get('/bye')
self.assertEqual(get_calls(r1), 1) # cache
self.assertEqual(get_calls(r1), 1) # hit
self.assertEqual(get_calls(r2), 1) # hit, since only url is considered
self.assertEqual(get_calls(r3), 2) # miss
def test_cached_view_on_template_response(self):
from django.template.response import TemplateResponse
from django.template import engines
from_string = engines['django'].from_string
@cached_view_as(Category)
def view(request):
return TemplateResponse(request, from_string('hi'))
factory = RequestFactory()
view(factory.get('/hi'))
from datetime import date, time
from django.utils import timezone
class WeirdTests(BaseTestCase):
def _template(self, field, value):
qs = Weird.objects.cache().filter(**{field: value})
count = qs.count()
obj = Weird.objects.create(**{field: value})
with self.assertNumQueries(2):
self.assertEqual(qs.count(), count + 1)
new_obj = qs.get(pk=obj.pk)
self.assertEqual(getattr(new_obj, field), value)
def test_date(self):
self._template('date_field', date.today())
def test_datetime(self):
# NOTE: some databases (mysql) don't store microseconds
self._template('datetime_field', timezone.now().replace(microsecond=0))
def test_time(self):
self._template('time_field', time(10, 30))
def test_list(self):
self._template('list_field', [1, 2])
def test_binary(self):
obj = Weird.objects.create(binary_field=b'12345')
Weird.objects.cache().get(pk=obj.pk)
Weird.objects.cache().get(pk=obj.pk)
def test_custom(self):
self._template('custom_field', CustomValue('some'))
def test_weird_custom(self):
class WeirdCustom(CustomValue):
def __str__(self):
return 'other'
self._template('custom_field', WeirdCustom('some'))
def test_custom_query(self):
list(Weird.customs.cache())
@unittest.skipIf(connection.vendor != 'postgresql', "Only for PostgreSQL")
class PostgresTests(BaseTestCase):
def test_array_contains(self):
list(TaggedPost.objects.filter(tags__contains=[42]).cache())
def test_array_len(self):
list(TaggedPost.objects.filter(tags__len=42).cache())
def test_json(self):
list(TaggedPost.objects.filter(meta__author='Suor'))
class TemplateTests(BaseTestCase):
def assertRendersTo(self, template, context, result):
s = template.render(Context(context))
self.assertEqual(re.sub(r'\s+', '', s), result)
def test_cached(self):
inc_a = make_inc()
inc_b = make_inc()
t = Template("""
{% load cacheops %}
{% cached 60 'a' %}.a{{ a }}{% endcached %}
{% cached 60 'a' %}.a{{ a }}{% endcached %}
{% cached 60 'a' 'variant' %}.a{{ a }}{% endcached %}
{% cached timeout=60 fragment_name='b' %}.b{{ b }}{% endcached %}
""")
self.assertRendersTo(t, {'a': inc_a, 'b': inc_b}, '.a1.a1.a2.b1')
def test_invalidate_fragment(self):
inc = make_inc()
t = Template("""
{% load cacheops %}
{% cached 60 'a' %}.{{ inc }}{% endcached %}
""")
self.assertRendersTo(t, {'inc': inc}, '.1')
invalidate_fragment('a')
self.assertRendersTo(t, {'inc': inc}, '.2')
def test_cached_as(self):
inc = make_inc()
qs = Post.objects.all()
t = Template("""
{% load cacheops %}
{% cached_as qs None 'a' %}.{{ inc }}{% endcached_as %}
{% cached_as qs timeout=60 fragment_name='a' %}.{{ inc }}{% endcached_as %}
{% cached_as qs fragment_name='a' timeout=60 %}.{{ inc }}{% endcached_as %}
""")
# All the forms are equivalent
self.assertRendersTo(t, {'inc': inc, 'qs': qs}, '.1.1.1')
# Cache works across calls
self.assertRendersTo(t, {'inc': inc, 'qs': qs}, '.1.1.1')
# Post invalidation clears cache
invalidate_model(Post)
self.assertRendersTo(t, {'inc': inc, 'qs': qs}, '.2.2.2')
def test_decorator_tag(self):
@decorator_tag
def my_cached(flag):
return cached(timeout=60) if flag else lambda x: x
inc = make_inc()
t = Template("""
{% load cacheops %}
{% my_cached 1 %}.{{ inc }}{% endmy_cached %}
{% my_cached 0 %}.{{ inc }}{% endmy_cached %}
{% my_cached 0 %}.{{ inc }}{% endmy_cached %}
{% my_cached 1 %}.{{ inc }}{% endmy_cached %}
""")
self.assertRendersTo(t, {'inc': inc}, '.1.2.3.1')
def test_decorator_tag_context(self):
@decorator_tag(takes_context=True)
def my_cached(context):
return cached(timeout=60) if context['flag'] else lambda x: x
inc = make_inc()
t = Template("""
{% load cacheops %}
{% my_cached %}.{{ inc }}{% endmy_cached %}
{% my_cached %}.{{ inc }}{% endmy_cached %}
""")
self.assertRendersTo(t, {'inc': inc, 'flag': True}, '.1.1')
self.assertRendersTo(t, {'inc': inc, 'flag': False}, '.2.3')
def test_jinja2(self):
from jinja2 import Environment
env = Environment(extensions=['cacheops.jinja2.cache'])
t = env.from_string('Hello, {% cached %}{{ name }}{% endcached %}')
t.render(name='Alex')
class IssueTests(BaseTestCase):
databases = ('default', 'slave')
fixtures = ['basic']
def setUp(self):
self.user = User.objects.create(pk=1, username='Suor')
Profile.objects.create(pk=2, user=self.user, tag=10)
super(IssueTests, self).setUp()
def test_16(self):
p = Profile.objects.cache().get(user__id__exact=1)
p.save()
with self.assertNumQueries(1):
Profile.objects.cache().get(user=1)
def test_29(self):
Brand.objects.exclude(labels__in=[1, 2, 3]).cache().count()
def test_45(self):
m = CacheOnSaveModel(title="test")
m.save()
with self.assertNumQueries(0):
CacheOnSaveModel.objects.cache().get(pk=m.pk)
def test_57(self):
list(Post.objects.filter(category__in=Category.objects.nocache()).cache())
def test_114(self):
list(Category.objects.cache().filter(title=u'ó'))
def test_145(self):
# Create One with boolean False
one = One.objects.create(boolean=False)
# Update boolean to True
one = One.objects.cache().get(id=one.id)
one.boolean = True
one.save() # An error was in post_save signal handler
def test_159(self):
brand = Brand.objects.create(pk=1)
label = Label.objects.create(pk=2)
brand.labels.add(label)
# Create another brand with the same pk as the label.
# This used to trigger a bug that invalidated brands when querying them by label id.
another_brand = Brand.objects.create(pk=2)
list(brand.labels.cache())
list(another_brand.labels.cache())
# Clear brands for label linked to brand, but not another_brand.
label.brands.clear()
# Cache must stay for another_brand
with self.assertNumQueries(0):
list(another_brand.labels.cache())
def test_161(self):
categories = Category.objects.using('slave').filter(title='Python')
list(Post.objects.using('slave').filter(category__in=categories).cache())
@unittest.skipIf(connection.vendor == 'mysql', 'MySQL fails with encodings')
def test_161_non_ascii(self):
# Non ascii text in non-unicode str literal
list(Category.objects.filter(title='фыва').cache())
list(Category.objects.filter(title='фыва', title__startswith='фыва').cache())
def test_169(self):
c = Category.objects.prefetch_related('posts').get(pk=3)
c.posts.get(visible=1) # this used to fail
def test_173(self):
extra = Extra.objects.get(pk=1)
title = extra.post.category.title
# Cache
list(Extra.objects.filter(post__category__title=title).cache())
# Break the link
extra.post.category_id = 2
extra.post.save()
# Used to fail because neither Extra nor Category changed, but something in between did
self.assertEqual([], list(Extra.objects.filter(post__category__title=title).cache()))
def test_177(self):
c = Category.objects.get(pk=1)
c.posts_copy = c.posts.cache()
bool(c.posts_copy)
def test_217(self):
# Destroy and recreate model manager
Post.objects.__class__().contribute_to_class(Post, 'objects')
# Test invalidation
post = Post.objects.cache().get(pk=1)
post.title += ' changed'
post.save()
with self.assertNumQueries(1):
changed_post = Post.objects.cache().get(pk=1)
self.assertEqual(post.title, changed_post.title)
def test_232(self):
list(Post.objects.cache().filter(category__in=[None, 1]).filter(category=1))
@unittest.skipIf(connection.vendor == 'mysql', 'In MySQL DDL is not transaction safe')
def test_265(self):
# Databases must have different structures,
# so an exception other than DoesNotExist would be raised.
# Let's delete tests_video from default database
# and try working with it in slave database with using.
# Table is not restored automatically in MySQL, so I disabled this test in MySQL.
connection.cursor().execute("DROP TABLE tests_video;")
# Works fine
c = Video.objects.db_manager('slave').create(title='test_265')
self.assertTrue(Video.objects.using('slave').filter(title='test_265').exists())
# Fails with "no such table: tests_video"
# Fixed by adding .using(instance._state.db) in query.ManagerMixin._pre_save() method
c.title = 'test_265_1'
c.save()
self.assertTrue(Video.objects.using('slave').filter(title='test_265_1').exists())
# This also didn't work before fix above. Test that it works.
c.title = 'test_265_2'
c.save(using='slave')
self.assertTrue(Video.objects.using('slave').filter(title='test_265_2').exists())
# Same bug in other method
# Fixed by adding .using(self._db) in query.QuerySetMixin.invalidated_update() method
Video.objects.using('slave').invalidated_update(title='test_265_3')
self.assertTrue(Video.objects.using('slave').filter(title='test_265_3').exists())
@unittest.skipIf(django.VERSION < (3, 0), "Fixed in Django 3.0")
def test_312(self):
device = Device.objects.create()
# query by 32bytes uuid
d = Device.objects.cache().get(uid=device.uid.hex)
# test invalidation
d.model = 'new model'
d.save()
with self.assertNumQueries(1):
changed_device = Device.objects.cache().get(uid=device.uid.hex)
self.assertEqual(d.model, changed_device.model)
def test_316(self):
Category.objects.cache().annotate(num=Count('posts')).aggregate(total=Sum('num'))
@unittest.expectedFailure
def test_348(self):
foo = Foo.objects.create()
bar = Bar.objects.create(foo=foo)
bar = Bar.objects.cache().get(pk=bar.pk)
bar.foo.delete()
bar = Bar.objects.cache().get(pk=bar.pk)
bar.foo # fails here since we try to fetch Foo instance by cached id
def test_352(self):
CombinedFieldModel.objects.create()
list(CombinedFieldModel.objects.cache().all())
def test_353(self):
foo = Foo.objects.create()
bar = Bar.objects.create()
self.assertEqual(Foo.objects.cache().filter(bar__isnull=True).count(), 1)
bar.foo = foo
bar.save()
self.assertEqual(Foo.objects.cache().filter(bar__isnull=True).count(), 0)
@unittest.skipIf(django.VERSION < (3, 0), "Supported from Django 3.0")
def test_359(self):
post_filter = Exists(Post.objects.all())
len(Category.objects.filter(post_filter).cache())
def test_365(self):
"""
Check that an annotated Subquery is automatically invalidated.
"""
# Retrieve all Categories and annotate the ID of the most recent Post for each
newest_post = Post.objects.filter(category=OuterRef('pk')).order_by('-pk').values('pk')
categories = Category.objects.cache().annotate(newest_post=Subquery(newest_post[:1]))
# Create a new Post in the first Category
post = Post(category=categories[0], title='Foo')
post.save()
# Retrieve Categories again, and check that the newest post ID is correct
categories = Category.objects.cache().annotate(newest_post=Subquery(newest_post[:1]))
self.assertEqual(categories[0].newest_post, post.pk)
@unittest.skipIf(platform.python_implementation() == "PyPy", "dill doesn't do that in PyPy")
def test_385(self):
Client.objects.create(name='Client Name')
with self.assertRaisesRegex(AttributeError, "local object"):
Client.objects.filter(name='Client Name').cache().first()
invalidate_model(Client)
with override_settings(CACHEOPS_SERIALIZER='dill'):
with self.assertNumQueries(1):
Client.objects.filter(name='Client Name').cache().first()
Client.objects.filter(name='Client Name').cache().first()
def test_387(self):
post = Post.objects.defer("visible").last()
post.delete()
def test_407(self):
brand = Brand.objects.create(pk=1)
brand.labels.set([Label.objects.create()])
assert len(Label.objects.filter(brands=1).cache()) == 1 # Cache it
brand.delete() # Invalidation expected after deletion
with self.assertNumQueries(1):
self.assertEqual(len(Label.objects.filter(brands=1).cache()), 0)
def test_407_reverse(self):
brand = Brand.objects.create(pk=1)
label = Label.objects.create(pk=1)
brand.labels.set([label])
assert len(Brand.objects.filter(labels=1).cache()) == 1 # Cache it
label.delete() # Invalidation expected after deletion
with self.assertNumQueries(1):
self.assertEqual(len(Brand.objects.filter(labels=1).cache()), 0)
@mock.patch('cacheops.query.invalidate_dict')
def test_430(self, mock_invalidate_dict):
media_type = MediaType.objects.create(
name="some type"
)
movie = Movie.objects.create(
year=2022,
media_type=media_type,
)
Scene.objects.create(
name="first scene",
movie=movie,
)
invalidate_m2o(Movie, movie)
obj_movie_dict = mock_invalidate_dict.call_args[0][1]
self.assertFalse(isinstance(obj_movie_dict['movie_id'], Media))
self.assertTrue(isinstance(obj_movie_dict['movie_id'], int))
def test_430_no_error_raises(self):
media_type = MediaType.objects.create(
name="some type"
)
movie = Movie.objects.create(
year=2022,
media_type=media_type,
)
Scene.objects.create(
name="first scene",
movie=movie,
)
# no error is raised on delete
media_type.delete()
def test_480(self):
orm_lookups = ['title__icontains', 'category__title__icontains']
search_terms = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13']
queryset = Post.objects.filter(visible=True)
conditions = []
for search_term in search_terms:
queries = [
models.Q(**{orm_lookup: search_term})
for orm_lookup in orm_lookups
]
conditions.append(reduce(operator.or_, queries))
list(queryset.filter(reduce(operator.and_, conditions)).cache())
@unittest.skipIf(connection.vendor != 'postgresql', "Only for PostgreSQL")
def test_489(self):
TaggedPost.objects.cache().filter(tags=[]).count()
class RelatedTests(BaseTestCase):
fixtures = ['basic']
def _template(self, qs, change, should_invalidate=True):
list(qs._clone().cache())
change()
with self.assertNumQueries(1 if should_invalidate else 0):
list(qs.cache())
def test_related_invalidation(self):
self._template(
Post.objects.filter(category__title='Django'),
lambda: Category.objects.get(title='Django').save()
)
def test_reverse_fk(self):
self._template(
Category.objects.filter(posts__title='Cacheops'),
lambda: Post.objects.get(title='Cacheops').save()
)
def test_reverse_fk_same(self):
title = "Implicit variable as pronoun"
self._template(
Category.objects.filter(posts__title=title, posts__visible=True),
lambda: Post.objects.get(title=title, visible=True).save()
)
self._template(
Category.objects.filter(posts__title=title, posts__visible=False),
lambda: Post.objects.get(title=title, visible=True).save(),
should_invalidate=False,
)
def test_reverse_fk_separate(self):
title = "Implicit variable as pronoun"
self._template(
Category.objects.filter(posts__title=title).filter(posts__visible=True),
lambda: Post.objects.get(title=title, visible=True).save()
)
self._template(
Category.objects.filter(posts__title=title).filter(posts__visible=False),
lambda: Post.objects.get(title=title, visible=True).save(),
)
class AggregationTests(BaseTestCase):
fixtures = ['basic']
def test_annotate(self):
qs = Category.objects.annotate(posts_count=Count('posts')).cache()
list(qs._clone())
Post.objects.create(title='New One', category=Category.objects.all()[0])
with self.assertNumQueries(1):
list(qs._clone())
def test_aggregate(self):
qs = Category.objects.cache()
qs.aggregate(posts_count=Count('posts'))
# Test caching
with self.assertNumQueries(0):
qs.aggregate(posts_count=Count('posts'))
# Test invalidation
Post.objects.create(title='New One', category=Category.objects.all()[0])
with self.assertNumQueries(1):
qs.aggregate(posts_count=Count('posts'))
def test_aggregate_granular(self):
cat1, cat2 = Category.objects.all()[:2]
qs = Category.objects.cache().filter(id=cat1.id)
qs.aggregate(posts_count=Count('posts'))
# Test invalidation
Post.objects.create(title='New One', category=cat2)
with self.assertNumQueries(0):
qs.aggregate(posts_count=Count('posts'))
def test_new_alias(self):
qs = Post.objects.cache()
assert qs.aggregate(max=Max('category')) == {'max': 3}
assert qs.aggregate(cat=Max('category')) == {'cat': 3}
def test_filter(self):
qs = Post.objects.cache()
assert qs.aggregate(cnt=Count('category', filter=Q(category__gt=1))) == {'cnt': 2}
assert qs.aggregate(cnt=Count('category', filter=Q(category__lt=3))) == {'cnt': 1}
class M2MTests(BaseTestCase):
brand_cls = Brand
label_cls = Label
def setUp(self):
self.bf = self.brand_cls.objects.create()
self.bs = self.brand_cls.objects.create()
self.fast = self.label_cls.objects.create(text='fast')
self.slow = self.label_cls.objects.create(text='slow')
self.furious = self.label_cls.objects.create(text='furios')
self.setup_m2m()
super(M2MTests, self).setUp()
def setup_m2m(self):
self.bf.labels.add(self.fast, self.furious)
self.bs.labels.add(self.slow, self.furious)
def _template(self, qs_or_action, change, should_invalidate=True):
if hasattr(qs_or_action, 'all'):
action = lambda: list(qs_or_action.all().cache())
else:
action = qs_or_action
action()
change()
with self.assertNumQueries(1 if should_invalidate else 0):
action()
def test_target_invalidates_on_clear(self):
self._template(
self.bf.labels,
lambda: self.bf.labels.clear()
)
def test_base_invalidates_on_clear(self):
self._template(
self.furious.brands,
lambda: self.bf.labels.clear()
)
def test_granular_through_on_clear(self):
through_qs = self.brand_cls.labels.through.objects.cache() \
.filter(brand=self.bs, label=self.slow)
through_qs.get()
self.bf.labels.clear()
with self.assertNumQueries(0):
through_qs.get()
def test_granular_target_on_clear(self):
self._template(
lambda: self.label_cls.objects.cache().get(pk=self.slow.pk),
lambda: self.bf.labels.clear(),
should_invalidate=False
)
def test_target_invalidates_on_add(self):
self._template(
self.bf.labels,
lambda: self.bf.labels.add(self.slow)
)
def test_base_invalidates_on_add(self):
self._template(
self.slow.brands,
lambda: self.bf.labels.add(self.slow)
)
def test_target_invalidates_on_remove(self):
self._template(
self.bf.labels,
lambda: self.bf.labels.remove(self.furious)
)
def test_base_invalidates_on_remove(self):
self._template(
self.furious.brands,
lambda: self.bf.labels.remove(self.furious)
)
class MultiTableInheritanceWithM2MTest(M2MTests):
brand_cls = PremiumBrand
class M2MThroughTests(M2MTests):
brand_cls = BrandT
label_cls = LabelT
def setup_m2m(self):
Labeling.objects.create(brand=self.bf, label=self.fast, tag=10)
Labeling.objects.create(brand=self.bf, label=self.furious, tag=11)
Labeling.objects.create(brand=self.bs, label=self.slow, tag=20)
Labeling.objects.create(brand=self.bs, label=self.furious, tag=21)
# No add and remove methods for explicit through models
test_target_invalidates_on_add = None
test_base_invalidates_on_add = None
test_target_invalidates_on_remove = None
test_base_invalidates_on_remove = None
def test_target_invalidates_on_create(self):
self._template(
self.bf.labels,
lambda: Labeling.objects.create(brand=self.bf, label=self.slow, tag=1)
)
def test_base_invalidates_on_create(self):
self._template(
self.slow.brands,
lambda: Labeling.objects.create(brand=self.bf, label=self.slow, tag=1)
)
def test_target_invalidates_on_delete(self):
self._template(
self.bf.labels,
lambda: Labeling.objects.get(brand=self.bf, label=self.furious).delete()
)
def test_base_invalidates_on_delete(self):
self._template(
self.furious.brands,
# lambda: Labeling.objects.filter(brand=self.bf, label=self.furious).delete()
lambda: Labeling.objects.get(brand=self.bf, label=self.furious).delete()
)
class ProxyTests(BaseTestCase):
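    # Regression tests for proxy model caching and invalidation; the numeric names
    # (test_30, test_148) presumably refer to cacheops issue numbers.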
def test_30(self):
list(VideoProxy.objects.cache())
Video.objects.create(title='Pulp Fiction')
with self.assertNumQueries(1):
list(VideoProxy.objects.cache())
def test_30_reversed(self):
list(Video.objects.cache())
VideoProxy.objects.create(title='Pulp Fiction')
with self.assertNumQueries(1):
list(Video.objects.cache())
@unittest.expectedFailure
def test_interchange(self):
list(Video.objects.cache())
with self.assertNumQueries(0):
list(VideoProxy.objects.cache())
def test_148_invalidate_from_non_cached_proxy(self):
video = Video.objects.create(title='Pulp Fiction')
Video.objects.cache().get(title=video.title)
NonCachedVideoProxy.objects.get(id=video.id).delete()
with self.assertRaises(Video.DoesNotExist):
Video.objects.cache().get(title=video.title)
def test_148_reverse(self):
media = NonCachedMedia.objects.create(title='Pulp Fiction')
MediaProxy.objects.cache().get(title=media.title)
NonCachedMedia.objects.get(id=media.id).delete()
with self.assertRaises(NonCachedMedia.DoesNotExist):
MediaProxy.objects.cache().get(title=media.title)
def test_siblings(self):
list(VideoProxy.objects.cache())
NonCachedVideoProxy.objects.create(title='Pulp Fiction')
with self.assertNumQueries(1):
list(VideoProxy.objects.cache())
def test_proxy_caching(self):
video = Video.objects.create(title='Pulp Fiction')
self.assertEqual(type(Video.objects.cache().get(pk=video.pk)),
Video)
self.assertEqual(type(VideoProxy.objects.cache().get(pk=video.pk)),
VideoProxy)
def test_proxy_caching_reversed(self):
video = Video.objects.create(title='Pulp Fiction')
self.assertEqual(type(VideoProxy.objects.cache().get(pk=video.pk)),
VideoProxy)
self.assertEqual(type(Video.objects.cache().get(pk=video.pk)),
Video)
class MultitableInheritanceTests(BaseTestCase):
@unittest.expectedFailure
def test_sub_added(self):
media_count = Media.objects.cache().count()
Movie.objects.create(name="Matrix", year=1999)
with self.assertNumQueries(1):
self.assertEqual(Media.objects.cache().count(), media_count + 1)
@unittest.expectedFailure
def test_base_changed(self):
matrix = Movie.objects.create(name="Matrix", year=1999)
list(Movie.objects.cache())
media = Media.objects.get(pk=matrix.pk)
media.name = "Matrix (original)"
media.save()
with self.assertNumQueries(1):
list(Movie.objects.cache())
class SimpleCacheTests(BaseTestCase):
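    # Exercises the plain `cached` and `cached_view` decorators and their helpers:
    # invalidate(), key().delete() and key().set().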
def test_cached(self):
get_calls = make_inc(cached(timeout=100))
self.assertEqual(get_calls(1), 1)
self.assertEqual(get_calls(1), 1)
self.assertEqual(get_calls(2), 2)
get_calls.invalidate(2)
self.assertEqual(get_calls(2), 3)
get_calls.key(2).delete()
self.assertEqual(get_calls(2), 4)
get_calls.key(2).set(42)
self.assertEqual(get_calls(2), 42)
def test_cached_view(self):
get_calls = make_inc(cached_view(timeout=100))
factory = RequestFactory()
r1 = factory.get('/hi')
r2 = factory.get('/hi')
r2.META['REMOTE_ADDR'] = '10.10.10.10'
r3 = factory.get('/bye')
self.assertEqual(get_calls(r1), 1) # cache
self.assertEqual(get_calls(r1), 1) # hit
        self.assertEqual(get_calls(r2), 1) # hit, since only the URL is considered
self.assertEqual(get_calls(r3), 2) # miss
get_calls.invalidate(r1)
self.assertEqual(get_calls(r1), 3) # miss
        # A URI string can also be passed to invalidate()
get_calls.invalidate(r1.build_absolute_uri())
self.assertEqual(get_calls(r1), 4) # miss
@unittest.skipIf(connection.settings_dict['ENGINE'] != 'django.contrib.gis.db.backends.postgis',
"Only for PostGIS")
class GISTests(BaseTestCase):
def test_invalidate_model_with_geometry(self):
geom = Geometry()
geom.save()
# Raises ValueError if this doesn't work
invalidate_obj(geom)
# NOTE: overriding cache prefix to separate invalidation sets by db.
@override_settings(CACHEOPS_PREFIX=lambda q: q.db)
class MultiDBInvalidationTests(BaseTestCase):
databases = ('default', 'slave')
fixtures = ['basic']
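    # Helper: warm the cache on both databases, run the wrapped change (which touches
    # only the 'slave' db), then check that 'default' is still served from cache
    # (0 queries) while the 'slave' cache was invalidated (1 query).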
@contextmanager
def _control_counts(self):
Category.objects.cache().count()
Category.objects.using('slave').cache().count()
yield
with self.assertNumQueries(0):
Category.objects.cache().count()
with self.assertNumQueries(1, using='slave'):
Category.objects.cache().using('slave').count()
def test_save(self):
        # NOTE: not testing the case where the old db != new db;
        # how cacheops behaves in that situation is currently undefined
with self._control_counts():
obj = Category()
obj.save(using='slave')
def test_delete(self):
obj = Category.objects.using('slave').create()
with self._control_counts():
obj.delete(using='slave')
def test_bulk_create(self):
with self._control_counts():
Category.objects.using('slave').bulk_create([Category(title='New')])
def test_invalidated_update(self):
# NOTE: not testing router-based routing
with self._control_counts():
Category.objects.using('slave').invalidated_update(title='update')
@mock.patch('cacheops.invalidation.invalidate_dict')
def test_m2m_changed_call_invalidate(self, mock_invalidate_dict):
label = Label.objects.create()
brand = Brand.objects.create()
brand.labels.add(label)
mock_invalidate_dict.assert_called_with(mock.ANY, mock.ANY, using=DEFAULT_DB_ALIAS)
label = Label.objects.using('slave').create()
brand = Brand.objects.using('slave').create()
brand.labels.add(label)
mock_invalidate_dict.assert_called_with(mock.ANY, mock.ANY, using='slave')
django-cacheops-7.2/tests/tests_sharding.py 0000664 0000000 0000000 00000003522 15001074010 0021132 0 ustar 00root root 0000000 0000000 from django.core.exceptions import ImproperlyConfigured
from django.test import override_settings
from cacheops import cache, CacheMiss
from .models import Category, Post, Extra
from .utils import BaseTestCase
class PrefixTests(BaseTestCase):
databases = ('default', 'slave')
fixtures = ['basic']
def test_context(self):
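        # Changing the prefix at runtime yields a different cache key, so the
        # second count() below misses the cache: 2 queries in total.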
prefix = ['']
with override_settings(CACHEOPS_PREFIX=lambda _: prefix[0]):
with self.assertNumQueries(2):
Category.objects.cache().count()
prefix[0] = 'x'
Category.objects.cache().count()
@override_settings(CACHEOPS_PREFIX=lambda q: q.db)
def test_db(self):
with self.assertNumQueries(1):
list(Category.objects.cache())
with self.assertNumQueries(1, using='slave'):
list(Category.objects.cache().using('slave'))
list(Category.objects.cache().using('slave'))
@override_settings(CACHEOPS_PREFIX=lambda q: q.table)
def test_table(self):
self.assertTrue(Category.objects.all()._cache_key().startswith('tests_category'))
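        # A query joining several tables has no single table to build a prefix from,
        # so the table-based prefix raises ImproperlyConfigured.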
with self.assertRaises(ImproperlyConfigured):
list(Post.objects.filter(category__title='Django').cache())
@override_settings(CACHEOPS_PREFIX=lambda q: q.table)
def test_self_join_tables(self):
list(Extra.objects.filter(to_tag__pk=1).cache())
@override_settings(CACHEOPS_PREFIX=lambda q: q.table)
def test_union_tables(self):
qs = Post.objects.filter(pk=1).union(Post.objects.filter(pk=2)).cache()
list(qs)
class SimpleCacheTests(BaseTestCase):
def test_prefix(self):
with override_settings(CACHEOPS_PREFIX=lambda _: 'a'):
cache.set("key", "value")
self.assertEqual(cache.get("key"), "value")
with self.assertRaises(CacheMiss):
cache.get("key")
django-cacheops-7.2/tests/tests_transactions.py 0000664 0000000 0000000 00000010711 15001074010 0022041 0 ustar 00root root 0000000 0000000 from django.db import connection, IntegrityError
from django.db.transaction import atomic
from django.test import TransactionTestCase
from cacheops.transaction import queue_when_in_transaction
from .models import Category, Post
from .utils import run_in_thread
def get_category():
return Category.objects.cache().get(pk=1)
class IntentionalRollback(Exception):
pass
class TransactionSupportTests(TransactionTestCase):
databases = ('default', 'slave')
fixtures = ['basic']
def test_atomic(self):
with atomic():
obj = get_category()
obj.title = 'Changed'
obj.save()
self.assertEqual('Changed', get_category().title)
self.assertEqual('Django', run_in_thread(get_category).title)
self.assertEqual('Changed', run_in_thread(get_category).title)
self.assertEqual('Changed', get_category().title)
def test_nested(self):
with atomic():
with atomic():
obj = get_category()
obj.title = 'Changed'
obj.save()
self.assertEqual('Changed', get_category().title)
self.assertEqual('Django', run_in_thread(get_category).title)
self.assertEqual('Changed', get_category().title)
self.assertEqual('Django', run_in_thread(get_category).title)
self.assertEqual('Changed', run_in_thread(get_category).title)
self.assertEqual('Changed', get_category().title)
def test_rollback(self):
try:
with atomic():
obj = get_category()
obj.title = 'Changed'
obj.save()
self.assertEqual('Changed', get_category().title)
self.assertEqual('Django', run_in_thread(get_category).title)
raise IntentionalRollback()
except IntentionalRollback:
pass
self.assertEqual('Django', get_category().title)
self.assertEqual('Django', run_in_thread(get_category).title)
def test_nested_rollback(self):
with atomic():
try:
with atomic():
obj = get_category()
obj.title = 'Changed'
obj.save()
self.assertEqual('Changed', get_category().title)
self.assertEqual('Django', run_in_thread(get_category).title)
raise IntentionalRollback()
except IntentionalRollback:
pass
self.assertEqual('Django', get_category().title)
self.assertEqual('Django', run_in_thread(get_category).title)
self.assertEqual('Django', get_category().title)
self.assertEqual('Django', run_in_thread(get_category).title)
def test_smart_transactions(self):
with atomic():
get_category()
with self.assertNumQueries(0):
get_category()
with atomic():
with self.assertNumQueries(0):
get_category()
obj = get_category()
obj.title += ' changed'
obj.save()
get_category()
with self.assertNumQueries(1):
get_category()
def test_rollback_during_integrity_error(self):
# store category in cache
get_category()
        # Make the current DB "dirty" with a write
with self.assertRaises(IntegrityError):
with atomic():
Post.objects.create(category_id=-1, title='')
        # However, the write is rolled back, so the current DB should not be
        # left "dirty" and the cached value should still be served
with self.assertNumQueries(0):
get_category()
def test_call_cacheops_cbs_before_on_commit_cbs(self):
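        # cacheops commit callbacks queued via queue_when_in_transaction must run
        # before handlers registered with connection.on_commit().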
calls = []
with atomic():
def django_commit_handler():
calls.append('django')
connection.on_commit(django_commit_handler)
@queue_when_in_transaction
def cacheops_commit_handler(using):
calls.append('cacheops')
cacheops_commit_handler('default')
self.assertEqual(calls, ['cacheops', 'django'])
def test_multidb(self):
try:
with atomic('slave'):
with atomic():
obj = get_category()
obj.title = 'Changed'
obj.save()
raise IntentionalRollback()
except IntentionalRollback:
pass
self.assertEqual('Changed', get_category().title)
django-cacheops-7.2/tests/urls.py 0000664 0000000 0000000 00000000021 15001074010 0017065 0 ustar 00root root 0000000 0000000 urlpatterns = []
django-cacheops-7.2/tests/utils.py 0000664 0000000 0000000 00000003567 15001074010 0017262 0 ustar 00root root 0000000 0000000 from django.test import TestCase
from cacheops import invalidate_all
from cacheops.transaction import transaction_states
class BaseTestCase(TestCase):
def setUp(self):
        # Emulate not being in a transaction by tricking the system into ignoring
        # its pretest level. TestCase wraps each test in 1 or 2 transactions,
        # which alters cacheops behavior. The alternative is TransactionTestCase,
        # which is about 10x slower.
from funcy import empty
transaction_states._states, self._states \
= empty(transaction_states._states), transaction_states._states
invalidate_all()
def tearDown(self):
transaction_states._states = self._states
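# Returns an `inc` function (optionally wrapped by `deco`, e.g. cached(...)) that
# bumps a counter on every real call, so cache hits show up as repeated return values.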
def make_inc(deco=lambda x: x):
calls = [0]
@deco
def inc(_=None, **kw):
calls[0] += 1
return calls[0]
inc.get = lambda: calls[0]
return inc
# Thread utilities
from threading import Thread
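# Thread subclass that captures the target's return value (or re-raises its exception
# on join), letting run_in_thread() below execute a callable on a separate thread and
# database connection and return its result.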
class ThreadWithReturnValue(Thread):
def __init__(self, *args, **kwargs):
super(ThreadWithReturnValue, self).__init__(*args, **kwargs)
self._return = None
self._exc = None
def run(self):
try:
self._return = self._target(*self._args, **self._kwargs)
except Exception as e:
self._exc = e
finally:
            # Django does not drop postgres connections opened in new threads.
            # This makes postgres complain that the database is still being accessed
            # when we try to destroy it.
            # See https://code.djangoproject.com/ticket/22420#comment:18
from django.db import connection
connection.close()
def join(self, *args, **kwargs):
super(ThreadWithReturnValue, self).join(*args, **kwargs)
if self._exc:
raise self._exc
return self._return
def run_in_thread(target):
t = ThreadWithReturnValue(target=target)
t.start()
return t.join()
django-cacheops-7.2/tox.ini 0000664 0000000 0000000 00000002723 15001074010 0015712 0 ustar 00root root 0000000 0000000 [tox]
envlist =
lint,
py{38,39}-dj{32,40},
py310-dj{32,40,41},
py311-dj{41,42,50},
py312-dj{42,50,51,52},
py313-dj{50,51,52},
pypy310-dj40
[gh-actions]
python =
3.8: py38
3.9: py39
3.10: py310
3.11: py311
3.12: py312
3.13: py313
pypy-3.10: pypy310
[testenv]
passenv = *
allowlist_externals = *
# This is required for gdal to install
setenv =
CPLUS_INCLUDE_PATH=/usr/include/gdal
C_INCLUDE_PATH=/usr/include/gdal
deps =
pytest==8.3.5
pytest-django==4.11.1
dj32: Django>=3.2,<3.3
dj40: Django>=4.0,<4.1
dj41: Django>=4.1,<4.2
dj42: Django>=4.2.8,<5.0
dj50: Django>=5.0,<5.1
dj51: Django>=5.1,<5.2
dj52: Django>=5.2,<5.3
djmain: git+https://github.com/django/django
mysqlclient
py{38,39,310,311,312,313}: psycopg2-binary
; gdal=={env:GDAL_VERSION:2.4}
pypy310: psycopg2cffi>=2.7.6
before_after==1.0.0
jinja2>=2.10
dill
commands =
pytest []
env CACHEOPS_PREFIX=1 pytest []
env CACHEOPS_INSIDEOUT=1 pytest []
env CACHEOPS_DB=mysql pytest []
env CACHEOPS_DB=postgresql pytest []
; env CACHEOPS_DB=postgis pytest []
; Test invalidate command
./manage.py invalidate tests.post
./manage.py invalidate tests
./manage.py invalidate all
[flake8]
max-line-length = 100
ignore = E126,E127,E131,E226,E261,E265,E266,E302,E305,E401,E402,F403,F405,E731,W503
exclude = cross.py,.tox/*
[testenv:lint]
deps = flake8
commands = flake8