Advanced Usage¶
Pickle Version¶
By default, django-cachex uses pickle.DEFAULT_PROTOCOL. To set a specific version:
CACHES = {
"default": {
"BACKEND": "django_cachex.cache.ValkeyCache",
"LOCATION": "valkey://127.0.0.1:6379/1",
"OPTIONS": {
"PICKLE_VERSION": -1 # Highest protocol available
}
}
}
TTL Operations¶
Get TTL¶
from django.core.cache import cache
cache.set("foo", "value", timeout=25)
cache.ttl("foo") # Returns 25
cache.ttl("missing") # Returns 0 (key doesn't exist)
Returns:
0 — Key doesn't exist or has already expired
None — Key exists but has no expiration
int — Seconds until expiration
Get TTL in Milliseconds¶
Expire & Persist¶
Set Expiration¶
Set Expiration in Milliseconds¶
cache.set("foo", "bar", timeout=22)
cache.pexpire("foo", timeout=5500)
cache.pttl("foo") # Returns 5500
Expire at Specific Time¶
from datetime import datetime, timedelta
cache.set("foo", "bar", timeout=22)
cache.expire_at("foo", datetime.now() + timedelta(hours=1))
cache.ttl("foo") # Returns ~3600
Expire at Specific Time (milliseconds precision)¶
cache.set("foo", "bar", timeout=22)
cache.pexpire_at("foo", datetime.now() + timedelta(milliseconds=900, hours=1))
cache.pttl("foo") # Returns ~3600900
Remove Expiration¶
cache.set("foo", "bar", timeout=22)
cache.persist("foo")
cache.ttl("foo") # Returns None (no expiration)
Locks¶
Distributed locks with the same interface as threading.Lock:
Bulk Operations¶
Search Keys¶
from django.core.cache import cache
# Get all matching keys (not recommended for large datasets)
cache.keys("foo_*") # Returns ["foo_1", "foo_2"]
Iterate Keys (Recommended)¶
For large datasets, use server-side cursors:
Delete by Pattern¶
For better performance with many keys:
Atomic Operations¶
SETNX (Set if Not Exists)¶
cache.set("key", "value1", nx=True) # Returns True
cache.set("key", "value2", nx=True) # Returns False
cache.get("key") # Returns "value1"
Increment/Decrement¶
cache.set("counter", 0)
cache.incr("counter") # Returns 1
cache.incr("counter", delta=5) # Returns 6
cache.decr("counter") # Returns 5
Data Structures¶
django-cachex provides direct access to Valkey/Redis data structures through the cache interface.
Hashes¶
Hashes are maps of field-value pairs, useful for storing objects:
from django.core.cache import cache
# Set a single field
cache.hset("user:1", "name", "Alice")
# Set multiple fields at once
cache.hmset("user:1", {"email": "alice@example.com", "age": 30})
# Get a single field
name = cache.hget("user:1", "name") # "Alice"
# Get multiple fields
values = cache.hmget("user:1", "name", "email") # ["Alice", "alice@example.com"]
# Get all fields and values
user = cache.hgetall("user:1") # {"name": "Alice", "email": "...", "age": 30}
# Increment a numeric field
cache.hincrby("user:1", "age", 1) # 31
cache.hincrbyfloat("user:1", "score", 0.5) # For floating point
# Check if field exists
cache.hexists("user:1", "name") # True
# Delete fields
cache.hdel("user:1", "age")
# Get count of fields
cache.hlen("user:1") # 2
# Get all values
cache.hvals("user:1") # ["Alice", "alice@example.com"]
Sorted Sets¶
Sorted sets store unique members with scores, automatically sorted by score:
from django.core.cache import cache
# Add members with scores
cache.zadd("leaderboard", {"alice": 100, "bob": 85, "charlie": 92})
# Get rank (0-indexed, ascending by score)
cache.zrank("leaderboard", "alice") # 2 (highest score = last)
cache.zrevrank("leaderboard", "alice") # 0 (highest score = first)
# Get score
cache.zscore("leaderboard", "bob") # 85.0
# Get multiple scores
cache.zmscore("leaderboard", "alice", "bob") # [100.0, 85.0]
# Increment score
cache.zincrby("leaderboard", 10, "bob") # 95.0
# Get range by rank (ascending)
cache.zrange("leaderboard", 0, -1) # All members sorted by score
# Get range by rank with scores
cache.zrange("leaderboard", 0, -1, withscores=True)
# Get range by score
cache.zrangebyscore("leaderboard", 80, 100)
# Count members in score range
cache.zcount("leaderboard", 80, 100) # 3
# Remove members
cache.zrem("leaderboard", "charlie")
# Remove by rank range
cache.zremrangebyrank("leaderboard", 0, 1) # Remove lowest 2
# Get total count
cache.zcard("leaderboard")
Lists¶
Lists are ordered collections of elements:
from django.core.cache import cache
# Push elements
cache.lpush("queue", "first") # Prepend (left)
cache.rpush("queue", "last") # Append (right)
# Pop elements
cache.lpop("queue") # Remove and return first
cache.rpop("queue") # Remove and return last
# Get element by index
cache.lindex("queue", 0) # First element
# Get range of elements
cache.lrange("queue", 0, -1) # All elements
# Set element at index
cache.lset("queue", 0, "new_first")
# Trim to range
cache.ltrim("queue", 0, 99) # Keep first 100 elements
# Get length
cache.llen("queue")
# Find element position
cache.lpos("queue", "target") # Returns index or None
# Move element between lists atomically
cache.lmove("source", "dest", "LEFT", "RIGHT") # LPOP source, RPUSH dest
Raw Client Access¶
Access the underlying valkey-py/redis-py client:
Lua Scripts¶
django-cachex provides a high-level interface for Lua scripts with automatic key prefixing and value encoding/decoding.
Registering Scripts¶
Register scripts with cache.register_script():
from django.core.cache import cache
from django_cachex import keys_only_pre
# Simple script - only needs key prefixing
cache.register_script(
"rate_limit",
"""
local current = redis.call('INCR', KEYS[1])
if current == 1 then
redis.call('EXPIRE', KEYS[1], ARGV[1])
end
return current
""",
num_keys=1,
pre_func=keys_only_pre,
)
Executing Scripts¶
Execute registered scripts with cache.eval_script():
# Execute the rate limiter
count = cache.eval_script("rate_limit", keys=["user:123:requests"], args=[60])
if count > 100:
raise RateLimitExceeded()
Pre/Post Processing Hooks¶
Scripts support hooks for transforming inputs and outputs:
pre_func — Transform keys and args before execution
post_func — Transform the result after execution
Built-in Helpers¶
from django_cachex import (
keys_only_pre, # Prefix keys, leave args unchanged
full_encode_pre, # Prefix keys AND encode args (serialize values)
decode_single_post, # Decode a single returned value
decode_list_post, # Decode a list of returned values
noop_post, # Return result unchanged
)
Example with Encoding¶
from django_cachex import full_encode_pre, decode_single_post
cache.register_script(
"get_and_set",
"""
local old = redis.call('GET', KEYS[1])
redis.call('SET', KEYS[1], ARGV[1])
return old
""",
pre_func=full_encode_pre, # Encode the new value
post_func=decode_single_post, # Decode the old value
)
# Works with any serializable Python object
old_session = cache.eval_script(
"get_and_set",
keys=["session:abc"],
args=[{"user_id": 123, "permissions": ["read", "write"]}],
)
Custom Processing Hooks¶
Create custom hooks using ScriptHelpers:
from django_cachex import ScriptHelpers
def my_pre(helpers: ScriptHelpers, keys, args):
# First arg is a secondary key, rest are values
processed_args = [helpers.make_key(args[0], helpers.version)]
processed_args.extend(helpers.encode_values(args[1:]))
return helpers.make_keys(keys), processed_args
def my_post(helpers: ScriptHelpers, result):
# Result is [count, list_of_values]
return {
"count": result[0],
"values": helpers.decode_values(result[1]) if result[1] else [],
}
cache.register_script("custom_op", "...", pre_func=my_pre, post_func=my_post)
Pipeline Support¶
Scripts can be queued in pipelines:
with cache.pipeline() as pipe:
pipe.set("key1", "value1")
pipe.eval_script("rate_limit", keys=["user:1"], args=[60])
pipe.eval_script("rate_limit", keys=["user:2"], args=[60])
results = pipe.execute() # [True, 1, 1]
Async Support¶
Use aeval_script() for async execution:
Script Caching¶
Scripts are automatically cached by SHA hash. On first execution, the script is loaded and its SHA is stored. Subsequent executions use EVALSHA for better performance. If Redis returns NOSCRIPT (e.g., after a server restart), the script is automatically reloaded.