Skip to content
This repository has been archived by the owner on Apr 26, 2024. It is now read-only.

Make DictionaryCache have better expiry properties #13292

Merged
merged 42 commits into from
Jul 21, 2022
Merged
Show file tree
Hide file tree
Changes from 13 commits
Commits
Show all changes
42 commits
Select commit Hold shift + click to select a range
326a175
Make DictionaryCache have better expiry properties
erikjohnston Jul 15, 2022
40a8fba
Newsfile
erikjohnston Jul 15, 2022
a22716c
Fix literal
erikjohnston Jul 15, 2022
f046366
Fix test
erikjohnston Jul 15, 2022
602a81f
don't update access
erikjohnston Jul 15, 2022
23c2f39
Fix mypy
erikjohnston Jul 15, 2022
cad555f
Better stuff
erikjohnston Jul 15, 2022
7aceec3
Fix up
erikjohnston Jul 15, 2022
057ae8b
Comments
erikjohnston Jul 17, 2022
7f7b36d
Comment LruCache
erikjohnston Jul 18, 2022
462db2a
Comment LruCache
erikjohnston Jul 18, 2022
129691f
Comment TreeCache
erikjohnston Jul 18, 2022
45d0dce
Merge branch 'develop' into erikj/dict_cache
erikjohnston Jul 18, 2022
ca8e1af
Update synapse/util/caches/dictionary_cache.py
erikjohnston Jul 19, 2022
e4723df
Update synapse/util/caches/lrucache.py
erikjohnston Jul 19, 2022
a74baa6
Split out code to separate method
erikjohnston Jul 19, 2022
a9ebcd2
Mark DictionaryEntry as frozen
erikjohnston Jul 19, 2022
6a76dba
Don't reuse vars
erikjohnston Jul 19, 2022
d4133b2
Add example
erikjohnston Jul 19, 2022
88aa56c
Make `LruCache.get_multi` return something sane.
erikjohnston Jul 19, 2022
f053edb
Woo comments
erikjohnston Jul 20, 2022
740fe2f
Remove use of `cache_key`
erikjohnston Jul 20, 2022
378aec5
More comments
erikjohnston Jul 20, 2022
5709037
Support values being removed from dict
erikjohnston Jul 20, 2022
b376618
Fixup lint
erikjohnston Jul 20, 2022
12e14f2
Make `missing` a list
erikjohnston Jul 20, 2022
fed7755
Don't iterate twice over `dict_key`
erikjohnston Jul 20, 2022
c9f13f3
Clarify comment
erikjohnston Jul 20, 2022
67bb06d
Use iterable rather than generator
erikjohnston Jul 20, 2022
2ec4cab
Add example
erikjohnston Jul 20, 2022
50bb901
Add doc to TreeCache.get
erikjohnston Jul 20, 2022
aa203c6
Note that if full is True known_absent must be empty
erikjohnston Jul 20, 2022
2151474
When fetching full dict don't return partial
erikjohnston Jul 20, 2022
43e0030
Fix test, set takes an iterable
erikjohnston Jul 20, 2022
3c23161
Add simple test for invalidation
erikjohnston Jul 20, 2022
e5ef14d
Fix test now we don't return partial dicts
erikjohnston Jul 21, 2022
10fb0d0
Woo comments
erikjohnston Jul 21, 2022
d521be2
Comment if dict_keys is None
erikjohnston Jul 21, 2022
3c84a98
Document update doesn't invalidate
erikjohnston Jul 21, 2022
9c54881
Update synapse/util/caches/dictionary_cache.py
erikjohnston Jul 21, 2022
daa2741
Flesh out return value
erikjohnston Jul 21, 2022
055a5dd
Lint
erikjohnston Jul 21, 2022
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions changelog.d/13292.misc
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Make `DictionaryCache` expire full entries if they haven't been queried in a while, even if specific keys have been queried recently.
9 changes: 8 additions & 1 deletion synapse/storage/databases/state/store.py
Original file line number Diff line number Diff line change
Expand Up @@ -202,7 +202,14 @@ def _get_state_for_group_using_cache(
requests state from the cache, if False we need to query the DB for the
missing state.
"""
cache_entry = cache.get(group)
# If we are asked explicitly for a subset of keys, we only ask for those
# from the cache. This ensures that the `DictionaryCache` can make
# better decisions about what to cache and what to expire.
dict_keys = None
if not state_filter.has_wildcards():
dict_keys = state_filter.concrete_types()

cache_entry = cache.get(group, dict_keys=dict_keys)
Comment on lines +205 to +212
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

ideally we'd separate this to a different PR. It's not directly related to the changes to DictionaryCache.

state_dict_ids = cache_entry.value

if cache_entry.full or state_filter.is_full():
Expand Down
191 changes: 163 additions & 28 deletions synapse/util/caches/dictionary_cache.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,11 +14,13 @@
import enum
import logging
import threading
from typing import Any, Dict, Generic, Iterable, Optional, Set, TypeVar
from typing import Any, Dict, Generic, Iterable, Optional, Set, Tuple, TypeVar, Union

import attr
from typing_extensions import Literal

from synapse.util.caches.lrucache import LruCache
from synapse.util.caches.treecache import TreeCache, iterate_tree_cache_items

logger = logging.getLogger(__name__)

Expand Down Expand Up @@ -53,20 +55,67 @@ def __len__(self) -> int:
return len(self.value)


class _FullCacheKey(enum.Enum):
    """The key we use to cache the full dict.

    The complete dictionary for a cache key is stored under the composite
    key `(key, _FullCacheKey.KEY)`, alongside per-dict-key entries of the
    form `(key, dict_key)`. Using an enum member (rather than e.g. `None`)
    lets mypy express the key type as
    `Union[DKT, Literal[_FullCacheKey.KEY]]`.
    """

    KEY = object()


class _Sentinel(enum.Enum):
    # Defining a sentinel in this way allows mypy to correctly handle the
    # type of a dictionary lookup: the result can be typed as
    # `Union[VT, Literal[_Sentinel.sentinel]]` and narrowed with an
    # `is _Sentinel.sentinel` check.
    sentinel = object()


class _PerKeyValue(Generic[DV]):
    """Holds the cached value for a single dictionary key.

    If ``value`` is `_Sentinel.sentinel` then the requested key is known
    to *not* be present in the full dict.
    """

    __slots__ = ("value",)

    def __init__(self, value: Union[DV, Literal[_Sentinel.sentinel]]) -> None:
        self.value = value

    def __len__(self) -> int:
        # The enclosing cache sizes its entries via `len()`; a per-key
        # entry always counts as exactly one unit.
        return 1


class DictionaryCache(Generic[KT, DKT, DV]):
"""Caches key -> dictionary lookups, supporting caching partial dicts, i.e.
fetching a subset of dictionary keys for a particular key.
"""
erikjohnston marked this conversation as resolved.
Show resolved Hide resolved

def __init__(self, name: str, max_entries: int = 1000):
self.cache: LruCache[KT, DictionaryEntry] = LruCache(
max_size=max_entries, cache_name=name, size_callback=len
# We use a single cache to cache two different types of entries:
erikjohnston marked this conversation as resolved.
Show resolved Hide resolved
# 1. Map from (key, dict_key) -> dict value (or sentinel, indicating
# the key doesn't exist in the dict); and
# 2. Map from (key, _FullCacheKey.KEY) -> full dict.
#
# The former is used when explicit keys of the dictionary are looked up,
# and the latter when the full dictionary is requested.
#
# If explicit keys are requested and not in the cache, we then look
# to see if we have the full dict and use that if we do. If found in the
# full dict each key is added into the cache.
#
# This set up allows the `LruCache` to prune the full dict entries if
# they haven't been used in a while, even when there have been recent
# queries for subsets of the dict.
#
# Typing:
# * A key of `(KT, DKT)` has a value of `_PerKeyValue`
# * A key of `(KT, _FullCacheKey.KEY)` has a value of `Dict[DKT, DV]`
self.cache: LruCache[
Tuple[KT, Union[DKT, Literal[_FullCacheKey.KEY]]],
Union[_PerKeyValue, Dict[DKT, DV]],
] = LruCache(
max_size=max_entries,
cache_name=name,
cache_type=TreeCache,
size_callback=len,
)

self.name = name
Expand Down Expand Up @@ -96,28 +145,107 @@ def get(
Returns:
DictionaryEntry
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

this doesn't document what happens if dict_keys is None, but we don't have the full dict cached. Could it, please?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

... please?

I think it returns some fixed shape of DictionaryEntry ?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Ah, I'd put some docs under the dict_keys arg (and there is some in DictionaryEntry), but have also added some in the return value too.

"""
entry = self.cache.get(key, _Sentinel.sentinel)
if entry is not _Sentinel.sentinel:
if dict_keys is None:
return DictionaryEntry(
entry.full, entry.known_absent, dict(entry.value)
)

erikjohnston marked this conversation as resolved.
Show resolved Hide resolved
if dict_keys is None:
# First we check if we have cached the full dict.
entry = self.cache.get((key, _FullCacheKey.KEY), _Sentinel.sentinel)
if entry is not _Sentinel.sentinel:
assert isinstance(entry, dict)
return DictionaryEntry(True, set(), entry)

# If not, check if we have cached any of the dict keys.
erikjohnston marked this conversation as resolved.
Show resolved Hide resolved
all_entries = self.cache.get_multi(
(key,),
_Sentinel.sentinel,
)
if all_entries is _Sentinel.sentinel:
return DictionaryEntry(False, set(), {})

# If there are entries we need to unwrap the returned cache nodes
# and `_PerKeyValue` into the `DictionaryEntry`.
values = {}
known_absent = set()
for dict_key, dict_value in iterate_tree_cache_items((), all_entries):
dict_key = dict_key[0]
erikjohnston marked this conversation as resolved.
Show resolved Hide resolved
dict_value = dict_value.value
richvdh marked this conversation as resolved.
Show resolved Hide resolved

# We have explicitly looked for a full cache key, so we
# shouldn't see one.
assert dict_key != _FullCacheKey.KEY

# ... therefore the values must be `_PerKeyValue`
assert isinstance(dict_value, _PerKeyValue)

if dict_value.value is _Sentinel.sentinel:
known_absent.add(dict_key)
else:
values[dict_key] = dict_value.value

return DictionaryEntry(False, known_absent, values)
erikjohnston marked this conversation as resolved.
Show resolved Hide resolved

# We are being asked for a subset of keys.

# First go and check for each requested dict key in the cache, tracking
erikjohnston marked this conversation as resolved.
Show resolved Hide resolved
# which we couldn't find.
values = {}
known_absent = set()
missing = set()
erikjohnston marked this conversation as resolved.
Show resolved Hide resolved
for dict_key in dict_keys:
entry = self.cache.get((key, dict_key), _Sentinel.sentinel)
if entry is _Sentinel.sentinel:
missing.add(dict_key)
continue

assert isinstance(entry, _PerKeyValue)

if entry.value is _Sentinel.sentinel:
known_absent.add(dict_key)
else:
return DictionaryEntry(
entry.full,
entry.known_absent,
{k: entry.value[k] for k in dict_keys if k in entry.value},
)
values[dict_key] = entry.value

# If we found everything we can return immediately.
if not missing:
return DictionaryEntry(False, known_absent, values)

# If we are missing any keys check if we happen to have the full dict in
erikjohnston marked this conversation as resolved.
Show resolved Hide resolved
# the cache.
#
# We don't update the last access time for this cache fetch, as we
# aren't explicitly interested in the full dict and so we don't want
# requests for explicit dict keys to keep the full dict in the cache.
entry = self.cache.get(
(key, _FullCacheKey.KEY),
_Sentinel.sentinel,
update_last_access=False,
)
if entry is _Sentinel.sentinel:
# Not in the cache, return the subset of keys we found.
return DictionaryEntry(False, known_absent, values)

# We have the full dict!
assert isinstance(entry, dict)

return DictionaryEntry(False, set(), {})
values = {}
for dict_key in dict_keys:
erikjohnston marked this conversation as resolved.
Show resolved Hide resolved
# We explicitly add each dict key to the cache, so that cache hit
# rates for each key can be tracked separately.
erikjohnston marked this conversation as resolved.
Show resolved Hide resolved
value = entry.get(dict_key, _Sentinel.sentinel) # type: ignore[arg-type]
self.cache[(key, dict_key)] = _PerKeyValue(value)

if value is not _Sentinel.sentinel:
values[dict_key] = value

return DictionaryEntry(True, set(), values)
erikjohnston marked this conversation as resolved.
Show resolved Hide resolved

def invalidate(self, key: KT) -> None:
self.check_thread()

# Increment the sequence number so that any SELECT statements that
# raced with the INSERT don't update the cache (SYN-369)
self.sequence += 1
self.cache.pop(key, None)

# Del-multi accepts truncated tuples.
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't know what this means. Is it telling me why it's better to use del_multi than pop? If so, can it say so, and explain in more detail?

self.cache.del_multi((key,)) # type: ignore[arg-type]

def invalidate_all(self) -> None:
self.check_thread()
Expand Down Expand Up @@ -149,20 +277,27 @@ def update(
# Only update the cache if the caches sequence number matches the
# number that the cache had before the SELECT was started (SYN-369)
if fetched_keys is None:
self._insert(key, value, set())
self.cache[(key, _FullCacheKey.KEY)] = value
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

do we not need to update (or at least invalidate) entries for individual dict keys here as well?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't think so, if the value in the DB has changed then we should have previously called invalidate(..) to delete the stale entries? We could easily drop any existing entries, but I don't think its necessary?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

fair enough. That seems like an assumption that we should document in the docstring though.

(Also: shouldn't this method be called set or put or insert or something? Maybe a problem for another time though.)

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

(Also: shouldn't this method be called set or put or insert or something? Maybe a problem for another time though.)

Yup, will try and put up a separate PR to do some of these fixes if they're small.

else:
self._update_or_insert(key, value, fetched_keys)
self._update_subset(key, value, fetched_keys)

def _update_or_insert(
self, key: KT, value: Dict[DKT, DV], known_absent: Iterable[DKT]
def _update_subset(
self, key: KT, value: Dict[DKT, DV], fetched_keys: Iterable[DKT]
) -> None:
# We pop and reinsert as we need to tell the cache the size may have
# changed
"""Add the given dictionary values as explicit keys in the cache.

Args:
key
erikjohnston marked this conversation as resolved.
Show resolved Hide resolved
value: The dictionary with all the values that we should cache
fetched_keys: The full set of keys that were looked up, any keys
erikjohnston marked this conversation as resolved.
Show resolved Hide resolved
here not in `value` should be marked as "known absent".
"""

for dict_key, dict_value in value.items():
self.cache[(key, dict_key)] = _PerKeyValue(dict_value)

entry: DictionaryEntry = self.cache.pop(key, DictionaryEntry(False, set(), {}))
entry.value.update(value)
entry.known_absent.update(known_absent)
self.cache[key] = entry
for dict_key in fetched_keys:
if (key, dict_key) in self.cache:
continue
erikjohnston marked this conversation as resolved.
Show resolved Hide resolved

def _insert(self, key: KT, value: Dict[DKT, DV], known_absent: Set[DKT]) -> None:
self.cache[key] = DictionaryEntry(True, known_absent, value)
self.cache[(key, dict_key)] = _PerKeyValue(_Sentinel.sentinel)
65 changes: 63 additions & 2 deletions synapse/util/caches/lrucache.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,11 @@
from synapse.metrics.jemalloc import get_jemalloc_stats
from synapse.util import Clock, caches
from synapse.util.caches import CacheMetric, EvictionReason, register_cache
from synapse.util.caches.treecache import TreeCache, iterate_tree_cache_entry
from synapse.util.caches.treecache import (
TreeCache,
TreeCacheNode,
iterate_tree_cache_entry,
)
from synapse.util.linked_list import ListNode

if TYPE_CHECKING:
Expand Down Expand Up @@ -537,6 +541,7 @@ def cache_get(
default: Literal[None] = None,
callbacks: Collection[Callable[[], None]] = ...,
update_metrics: bool = ...,
update_last_access: bool = ...,
) -> Optional[VT]:
...

Expand All @@ -546,6 +551,7 @@ def cache_get(
default: T,
callbacks: Collection[Callable[[], None]] = ...,
update_metrics: bool = ...,
update_last_access: bool = ...,
) -> Union[T, VT]:
...

Expand All @@ -555,10 +561,27 @@ def cache_get(
default: Optional[T] = None,
callbacks: Collection[Callable[[], None]] = (),
update_metrics: bool = True,
update_last_access: bool = True,
) -> Union[None, T, VT]:
"""Lookup a key in the cache
erikjohnston marked this conversation as resolved.
Show resolved Hide resolved

Args:
key
default
callbacks: A collection of callbacks that will fire when the
node is removed from the cache (either due to invalidation
or expiry).
update_metrics: Whether to update the hit rate metrics
update_last_access: Whether to update the last access metrics
on a node if successfully fetched. These metrics are used
to determine when to remove the node from the cache. Set
to False if this fetch should *not* prevent a node from
being expired.
"""
node = cache.get(key, None)
if node is not None:
move_node_to_front(node)
if update_last_access:
move_node_to_front(node)
node.add_callbacks(callbacks)
if update_metrics and metrics:
metrics.inc_hits()
Expand All @@ -568,6 +591,42 @@ def cache_get(
metrics.inc_misses()
return default

@overload
def cache_get_multi(
    key: tuple,
    default: Literal[None] = None,
    update_metrics: bool = True,
) -> Union[None, TreeCacheNode]:
    ...

@overload
def cache_get_multi(
    key: tuple,
    default: T,
    update_metrics: bool = True,
) -> Union[T, TreeCacheNode]:
    ...

@synchronized
def cache_get_multi(
    key: tuple,
    default: Optional[T] = None,
    update_metrics: bool = True,
) -> Union[None, T, TreeCacheNode]:
    """Used only for `TreeCache` to fetch a subtree.

    Args:
        key: a (possibly truncated) key tuple identifying the subtree.
        default: returned when there is no entry under `key`.
        update_metrics: whether this lookup should count towards the
            hit/miss metrics.

    Returns:
        The `TreeCacheNode` for the subtree on a hit, otherwise `default`.
        Note that, unlike `cache_get`, this does not move any nodes to the
        front of the LRU, so a subtree fetch does not keep its entries
        from being expired.
    """

    # Subtree lookups via truncated keys only make sense for `TreeCache`.
    assert isinstance(cache, TreeCache)

    node = cache.get(key, None)
    if node is not None:
        if update_metrics and metrics:
            metrics.inc_hits()
        return node
    else:
        if update_metrics and metrics:
            metrics.inc_misses()
        return default

@synchronized
def cache_set(
key: KT, value: VT, callbacks: Collection[Callable[[], None]] = ()
Expand Down Expand Up @@ -674,6 +733,8 @@ def cache_contains(key: KT) -> bool:
self.setdefault = cache_set_default
self.pop = cache_pop
self.del_multi = cache_del_multi
if cache_type is TreeCache:
self.get_multi = cache_get_multi
erikjohnston marked this conversation as resolved.
Show resolved Hide resolved
# `invalidate` is exposed for consistency with DeferredCache, so that it can be
# invalidated by the cache invalidation replication stream.
self.invalidate = cache_del_multi
Expand Down
14 changes: 14 additions & 0 deletions synapse/util/caches/treecache.py
Original file line number Diff line number Diff line change
Expand Up @@ -139,3 +139,17 @@ def iterate_tree_cache_entry(d):
yield from iterate_tree_cache_entry(value_d)
else:
yield d


def iterate_tree_cache_items(key, value):
    """Helper function to iterate over the leaves of a tree, i.e. a dict
    that can contain dicts.

    Args:
        key: a tuple of the keys on the path from the root down to `value`;
            it is prefixed onto each yielded leaf key.
        value: either a `TreeCacheNode` subtree or a leaf value.

    Returns:
        A generator yielding key/value pairs, one per leaf.
    """
    if not isinstance(value, TreeCacheNode):
        # Reached a leaf: yield it along with the accumulated key path.
        yield key, value
        return

    for child_key, child_value in value.items():
        yield from iterate_tree_cache_items(key + (child_key,), child_value)
erikjohnston marked this conversation as resolved.
Show resolved Hide resolved
Loading