From 44bb881096d7ad4d730e2dc31e0094c0324e0970 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 6 Apr 2021 08:58:18 -0400 Subject: [PATCH 001/222] Add type hints to expiring cache. (#9730) --- changelog.d/9730.misc | 1 + synapse/federation/federation_client.py | 2 +- synapse/handlers/device.py | 4 +- synapse/handlers/e2e_keys.py | 12 --- synapse/handlers/sync.py | 10 ++- synapse/rest/media/v1/preview_url_resource.py | 2 +- synapse/state/__init__.py | 5 +- synapse/util/caches/expiringcache.py | 83 ++++++++++++------- 8 files changed, 65 insertions(+), 54 deletions(-) create mode 100644 changelog.d/9730.misc diff --git a/changelog.d/9730.misc b/changelog.d/9730.misc new file mode 100644 index 000000000000..8063059b0b37 --- /dev/null +++ b/changelog.d/9730.misc @@ -0,0 +1 @@ +Add type hints to expiring cache. diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index afdb5bf2fafb..55533d75014c 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -102,7 +102,7 @@ def __init__(self, hs: "HomeServer"): max_len=1000, expiry_ms=120 * 1000, reset_expiry_on_get=False, - ) + ) # type: ExpiringCache[str, EventBase] def _clear_tried_cache(self): """Clear pdu_destination_tried cache""" diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 54293d0b9c83..7e76db3e2a3f 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -631,7 +631,7 @@ def __init__(self, hs: "HomeServer", device_handler: DeviceHandler): max_len=10000, expiry_ms=30 * 60 * 1000, iterable=True, - ) + ) # type: ExpiringCache[str, Set[str]] # Attempt to resync out of sync device lists every 30s. self._resync_retry_in_progress = False @@ -760,7 +760,7 @@ async def _need_to_do_resync( """Given a list of updates for a user figure out if we need to do a full resync, or whether we have enough data that we can just apply the delta. """ - seen_updates = self._seen_updates.get(user_id, set()) + seen_updates = self._seen_updates.get(user_id, set()) # type: Set[str] extremity = await self.store.get_device_list_last_stream_id_for_remote(user_id) diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 739653a3fa20..92b18378fcfb 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -38,7 +38,6 @@ ) from synapse.util import json_decoder, unwrapFirstError from synapse.util.async_helpers import Linearizer -from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.retryutils import NotRetryingDestination if TYPE_CHECKING: @@ -1292,17 +1291,6 @@ def __init__(self, hs: "HomeServer", e2e_keys_handler: E2eKeysHandler): # user_id -> list of updates waiting to be handled. self._pending_updates = {} # type: Dict[str, List[Tuple[JsonDict, JsonDict]]] - # Recently seen stream ids. We don't bother keeping these in the DB, - # but they're useful to have them about to reduce the number of spurious - # resyncs. 
- self._seen_updates = ExpiringCache( - cache_name="signing_key_update_edu", - clock=self.clock, - max_len=10000, - expiry_ms=30 * 60 * 1000, - iterable=True, - ) - async def incoming_signing_key_update( self, origin: str, edu_content: JsonDict ) -> None: diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 7b356ba7e5a8..ff11266c6723 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -252,13 +252,13 @@ def __init__(self, hs: "HomeServer"): self.storage = hs.get_storage() self.state_store = self.storage.state - # ExpiringCache((User, Device)) -> LruCache(state_key => event_id) + # ExpiringCache((User, Device)) -> LruCache(user_id => event_id) self.lazy_loaded_members_cache = ExpiringCache( "lazy_loaded_members_cache", self.clock, max_len=0, expiry_ms=LAZY_LOADED_MEMBERS_CACHE_MAX_AGE, - ) + ) # type: ExpiringCache[Tuple[str, Optional[str]], LruCache[str, str]] async def wait_for_sync_for_user( self, @@ -733,8 +733,10 @@ async def compute_summary( def get_lazy_loaded_members_cache( self, cache_key: Tuple[str, Optional[str]] - ) -> LruCache: - cache = self.lazy_loaded_members_cache.get(cache_key) + ) -> LruCache[str, str]: + cache = self.lazy_loaded_members_cache.get( + cache_key + ) # type: Optional[LruCache[str, str]] if cache is None: logger.debug("creating LruCache for %r", cache_key) cache = LruCache(LAZY_LOADED_MEMBERS_CACHE_MAX_SIZE) diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index c4ed9dfdb40f..814145a04a15 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -175,7 +175,7 @@ def __init__( clock=self.clock, # don't spider URLs more often than once an hour expiry_ms=ONE_HOUR, - ) + ) # type: ExpiringCache[str, ObservableDeferred] if self._worker_run_media_background_jobs: self._cleaner_loop = self.clock.looping_call( diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index c3d6e80c49f7..c0f79ffdc8d1 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -22,6 +22,7 @@ Callable, DefaultDict, Dict, + FrozenSet, Iterable, List, Optional, @@ -515,7 +516,7 @@ def __init__(self, hs): expiry_ms=EVICTION_TIMEOUT_SECONDS * 1000, iterable=True, reset_expiry_on_get=True, - ) + ) # type: ExpiringCache[FrozenSet[int], _StateCacheEntry] # # stuff for tracking time spent on state-res by room @@ -536,7 +537,7 @@ async def resolve_state_groups( state_groups_ids: Dict[int, StateMap[str]], event_map: Optional[Dict[str, EventBase]], state_res_store: "StateResolutionStore", - ): + ) -> _StateCacheEntry: """Resolves conflicts between a set of state groups Always generates a new state group (unless we hit the cache), so should diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index e15f7ee698e5..4dc3477e8944 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -15,40 +15,50 @@ import logging from collections import OrderedDict +from typing import Any, Generic, Optional, TypeVar, Union, overload + +import attr +from typing_extensions import Literal from synapse.config import cache as cache_config from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.util import Clock from synapse.util.caches import register_cache logger = logging.getLogger(__name__) -SENTINEL = object() +SENTINEL = object() # type: Any + +T = TypeVar("T") +KT = TypeVar("KT") +VT = TypeVar("VT") -class ExpiringCache: + +class 
ExpiringCache(Generic[KT, VT]): def __init__( self, - cache_name, - clock, - max_len=0, - expiry_ms=0, - reset_expiry_on_get=False, - iterable=False, + cache_name: str, + clock: Clock, + max_len: int = 0, + expiry_ms: int = 0, + reset_expiry_on_get: bool = False, + iterable: bool = False, ): """ Args: - cache_name (str): Name of this cache, used for logging. - clock (Clock) - max_len (int): Max size of dict. If the dict grows larger than this + cache_name: Name of this cache, used for logging. + clock + max_len: Max size of dict. If the dict grows larger than this then the oldest items get automatically evicted. Default is 0, which indicates there is no max limit. - expiry_ms (int): How long before an item is evicted from the cache + expiry_ms: How long before an item is evicted from the cache in milliseconds. Default is 0, indicating items never get evicted based on time. - reset_expiry_on_get (bool): If true, will reset the expiry time for + reset_expiry_on_get: If true, will reset the expiry time for an item on access. Defaults to False. - iterable (bool): If true, the size is calculated by summing the + iterable: If true, the size is calculated by summing the sizes of all entries, rather than the number of entries. """ self._cache_name = cache_name @@ -62,7 +72,7 @@ def __init__( self._expiry_ms = expiry_ms self._reset_expiry_on_get = reset_expiry_on_get - self._cache = OrderedDict() + self._cache = OrderedDict() # type: OrderedDict[KT, _CacheEntry] self.iterable = iterable @@ -79,12 +89,12 @@ def f(): self._clock.looping_call(f, self._expiry_ms / 2) - def __setitem__(self, key, value): + def __setitem__(self, key: KT, value: VT) -> None: now = self._clock.time_msec() self._cache[key] = _CacheEntry(now, value) self.evict() - def evict(self): + def evict(self) -> None: # Evict if there are now too many items while self._max_size and len(self) > self._max_size: _key, value = self._cache.popitem(last=False) @@ -93,7 +103,7 @@ def evict(self): else: self.metrics.inc_evictions() - def __getitem__(self, key): + def __getitem__(self, key: KT) -> VT: try: entry = self._cache[key] self.metrics.inc_hits() @@ -106,7 +116,7 @@ def __getitem__(self, key): return entry.value - def pop(self, key, default=SENTINEL): + def pop(self, key: KT, default: T = SENTINEL) -> Union[VT, T]: """Removes and returns the value with the given key from the cache. If the key isn't in the cache then `default` will be returned if @@ -115,29 +125,40 @@ def pop(self, key, default=SENTINEL): Identical functionality to `dict.pop(..)`. """ - value = self._cache.pop(key, default) + value = self._cache.pop(key, SENTINEL) + # The key was not found. if value is SENTINEL: - raise KeyError(key) + if default is SENTINEL: + raise KeyError(key) + return default - return value + return value.value - def __contains__(self, key): + def __contains__(self, key: KT) -> bool: return key in self._cache - def get(self, key, default=None): + @overload + def get(self, key: KT, default: Literal[None] = None) -> Optional[VT]: + ... + + @overload + def get(self, key: KT, default: T) -> Union[VT, T]: + ... + + def get(self, key: KT, default: Optional[T] = None) -> Union[VT, Optional[T]]: try: return self[key] except KeyError: return default - def setdefault(self, key, value): + def setdefault(self, key: KT, value: VT) -> VT: try: return self[key] except KeyError: self[key] = value return value - def _prune_cache(self): + def _prune_cache(self) -> None: if not self._expiry_ms: # zero expiry time means don't expire. 
This should never get called # since we have this check in start too. @@ -166,7 +187,7 @@ def _prune_cache(self): len(self), ) - def __len__(self): + def __len__(self) -> int: if self.iterable: return sum(len(entry.value) for entry in self._cache.values()) else: @@ -190,9 +211,7 @@ def set_cache_factor(self, factor: float) -> bool: return False +@attr.s(slots=True) class _CacheEntry: - __slots__ = ["time", "value"] - - def __init__(self, time, value): - self.time = time - self.value = value + time = attr.ib(type=int) + value = attr.ib() From 04819239bae2b39ee42bfdb6f9b83c6d9fe34169 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 6 Apr 2021 14:38:30 +0100 Subject: [PATCH 002/222] Add a Synapse Module for configuring presence update routing (#9491) At the moment, if you'd like to share presence between local or remote users, those users must be sharing a room together. This isn't always the most convenient or useful situation though. This PR adds a module to Synapse that will allow deployments to set up extra logic on where presence updates should be routed. The module must implement two methods, `get_users_for_states` and `get_interested_users`. These methods are given presence updates or user IDs and must return information that Synapse will use to grant passing presence updates around. A method is additionally added to `ModuleApi` which allows triggering a set of users to receive the current, online presence information for all users they are considered interested in. This is the equivalent of that user receiving presence information during an initial sync. The goal of this module is to be fairly generic and useful for a variety of applications, with hard requirements being: * Sending state for a specific set or all known users to a defined set of local and remote users. * The ability to trigger an initial sync for specific users, so they receive all current state. --- README.rst | 7 +- changelog.d/9491.feature | 1 + docs/presence_router_module.md | 235 ++++++++++++++++ docs/sample_config.yaml | 23 +- synapse/app/generic_worker.py | 3 +- synapse/config/server.py | 39 ++- synapse/events/presence_router.py | 104 +++++++ synapse/federation/sender/__init__.py | 19 +- synapse/handlers/presence.py | 278 ++++++++++++++++--- synapse/module_api/__init__.py | 50 ++++ synapse/server.py | 5 + tests/events/test_presence_router.py | 386 ++++++++++++++++++++++++++ tests/handlers/test_sync.py | 21 +- tests/module_api/test_api.py | 175 +++++++++++- 14 files changed, 1282 insertions(+), 64 deletions(-) create mode 100644 changelog.d/9491.feature create mode 100644 docs/presence_router_module.md create mode 100644 synapse/events/presence_router.py create mode 100644 tests/events/test_presence_router.py diff --git a/README.rst b/README.rst index 655a2bf3bed0..1a5503572ee4 100644 --- a/README.rst +++ b/README.rst @@ -393,7 +393,12 @@ massive excess of outgoing federation requests (see `discussion indicate that your server is also issuing far more outgoing federation requests than can be accounted for by your users' activity, this is a likely cause. The misbehavior can be worked around by setting -``use_presence: false`` in the Synapse config file. +the following in the Synapse config file: + +.. 
code-block:: yaml
+
+    presence:
+      enabled: false
 
 People can't accept room invitations from me
 --------------------------------------------
diff --git a/changelog.d/9491.feature b/changelog.d/9491.feature
new file mode 100644
index 000000000000..8b56a95a44e2
--- /dev/null
+++ b/changelog.d/9491.feature
@@ -0,0 +1 @@
+Add a Synapse module for routing presence updates between users.
diff --git a/docs/presence_router_module.md b/docs/presence_router_module.md
new file mode 100644
index 000000000000..d6566d978d06
--- /dev/null
+++ b/docs/presence_router_module.md
@@ -0,0 +1,235 @@
+# Presence Router Module
+
+Synapse supports configuring a module that can specify additional users
+(local or remote) that should receive certain presence updates from local
+users.
+
+Note that routing presence via Application Service transactions is not
+currently supported.
+
+The presence routing module is implemented as a Python class, which will
+be imported by the running Synapse.
+
+## Python Presence Router Class
+
+The Python class is instantiated with two objects:
+
+* A configuration object of some type (see below).
+* An instance of `synapse.module_api.ModuleApi`.
+
+It then implements methods related to presence routing.
+
+Note that one method of `ModuleApi` that may be useful is:
+
+```python
+async def ModuleApi.send_local_online_presence_to(users: Iterable[str]) -> None
+```
+
+which can be given a list of local or remote MXIDs to broadcast known, online user
+presence to (for those users that the receiving user is considered interested in).
+It does not include state for users who are currently offline, and it can only be
+called on workers that support sending federation.
+
+### Module structure
+
+Below is a list of possible methods that can be implemented, and whether they are
+required.
+
+#### `parse_config`
+
+```python
+def parse_config(config_dict: dict) -> Any
+```
+
+**Required.** A static method that is passed a dictionary of config options, and
+  should return a validated config object. This method is described further in
+  [Configuration](#configuration).
+
+#### `get_users_for_states`
+
+```python
+async def get_users_for_states(
+    self,
+    state_updates: Iterable[UserPresenceState],
+) -> Dict[str, Set[UserPresenceState]]:
+```
+
+**Required.** An asynchronous method that is passed an iterable of user presence
+states. This method determines which users should receive each presence update. It
+does this by returning a dictionary with keys representing local or remote
+Matrix User IDs, and values being a python set
+of `synapse.handlers.presence.UserPresenceState` instances.
+
+Synapse will then attempt to send the specified presence updates to each user when
+possible.
+
+#### `get_interested_users`
+
+```python
+async def get_interested_users(self, user_id: str) -> Union[Set[str], str]
+```
+
+**Required.** An asynchronous method that is passed a single Matrix User ID. This
+method is expected to return the set of users whose presence the passed-in user may
+be interested in. Returned users may be local or remote. The presence routed as a
+result of what this method returns is sent in addition to the updates already sent
+between users that share a room together. Presence updates are deduplicated.
+
+This method should return a python set of Matrix User IDs, or the object
+`synapse.events.presence_router.PresenceRouter.ALL_USERS` to indicate that the passed
+user should receive presence information for *all* known users.
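+
+For example, a minimal sketch of this method (using a hypothetical
+`@admin:example.org` account purely for illustration) could look like:
+
+```python
+from typing import Set, Union
+
+from synapse.events.presence_router import PresenceRouter
+
+
+class AdminWatchesEveryoneRouter:
+    """Hypothetical router: a single admin account sees everyone's presence.
+
+    Only `get_interested_users` is sketched here; a complete module must also
+    implement `parse_config` and `get_users_for_states`.
+    """
+
+    async def get_interested_users(self, user_id: str) -> Union[Set[str], str]:
+        if user_id == "@admin:example.org":
+            # This user should receive presence updates for *all* known users.
+            return PresenceRouter.ALL_USERS
+
+        # No additional interest beyond users who share a room.
+        return set()
+```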
+
+For clarity, if the user `@alice:example.org` is passed to this method, and the Set
+`{"@bob:example.com", "@charlie:somewhere.org"}` is returned, this signifies that Alice
+should receive presence updates sent by Bob and Charlie, regardless of whether these
+users share a room.
+
+### Example
+
+Below is an example implementation of a presence router class.
+
+```python
+from typing import Dict, Iterable, List, Set, Union
+from synapse.events.presence_router import PresenceRouter
+from synapse.handlers.presence import UserPresenceState
+from synapse.module_api import ModuleApi
+
+class PresenceRouterConfig:
+    def __init__(self):
+        # Config options with their defaults
+        # A list of users to always send all user presence updates to
+        self.always_send_to_users = []  # type: List[str]
+
+        # A list of users to ignore presence updates for. Does not affect
+        # shared-room presence relationships
+        self.blacklisted_users = []  # type: List[str]
+
+class ExamplePresenceRouter:
+    """An example implementation of synapse.events.presence_router.PresenceRouter.
+    Supports routing all presence to a configured set of users, or a subset
+    of presence from certain users to members of certain rooms.
+
+    Args:
+        config: A configuration object.
+        module_api: An instance of Synapse's ModuleApi.
+    """
+    def __init__(self, config: PresenceRouterConfig, module_api: ModuleApi):
+        self._config = config
+        self._module_api = module_api
+
+    @staticmethod
+    def parse_config(config_dict: dict) -> PresenceRouterConfig:
+        """Parse a configuration dictionary from the homeserver config, do
+        some validation and return a typed PresenceRouterConfig.
+
+        Args:
+            config_dict: The configuration dictionary.
+
+        Returns:
+            A validated config object.
+        """
+        # Initialise a typed config object
+        config = PresenceRouterConfig()
+        always_send_to_users = config_dict.get("always_send_to_users")
+        blacklisted_users = config_dict.get("blacklisted_users")
+
+        # Do some validation of config options... otherwise raise a
+        # synapse.config.ConfigError.
+        config.always_send_to_users = always_send_to_users
+        config.blacklisted_users = blacklisted_users
+
+        return config
+
+    async def get_users_for_states(
+        self,
+        state_updates: Iterable[UserPresenceState],
+    ) -> Dict[str, Set[UserPresenceState]]:
+        """Given an iterable of user presence updates, determine where each one
+        needs to go. Returned results will not affect presence updates that are
+        sent between users who share a room.
+
+        Args:
+            state_updates: An iterable of user presence state updates.
+
+        Returns:
+            A dictionary of user_id -> set of UserPresenceState that the user should
+            receive.
+        """
+        destination_users = {}  # type: Dict[str, Set[UserPresenceState]]
+
+        # Ignore any updates for blacklisted users
+        desired_updates = set()
+        for update in state_updates:
+            if update.user_id not in self._config.blacklisted_users:
+                desired_updates.add(update)
+
+        # Send all presence updates to specific users
+        for user_id in self._config.always_send_to_users:
+            destination_users[user_id] = desired_updates
+
+        return destination_users
+
+    async def get_interested_users(
+        self,
+        user_id: str,
+    ) -> Union[Set[str], str]:
+        """
+        Retrieve a list of users that `user_id` is interested in receiving the
+        presence of. This will be in addition to those they share a room with.
+        Optionally, the object PresenceRouter.ALL_USERS can be returned to indicate
+        that this user should receive all incoming local and remote presence updates.
+
+        Note that this method will only be called for local users.
+
+        Args:
+            user_id: A user requesting presence updates.
+
+        Returns:
+            A set of user IDs to return additional presence updates for, or
+            PresenceRouter.ALL_USERS to return presence updates for all other users.
+        """
+        if user_id in self._config.always_send_to_users:
+            return PresenceRouter.ALL_USERS
+
+        return set()
+```
+
+#### A note on `get_users_for_states` and `get_interested_users`
+
+Both of these methods are effectively two different sides of the same coin. The logic
+regarding which users should receive updates for other users should be the same
+between them.
+
+`get_users_for_states` is called when presence updates come in from either federation
+or local users, and is used to either direct local presence to remote users, or to
+wake up the sync streams of local users to collect remote presence.
+
+In contrast, `get_interested_users` is used to determine the users that presence should
+be fetched for when a local user is syncing. This presence is then retrieved, before
+being fed through `get_users_for_states` once again, with only the syncing user's
+routing information pulled from the resulting dictionary.
+
+Their routing logic should thus line up, else you may run into unintended behaviour.
+
+## Configuration
+
+Once you've crafted your module and installed it into the same Python environment as
+Synapse, amend your homeserver config file with the following.
+
+```yaml
+presence:
+  presence_router:
+    module: my_module.ExamplePresenceRouter
+    config:
+      # Any configuration options for your module. The below is an example
+      # of setting options for ExamplePresenceRouter.
+      always_send_to_users: ["@presence_gobbler:example.org"]
+      blacklisted_users:
+        - "@alice:example.com"
+        - "@bob:example.com"
+      ...
+```
+
+The contents of `config` will be passed as a Python dictionary to the static
+`parse_config` method of your class. The object returned by this method will
+then be passed to the `__init__` method of your module as `config`.
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index b0bf9877403e..9182dcd98716 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -82,9 +82,28 @@ pid_file: DATADIR/homeserver.pid
 #
 #soft_file_limit: 0
 
-# Set to false to disable presence tracking on this homeserver.
+# Presence tracking allows users to see the state (e.g online/offline)
+# of other local and remote users.
 #
-#use_presence: false
+presence:
+  # Uncomment to disable presence tracking on this homeserver. This option
+  # replaces the previous top-level 'use_presence' option.
+  #
+  #enabled: false
+
+  # Presence routers are third-party modules that can specify additional logic
+  # to where presence updates from users are routed.
+  #
+  presence_router:
+    # The custom module's class. Uncomment to use a custom presence router module.
+    #
+    #module: "my_custom_router.PresenceRouter"
+
+    # Configuration options of the custom module. Refer to your module's
+    # documentation for available options.
+    #
+    #config:
+    #  example_option: 'something'
 
 # Whether to require authentication to retrieve profile data (avatars,
 # display names) of other users through the client API.
Defaults to diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 3df2aa5c2bb0..d1c207923399 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -281,6 +281,7 @@ def __init__(self, hs): self.hs = hs self.is_mine_id = hs.is_mine_id + self.presence_router = hs.get_presence_router() self._presence_enabled = hs.config.use_presence # The number of ongoing syncs on this process, by user id. @@ -395,7 +396,7 @@ def _user_syncing(): return _user_syncing() async def notify_from_replication(self, states, stream_id): - parties = await get_interested_parties(self.store, states) + parties = await get_interested_parties(self.store, self.presence_router, states) room_ids_to_states, users_to_states = parties self.notifier.on_new_event( diff --git a/synapse/config/server.py b/synapse/config/server.py index 5f8910b6e149..8decc9d10d76 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -27,6 +27,7 @@ from netaddr import AddrFormatError, IPNetwork, IPSet from synapse.api.room_versions import KNOWN_ROOM_VERSIONS +from synapse.util.module_loader import load_module from synapse.util.stringutils import parse_and_validate_server_name from ._base import Config, ConfigError @@ -238,7 +239,20 @@ def read_config(self, config, **kwargs): self.public_baseurl = config.get("public_baseurl") # Whether to enable user presence. - self.use_presence = config.get("use_presence", True) + presence_config = config.get("presence") or {} + self.use_presence = presence_config.get("enabled") + if self.use_presence is None: + self.use_presence = config.get("use_presence", True) + + # Custom presence router module + self.presence_router_module_class = None + self.presence_router_config = None + presence_router_config = presence_config.get("presence_router") + if presence_router_config: + ( + self.presence_router_module_class, + self.presence_router_config, + ) = load_module(presence_router_config, ("presence", "presence_router")) # Whether to update the user directory or not. This should be set to # false only if we are updating the user directory in a worker @@ -834,9 +848,28 @@ def generate_config_section( # #soft_file_limit: 0 - # Set to false to disable presence tracking on this homeserver. + # Presence tracking allows users to see the state (e.g online/offline) + # of other local and remote users. # - #use_presence: false + presence: + # Uncomment to disable presence tracking on this homeserver. This option + # replaces the previous top-level 'use_presence' option. + # + #enabled: false + + # Presence routers are third-party modules that can specify additional logic + # to where presence updates from users are routed. + # + presence_router: + # The custom module's class. Uncomment to use a custom presence router module. + # + #module: "my_custom_router.PresenceRouter" + + # Configuration options of the custom module. Refer to your module's + # documentation for available options. + # + #config: + # example_option: 'something' # Whether to require authentication to retrieve profile data (avatars, # display names) of other users through the client API. Defaults to diff --git a/synapse/events/presence_router.py b/synapse/events/presence_router.py new file mode 100644 index 000000000000..24cd389d80c6 --- /dev/null +++ b/synapse/events/presence_router.py @@ -0,0 +1,104 @@ +# -*- coding: utf-8 -*- +# Copyright 2021 The Matrix.org Foundation C.I.C. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import TYPE_CHECKING, Dict, Iterable, Set, Union + +from synapse.api.presence import UserPresenceState + +if TYPE_CHECKING: + from synapse.server import HomeServer + + +class PresenceRouter: + """ + A module that the homeserver will call upon to help route user presence updates to + additional destinations. If a custom presence router is configured, calls will be + passed to that instead. + """ + + ALL_USERS = "ALL" + + def __init__(self, hs: "HomeServer"): + self.custom_presence_router = None + + # Check whether a custom presence router module has been configured + if hs.config.presence_router_module_class: + # Initialise the module + self.custom_presence_router = hs.config.presence_router_module_class( + config=hs.config.presence_router_config, module_api=hs.get_module_api() + ) + + # Ensure the module has implemented the required methods + required_methods = ["get_users_for_states", "get_interested_users"] + for method_name in required_methods: + if not hasattr(self.custom_presence_router, method_name): + raise Exception( + "PresenceRouter module '%s' must implement all required methods: %s" + % ( + hs.config.presence_router_module_class.__name__, + ", ".join(required_methods), + ) + ) + + async def get_users_for_states( + self, + state_updates: Iterable[UserPresenceState], + ) -> Dict[str, Set[UserPresenceState]]: + """ + Given an iterable of user presence updates, determine where each one + needs to go. + + Args: + state_updates: An iterable of user presence state updates. + + Returns: + A dictionary of user_id -> set of UserPresenceState, indicating which + presence updates each user should receive. + """ + if self.custom_presence_router is not None: + # Ask the custom module + return await self.custom_presence_router.get_users_for_states( + state_updates=state_updates + ) + + # Don't include any extra destinations for presence updates + return {} + + async def get_interested_users(self, user_id: str) -> Union[Set[str], ALL_USERS]: + """ + Retrieve a list of users that `user_id` is interested in receiving the + presence of. This will be in addition to those they share a room with. + Optionally, the object PresenceRouter.ALL_USERS can be returned to indicate + that this user should receive all incoming local and remote presence updates. + + Note that this method will only be called for local users, but can return users + that are local or remote. + + Args: + user_id: A user requesting presence updates. + + Returns: + A set of user IDs to return presence updates for, or ALL_USERS to return all + known updates. + """ + if self.custom_presence_router is not None: + # Ask the custom module for interested users + return await self.custom_presence_router.get_interested_users( + user_id=user_id + ) + + # A custom presence router is not defined. 
+ # Don't report any additional interested users + return set() diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 8babb1ebbe4e..98bfce22ffd6 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -44,6 +44,7 @@ from synapse.util.metrics import Measure, measure_func if TYPE_CHECKING: + from synapse.events.presence_router import PresenceRouter from synapse.server import HomeServer logger = logging.getLogger(__name__) @@ -162,6 +163,7 @@ def __init__(self, hs: "HomeServer"): self.clock = hs.get_clock() self.is_mine_id = hs.is_mine_id + self._presence_router = None # type: Optional[PresenceRouter] self._transaction_manager = TransactionManager(hs) self._instance_name = hs.get_instance_name() @@ -584,7 +586,22 @@ async def _process_presence_inner(self, states: List[UserPresenceState]) -> None """Given a list of states populate self.pending_presence_by_dest and poke to send a new transaction to each destination """ - hosts_and_states = await get_interested_remotes(self.store, states, self.state) + # We pull the presence router here instead of __init__ + # to prevent a dependency cycle: + # + # AuthHandler -> Notifier -> FederationSender + # -> PresenceRouter -> ModuleApi -> AuthHandler + if self._presence_router is None: + self._presence_router = self.hs.get_presence_router() + + assert self._presence_router is not None + + hosts_and_states = await get_interested_remotes( + self.store, + self._presence_router, + states, + self.state, + ) for destinations, states in hosts_and_states: for destination in destinations: diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index da92feacc9eb..c817f2952d0b 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -25,7 +25,17 @@ import abc import logging from contextlib import contextmanager -from typing import TYPE_CHECKING, Dict, Iterable, List, Set, Tuple +from typing import ( + TYPE_CHECKING, + Dict, + FrozenSet, + Iterable, + List, + Optional, + Set, + Tuple, + Union, +) from prometheus_client import Counter from typing_extensions import ContextManager @@ -34,6 +44,7 @@ from synapse.api.constants import EventTypes, Membership, PresenceState from synapse.api.errors import SynapseError from synapse.api.presence import UserPresenceState +from synapse.events.presence_router import PresenceRouter from synapse.logging.context import run_in_background from synapse.logging.utils import log_function from synapse.metrics import LaterGauge @@ -42,7 +53,7 @@ from synapse.storage.databases.main import DataStore from synapse.types import Collection, JsonDict, UserID, get_domain_from_id from synapse.util.async_helpers import Linearizer -from synapse.util.caches.descriptors import cached +from synapse.util.caches.descriptors import _CacheContext, cached from synapse.util.metrics import Measure from synapse.util.wheel_timer import WheelTimer @@ -209,6 +220,7 @@ def __init__(self, hs: "HomeServer"): self.notifier = hs.get_notifier() self.federation = hs.get_federation_sender() self.state = hs.get_state_handler() + self.presence_router = hs.get_presence_router() self._presence_enabled = hs.config.use_presence federation_registry = hs.get_federation_registry() @@ -653,7 +665,7 @@ async def _persist_and_notify(self, states): """ stream_id, max_token = await self.store.update_presence(states) - parties = await get_interested_parties(self.store, states) + parties = await get_interested_parties(self.store, self.presence_router, states) 
room_ids_to_states, users_to_states = parties self.notifier.on_new_event( @@ -1041,7 +1053,12 @@ def __init__(self, hs: "HomeServer"): # # Presence -> Notifier -> PresenceEventSource -> Presence # + # Same with get_module_api, get_presence_router + # + # AuthHandler -> Notifier -> PresenceEventSource -> ModuleApi -> AuthHandler self.get_presence_handler = hs.get_presence_handler + self.get_module_api = hs.get_module_api + self.get_presence_router = hs.get_presence_router self.clock = hs.get_clock() self.store = hs.get_datastore() self.state = hs.get_state_handler() @@ -1055,7 +1072,7 @@ async def get_new_events( include_offline=True, explicit_room_id=None, **kwargs - ): + ) -> Tuple[List[UserPresenceState], int]: # The process for getting presence events are: # 1. Get the rooms the user is in. # 2. Get the list of user in the rooms. @@ -1068,7 +1085,17 @@ async def get_new_events( # We don't try and limit the presence updates by the current token, as # sending down the rare duplicate is not a concern. + user_id = user.to_string() + stream_change_cache = self.store.presence_stream_cache + with Measure(self.clock, "presence.get_new_events"): + if user_id in self.get_module_api()._send_full_presence_to_local_users: + # This user has been specified by a module to receive all current, online + # user presence. Removing from_key and setting include_offline to false + # will do effectively this. + from_key = None + include_offline = False + if from_key is not None: from_key = int(from_key) @@ -1091,59 +1118,209 @@ async def get_new_events( # doesn't return. C.f. #5503. return [], max_token - presence = self.get_presence_handler() - stream_change_cache = self.store.presence_stream_cache - + # Figure out which other users this user should receive updates for users_interested_in = await self._get_interested_in(user, explicit_room_id) - user_ids_changed = set() # type: Collection[str] - changed = None - if from_key: - changed = stream_change_cache.get_all_entities_changed(from_key) + # We have a set of users that we're interested in the presence of. We want to + # cross-reference that with the users that have actually changed their presence. - if changed is not None and len(changed) < 500: - assert isinstance(user_ids_changed, set) + # Check whether this user should see all user updates - # For small deltas, its quicker to get all changes and then - # work out if we share a room or they're in our presence list - get_updates_counter.labels("stream").inc() - for other_user_id in changed: - if other_user_id in users_interested_in: - user_ids_changed.add(other_user_id) - else: - # Too many possible updates. Find all users we can see and check - # if any of them have changed. - get_updates_counter.labels("full").inc() + if users_interested_in == PresenceRouter.ALL_USERS: + # Provide presence state for all users + presence_updates = await self._filter_all_presence_updates_for_user( + user_id, include_offline, from_key + ) - if from_key: - user_ids_changed = stream_change_cache.get_entities_changed( - users_interested_in, from_key + # Remove the user from the list of users to receive all presence + if user_id in self.get_module_api()._send_full_presence_to_local_users: + self.get_module_api()._send_full_presence_to_local_users.remove( + user_id ) + + return presence_updates, max_token + + # Make mypy happy. users_interested_in should now be a set + assert not isinstance(users_interested_in, str) + + # The set of users that we're interested in and that have had a presence update. 
+ # We'll actually pull the presence updates for these users at the end. + interested_and_updated_users = ( + set() + ) # type: Union[Set[str], FrozenSet[str]] + + if from_key: + # First get all users that have had a presence update + updated_users = stream_change_cache.get_all_entities_changed(from_key) + + # Cross-reference users we're interested in with those that have had updates. + # Use a slightly-optimised method for processing smaller sets of updates. + if updated_users is not None and len(updated_users) < 500: + # For small deltas, it's quicker to get all changes and then + # cross-reference with the users we're interested in + get_updates_counter.labels("stream").inc() + for other_user_id in updated_users: + if other_user_id in users_interested_in: + # mypy thinks this variable could be a FrozenSet as it's possibly set + # to one in the `get_entities_changed` call below, and `add()` is not + # method on a FrozenSet. That doesn't affect us here though, as + # `interested_and_updated_users` is clearly a set() above. + interested_and_updated_users.add(other_user_id) # type: ignore else: - user_ids_changed = users_interested_in + # Too many possible updates. Find all users we can see and check + # if any of them have changed. + get_updates_counter.labels("full").inc() - updates = await presence.current_state_for_users(user_ids_changed) + interested_and_updated_users = ( + stream_change_cache.get_entities_changed( + users_interested_in, from_key + ) + ) + else: + # No from_key has been specified. Return the presence for all users + # this user is interested in + interested_and_updated_users = users_interested_in + + # Retrieve the current presence state for each user + users_to_state = await self.get_presence_handler().current_state_for_users( + interested_and_updated_users + ) + presence_updates = list(users_to_state.values()) - if include_offline: - return (list(updates.values()), max_token) + # Remove the user from the list of users to receive all presence + if user_id in self.get_module_api()._send_full_presence_to_local_users: + self.get_module_api()._send_full_presence_to_local_users.remove(user_id) + + if not include_offline: + # Filter out offline presence states + presence_updates = self._filter_offline_presence_state(presence_updates) + + return presence_updates, max_token + + async def _filter_all_presence_updates_for_user( + self, + user_id: str, + include_offline: bool, + from_key: Optional[int] = None, + ) -> List[UserPresenceState]: + """ + Computes the presence updates a user should receive. + + First pulls presence updates from the database. Then consults PresenceRouter + for whether any updates should be excluded by user ID. + + Args: + user_id: The User ID of the user to compute presence updates for. + include_offline: Whether to include offline presence states from the results. + from_key: The minimum stream ID of updates to pull from the database + before filtering. + + Returns: + A list of presence states for the given user to receive. 
+ """ + if from_key: + # Only return updates since the last sync + updated_users = self.store.presence_stream_cache.get_all_entities_changed( + from_key + ) + if not updated_users: + updated_users = [] + + # Get the actual presence update for each change + users_to_state = await self.get_presence_handler().current_state_for_users( + updated_users + ) + presence_updates = list(users_to_state.values()) + + if not include_offline: + # Filter out offline states + presence_updates = self._filter_offline_presence_state(presence_updates) else: - return ( - [s for s in updates.values() if s.state != PresenceState.OFFLINE], - max_token, + users_to_state = await self.store.get_presence_for_all_users( + include_offline=include_offline ) + presence_updates = list(users_to_state.values()) + + # TODO: This feels wildly inefficient, and it's unfortunate we need to ask the + # module for information on a number of users when we then only take the info + # for a single user + + # Filter through the presence router + users_to_state_set = await self.get_presence_router().get_users_for_states( + presence_updates + ) + + # We only want the mapping for the syncing user + presence_updates = list(users_to_state_set[user_id]) + + # Return presence information for all users + return presence_updates + + def _filter_offline_presence_state( + self, presence_updates: Iterable[UserPresenceState] + ) -> List[UserPresenceState]: + """Given an iterable containing user presence updates, return a list with any offline + presence states removed. + + Args: + presence_updates: Presence states to filter + + Returns: + A new list with any offline presence states removed. + """ + return [ + update + for update in presence_updates + if update.state != PresenceState.OFFLINE + ] + def get_current_key(self): return self.store.get_current_presence_token() @cached(num_args=2, cache_context=True) - async def _get_interested_in(self, user, explicit_room_id, cache_context): + async def _get_interested_in( + self, + user: UserID, + explicit_room_id: Optional[str] = None, + cache_context: Optional[_CacheContext] = None, + ) -> Union[Set[str], str]: """Returns the set of users that the given user should see presence - updates for + updates for. + + Args: + user: The user to retrieve presence updates for. + explicit_room_id: The users that are in the room will be returned. + + Returns: + A set of user IDs to return presence updates for, or "ALL" to return all + known updates. """ user_id = user.to_string() users_interested_in = set() users_interested_in.add(user_id) # So that we receive our own presence + # cache_context isn't likely to ever be None due to the @cached decorator, + # but we can't have a non-optional argument after the optional argument + # explicit_room_id either. Assert cache_context is not None so we can use it + # without mypy complaining. 
+ assert cache_context + + # Check with the presence router whether we should poll additional users for + # their presence information + additional_users = await self.get_presence_router().get_interested_users( + user.to_string() + ) + if additional_users == PresenceRouter.ALL_USERS: + # If the module requested that this user see the presence updates of *all* + # users, then simply return that instead of calculating what rooms this + # user shares + return PresenceRouter.ALL_USERS + + # Add the additional users from the router + users_interested_in.update(additional_users) + + # Find the users who share a room with this user users_who_share_room = await self.store.get_users_who_share_room_with_user( user_id, on_invalidate=cache_context.invalidate ) @@ -1314,14 +1491,15 @@ def handle_update(prev_state, new_state, is_mine, wheel_timer, now): async def get_interested_parties( - store: DataStore, states: List[UserPresenceState] + store: DataStore, presence_router: PresenceRouter, states: List[UserPresenceState] ) -> Tuple[Dict[str, List[UserPresenceState]], Dict[str, List[UserPresenceState]]]: """Given a list of states return which entities (rooms, users) are interested in the given states. Args: - store - states + store: The homeserver's data store. + presence_router: A module for augmenting the destinations for presence updates. + states: A list of incoming user presence updates. Returns: A 2-tuple of `(room_ids_to_states, users_to_states)`, @@ -1337,11 +1515,22 @@ async def get_interested_parties( # Always notify self users_to_states.setdefault(state.user_id, []).append(state) + # Ask a presence routing module for any additional parties if one + # is loaded. + router_users_to_states = await presence_router.get_users_for_states(states) + + # Update the dictionaries with additional destinations and state to send + for user_id, user_states in router_users_to_states.items(): + users_to_states.setdefault(user_id, []).extend(user_states) + return room_ids_to_states, users_to_states async def get_interested_remotes( - store: DataStore, states: List[UserPresenceState], state_handler: StateHandler + store: DataStore, + presence_router: PresenceRouter, + states: List[UserPresenceState], + state_handler: StateHandler, ) -> List[Tuple[Collection[str], List[UserPresenceState]]]: """Given a list of presence states figure out which remote servers should be sent which. @@ -1349,9 +1538,10 @@ async def get_interested_remotes( All the presence states should be for local users only. Args: - store - states - state_handler + store: The homeserver's data store. + presence_router: A module for augmenting the destinations for presence updates. + states: A list of incoming user presence updates. + state_handler: Returns: A list of 2-tuples of destinations and states, where for @@ -1363,7 +1553,9 @@ async def get_interested_remotes( # First we look up the rooms each user is in (as well as any explicit # subscriptions), then for each distinct room we look up the remote # hosts in those rooms. 
- room_ids_to_states, users_to_states = await get_interested_parties(store, states) + room_ids_to_states, users_to_states = await get_interested_parties( + store, presence_router, states + ) for room_id, states in room_ids_to_states.items(): hosts = await state_handler.get_current_hosts_in_room(room_id) diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 781e02fbbb31..3ecd46c0382a 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -50,11 +50,20 @@ def __init__(self, hs, auth_handler): self._auth = hs.get_auth() self._auth_handler = auth_handler self._server_name = hs.hostname + self._presence_stream = hs.get_event_sources().sources["presence"] # We expose these as properties below in order to attach a helpful docstring. self._http_client = hs.get_simple_http_client() # type: SimpleHttpClient self._public_room_list_manager = PublicRoomListManager(hs) + # The next time these users sync, they will receive the current presence + # state of all local users. Users are added by send_local_online_presence_to, + # and removed after a successful sync. + # + # We make this a private variable to deter modules from accessing it directly, + # though other classes in Synapse will still do so. + self._send_full_presence_to_local_users = set() + @property def http_client(self): """Allows making outbound HTTP requests to remote resources. @@ -385,6 +394,47 @@ async def create_and_send_event_into_room(self, event_dict: JsonDict) -> EventBa return event + async def send_local_online_presence_to(self, users: Iterable[str]) -> None: + """ + Forces the equivalent of a presence initial_sync for a set of local or remote + users. The users will receive presence for all currently online users that they + are considered interested in. + + Updates to remote users will be sent immediately, whereas local users will receive + them on their next sync attempt. + + Note that this method can only be run on the main or federation_sender worker + processes. + """ + if not self._hs.should_send_federation(): + raise Exception( + "send_local_online_presence_to can only be run " + "on processes that send federation", + ) + + for user in users: + if self._hs.is_mine_id(user): + # Modify SyncHandler._generate_sync_entry_for_presence to call + # presence_source.get_new_events with an empty `from_key` if + # that user's ID were in a list modified by ModuleApi somewhere. + # That user would then get all presence state on next incremental sync. 
+ + # Force a presence initial_sync for this user next time + self._send_full_presence_to_local_users.add(user) + else: + # Retrieve presence state for currently online users that this user + # is considered interested in + presence_events, _ = await self._presence_stream.get_new_events( + UserID.from_string(user), from_key=None, include_offline=False + ) + + # Send to remote destinations + await make_deferred_yieldable( + # We pull the federation sender here as we can only do so on workers + # that support sending presence + self._hs.get_federation_sender().send_presence(presence_events) + ) + class PublicRoomListManager: """Contains methods for adding to, removing from and querying whether a room diff --git a/synapse/server.py b/synapse/server.py index e42f7b1a1876..cfb55c230ded 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -51,6 +51,7 @@ from synapse.crypto.context_factory import RegularPolicyForHTTPS from synapse.crypto.keyring import Keyring from synapse.events.builder import EventBuilderFactory +from synapse.events.presence_router import PresenceRouter from synapse.events.spamcheck import SpamChecker from synapse.events.third_party_rules import ThirdPartyEventRules from synapse.events.utils import EventClientSerializer @@ -425,6 +426,10 @@ def get_typing_writer_handler(self) -> TypingWriterHandler: else: raise Exception("Workers cannot write typing") + @cache_in_self + def get_presence_router(self) -> PresenceRouter: + return PresenceRouter(self) + @cache_in_self def get_typing_handler(self) -> FollowerTypingHandler: if self.config.worker.writers.typing == self.get_instance_name(): diff --git a/tests/events/test_presence_router.py b/tests/events/test_presence_router.py new file mode 100644 index 000000000000..c6e547f11c4d --- /dev/null +++ b/tests/events/test_presence_router.py @@ -0,0 +1,386 @@ +# -*- coding: utf-8 -*- +# Copyright 2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an 'AS IS' BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing import Dict, Iterable, List, Optional, Set, Tuple, Union + +from mock import Mock + +import attr + +from synapse.api.constants import EduTypes +from synapse.events.presence_router import PresenceRouter +from synapse.federation.units import Transaction +from synapse.handlers.presence import UserPresenceState +from synapse.module_api import ModuleApi +from synapse.rest import admin +from synapse.rest.client.v1 import login, presence, room +from synapse.types import JsonDict, StreamToken, create_requester + +from tests.handlers.test_sync import generate_sync_config +from tests.unittest import FederatingHomeserverTestCase, TestCase, override_config + + +@attr.s +class PresenceRouterTestConfig: + users_who_should_receive_all_presence = attr.ib(type=List[str], default=[]) + + +class PresenceRouterTestModule: + def __init__(self, config: PresenceRouterTestConfig, module_api: ModuleApi): + self._config = config + self._module_api = module_api + + async def get_users_for_states( + self, state_updates: Iterable[UserPresenceState] + ) -> Dict[str, Set[UserPresenceState]]: + users_to_state = { + user_id: set(state_updates) + for user_id in self._config.users_who_should_receive_all_presence + } + return users_to_state + + async def get_interested_users( + self, user_id: str + ) -> Union[Set[str], PresenceRouter.ALL_USERS]: + if user_id in self._config.users_who_should_receive_all_presence: + return PresenceRouter.ALL_USERS + + return set() + + @staticmethod + def parse_config(config_dict: dict) -> PresenceRouterTestConfig: + """Parse a configuration dictionary from the homeserver config, do + some validation and return a typed PresenceRouterConfig. + + Args: + config_dict: The configuration dictionary. + + Returns: + A validated config object. + """ + # Initialise a typed config object + config = PresenceRouterTestConfig() + + config.users_who_should_receive_all_presence = config_dict.get( + "users_who_should_receive_all_presence" + ) + + return config + + +class PresenceRouterTestCase(FederatingHomeserverTestCase): + servlets = [ + admin.register_servlets, + login.register_servlets, + room.register_servlets, + presence.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + return self.setup_test_homeserver( + federation_transport_client=Mock(spec=["send_transaction"]), + ) + + def prepare(self, reactor, clock, homeserver): + self.sync_handler = self.hs.get_sync_handler() + self.module_api = homeserver.get_module_api() + + @override_config( + { + "presence": { + "presence_router": { + "module": __name__ + ".PresenceRouterTestModule", + "config": { + "users_who_should_receive_all_presence": [ + "@presence_gobbler:test", + ] + }, + } + }, + "send_federation": True, + } + ) + def test_receiving_all_presence(self): + """Test that a user that does not share a room with another other can receive + presence for them, due to presence routing. 
+ """ + # Create a user who should receive all presence of others + self.presence_receiving_user_id = self.register_user( + "presence_gobbler", "monkey" + ) + self.presence_receiving_user_tok = self.login("presence_gobbler", "monkey") + + # And two users who should not have any special routing + self.other_user_one_id = self.register_user("other_user_one", "monkey") + self.other_user_one_tok = self.login("other_user_one", "monkey") + self.other_user_two_id = self.register_user("other_user_two", "monkey") + self.other_user_two_tok = self.login("other_user_two", "monkey") + + # Put the other two users in a room with each other + room_id = self.helper.create_room_as( + self.other_user_one_id, tok=self.other_user_one_tok + ) + + self.helper.invite( + room_id, + self.other_user_one_id, + self.other_user_two_id, + tok=self.other_user_one_tok, + ) + self.helper.join(room_id, self.other_user_two_id, tok=self.other_user_two_tok) + # User one sends some presence + send_presence_update( + self, + self.other_user_one_id, + self.other_user_one_tok, + "online", + "boop", + ) + + # Check that the presence receiving user gets user one's presence when syncing + presence_updates, sync_token = sync_presence( + self, self.presence_receiving_user_id + ) + self.assertEqual(len(presence_updates), 1) + + presence_update = presence_updates[0] # type: UserPresenceState + self.assertEqual(presence_update.user_id, self.other_user_one_id) + self.assertEqual(presence_update.state, "online") + self.assertEqual(presence_update.status_msg, "boop") + + # Have all three users send presence + send_presence_update( + self, + self.other_user_one_id, + self.other_user_one_tok, + "online", + "user_one", + ) + send_presence_update( + self, + self.other_user_two_id, + self.other_user_two_tok, + "online", + "user_two", + ) + send_presence_update( + self, + self.presence_receiving_user_id, + self.presence_receiving_user_tok, + "online", + "presence_gobbler", + ) + + # Check that the presence receiving user gets everyone's presence + presence_updates, _ = sync_presence( + self, self.presence_receiving_user_id, sync_token + ) + self.assertEqual(len(presence_updates), 3) + + # But that User One only get itself and User Two's presence + presence_updates, _ = sync_presence(self, self.other_user_one_id) + self.assertEqual(len(presence_updates), 2) + + found = False + for update in presence_updates: + if update.user_id == self.other_user_two_id: + self.assertEqual(update.state, "online") + self.assertEqual(update.status_msg, "user_two") + found = True + + self.assertTrue(found) + + @override_config( + { + "presence": { + "presence_router": { + "module": __name__ + ".PresenceRouterTestModule", + "config": { + "users_who_should_receive_all_presence": [ + "@presence_gobbler1:test", + "@presence_gobbler2:test", + "@far_away_person:island", + ] + }, + } + }, + "send_federation": True, + } + ) + def test_send_local_online_presence_to_with_module(self): + """Tests that send_local_presence_to_users sends local online presence to a set + of specified local and remote users, with a custom PresenceRouter module enabled. 
+ """ + # Create a user who will send presence updates + self.other_user_id = self.register_user("other_user", "monkey") + self.other_user_tok = self.login("other_user", "monkey") + + # And another two users that will also send out presence updates, as well as receive + # theirs and everyone else's + self.presence_receiving_user_one_id = self.register_user( + "presence_gobbler1", "monkey" + ) + self.presence_receiving_user_one_tok = self.login("presence_gobbler1", "monkey") + self.presence_receiving_user_two_id = self.register_user( + "presence_gobbler2", "monkey" + ) + self.presence_receiving_user_two_tok = self.login("presence_gobbler2", "monkey") + + # Have all three users send some presence updates + send_presence_update( + self, + self.other_user_id, + self.other_user_tok, + "online", + "I'm online!", + ) + send_presence_update( + self, + self.presence_receiving_user_one_id, + self.presence_receiving_user_one_tok, + "online", + "I'm also online!", + ) + send_presence_update( + self, + self.presence_receiving_user_two_id, + self.presence_receiving_user_two_tok, + "unavailable", + "I'm in a meeting!", + ) + + # Mark each presence-receiving user for receiving all user presence + self.get_success( + self.module_api.send_local_online_presence_to( + [ + self.presence_receiving_user_one_id, + self.presence_receiving_user_two_id, + ] + ) + ) + + # Perform a sync for each user + + # The other user should only receive their own presence + presence_updates, _ = sync_presence(self, self.other_user_id) + self.assertEqual(len(presence_updates), 1) + + presence_update = presence_updates[0] # type: UserPresenceState + self.assertEqual(presence_update.user_id, self.other_user_id) + self.assertEqual(presence_update.state, "online") + self.assertEqual(presence_update.status_msg, "I'm online!") + + # Whereas both presence receiving users should receive everyone's presence updates + presence_updates, _ = sync_presence(self, self.presence_receiving_user_one_id) + self.assertEqual(len(presence_updates), 3) + presence_updates, _ = sync_presence(self, self.presence_receiving_user_two_id) + self.assertEqual(len(presence_updates), 3) + + # Test that sending to a remote user works + remote_user_id = "@far_away_person:island" + + # Note that due to the remote user being in our module's + # users_who_should_receive_all_presence config, they would have + # received user presence updates already. 
+        #
+        # Thus we reset the mock, and try sending all online local user
+        # presence again
+        self.hs.get_federation_transport_client().send_transaction.reset_mock()
+
+        # Broadcast local user online presence
+        self.get_success(
+            self.module_api.send_local_online_presence_to([remote_user_id])
+        )
+
+        # Check that the expected presence updates were sent
+        expected_users = [
+            self.other_user_id,
+            self.presence_receiving_user_one_id,
+            self.presence_receiving_user_two_id,
+        ]
+
+        calls = (
+            self.hs.get_federation_transport_client().send_transaction.call_args_list
+        )
+        for call in calls:
+            federation_transaction = call.args[0]  # type: Transaction
+
+            # Get the sent EDUs in this transaction
+            edus = federation_transaction.get_dict()["edus"]
+
+            for edu in edus:
+                # Make sure we're only checking presence-type EDUs
+                if edu["edu_type"] != EduTypes.Presence:
+                    continue
+
+                # EDUs can contain multiple presence updates
+                for presence_update in edu["content"]["push"]:
+                    # Check for presence updates that contain the user IDs we're after
+                    expected_users.remove(presence_update["user_id"])
+
+                    # Ensure that no offline states are being sent out
+                    self.assertNotEqual(presence_update["presence"], "offline")
+
+        self.assertEqual(len(expected_users), 0)
+
+
+def send_presence_update(
+    testcase: TestCase,
+    user_id: str,
+    access_token: str,
+    presence_state: str,
+    status_message: Optional[str] = None,
+) -> JsonDict:
+    # Build the presence body
+    body = {"presence": presence_state}
+    if status_message:
+        body["status_msg"] = status_message
+
+    # Update the user's presence state
+    channel = testcase.make_request(
+        "PUT", "/presence/%s/status" % (user_id,), body, access_token=access_token
+    )
+    testcase.assertEqual(channel.code, 200)
+
+    return channel.json_body
+
+
+def sync_presence(
+    testcase: TestCase,
+    user_id: str,
+    since_token: Optional[StreamToken] = None,
+) -> Tuple[List[UserPresenceState], StreamToken]:
+    """Perform a sync request for the given user and return the user presence updates
+    they've received, as well as the next_batch token.
+
+    This method assumes testcase.sync_handler points to the homeserver's sync handler.
+
+    Args:
+        testcase: The testcase that is currently being run.
+        user_id: The ID of the user to generate a sync response for.
+        since_token: An optional token indicating the point from which to sync.
+
+    Returns:
+        A tuple containing a list of presence updates, and the sync response's
+        next_batch token.
+ """ + requester = create_requester(user_id) + sync_config = generate_sync_config(requester.user.to_string()) + sync_result = testcase.get_success( + testcase.sync_handler.wait_for_sync_for_user( + requester, sync_config, since_token + ) + ) + + return sync_result.presence, sync_result.next_batch diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py index e62586142eb3..8e950f25c53c 100644 --- a/tests/handlers/test_sync.py +++ b/tests/handlers/test_sync.py @@ -37,7 +37,7 @@ def prepare(self, reactor, clock, hs): def test_wait_for_sync_for_user_auth_blocking(self): user_id1 = "@user1:test" user_id2 = "@user2:test" - sync_config = self._generate_sync_config(user_id1) + sync_config = generate_sync_config(user_id1) requester = create_requester(user_id1) self.reactor.advance(100) # So we get not 0 time @@ -60,7 +60,7 @@ def test_wait_for_sync_for_user_auth_blocking(self): self.auth_blocking._hs_disabled = False - sync_config = self._generate_sync_config(user_id2) + sync_config = generate_sync_config(user_id2) requester = create_requester(user_id2) e = self.get_failure( @@ -69,11 +69,12 @@ def test_wait_for_sync_for_user_auth_blocking(self): ) self.assertEquals(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) - def _generate_sync_config(self, user_id): - return SyncConfig( - user=UserID(user_id.split(":")[0][1:], user_id.split(":")[1]), - filter_collection=DEFAULT_FILTER_COLLECTION, - is_guest=False, - request_key="request_key", - device_id="device_id", - ) + +def generate_sync_config(user_id: str) -> SyncConfig: + return SyncConfig( + user=UserID(user_id.split(":")[0][1:], user_id.split(":")[1]), + filter_collection=DEFAULT_FILTER_COLLECTION, + is_guest=False, + request_key="request_key", + device_id="device_id", + ) diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index edacd1b566ba..1d1fceeecf44 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -14,25 +14,37 @@ # limitations under the License. from mock import Mock +from synapse.api.constants import EduTypes from synapse.events import EventBase +from synapse.federation.units import Transaction +from synapse.handlers.presence import UserPresenceState from synapse.rest import admin -from synapse.rest.client.v1 import login, room +from synapse.rest.client.v1 import login, presence, room from synapse.types import create_requester -from tests.unittest import HomeserverTestCase +from tests.events.test_presence_router import send_presence_update, sync_presence +from tests.test_utils.event_injection import inject_member_event +from tests.unittest import FederatingHomeserverTestCase, override_config -class ModuleApiTestCase(HomeserverTestCase): +class ModuleApiTestCase(FederatingHomeserverTestCase): servlets = [ admin.register_servlets, login.register_servlets, room.register_servlets, + presence.register_servlets, ] def prepare(self, reactor, clock, homeserver): self.store = homeserver.get_datastore() self.module_api = homeserver.get_module_api() self.event_creation_handler = homeserver.get_event_creation_handler() + self.sync_handler = homeserver.get_sync_handler() + + def make_homeserver(self, reactor, clock): + return self.setup_test_homeserver( + federation_transport_client=Mock(spec=["send_transaction"]), + ) def test_can_register_user(self): """Tests that an external module can register a user""" @@ -205,3 +217,160 @@ def test_public_rooms(self): ) ) self.assertFalse(is_in_public_rooms) + + # The ability to send federation is required by send_local_online_presence_to. 
+    @override_config({"send_federation": True})
+    def test_send_local_online_presence_to(self):
+        """Tests that send_local_online_presence_to sends local online presence to local users."""
+        # Create a user who will send presence updates
+        self.presence_receiver_id = self.register_user("presence_receiver", "monkey")
+        self.presence_receiver_tok = self.login("presence_receiver", "monkey")
+
+        # And another user that will send presence updates out
+        self.presence_sender_id = self.register_user("presence_sender", "monkey")
+        self.presence_sender_tok = self.login("presence_sender", "monkey")
+
+        # Put them in a room together so they will receive each other's presence updates
+        room_id = self.helper.create_room_as(
+            self.presence_receiver_id,
+            tok=self.presence_receiver_tok,
+        )
+        self.helper.join(room_id, self.presence_sender_id, tok=self.presence_sender_tok)
+
+        # Presence sender comes online
+        send_presence_update(
+            self,
+            self.presence_sender_id,
+            self.presence_sender_tok,
+            "online",
+            "I'm online!",
+        )
+
+        # Presence receiver should have received it
+        presence_updates, sync_token = sync_presence(self, self.presence_receiver_id)
+        self.assertEqual(len(presence_updates), 1)
+
+        presence_update = presence_updates[0]  # type: UserPresenceState
+        self.assertEqual(presence_update.user_id, self.presence_sender_id)
+        self.assertEqual(presence_update.state, "online")
+
+        # Syncing again should result in no presence updates
+        presence_updates, sync_token = sync_presence(
+            self, self.presence_receiver_id, sync_token
+        )
+        self.assertEqual(len(presence_updates), 0)
+
+        # Trigger sending local online presence
+        self.get_success(
+            self.module_api.send_local_online_presence_to(
+                [
+                    self.presence_receiver_id,
+                ]
+            )
+        )
+
+        # Presence receiver should have received online presence again
+        presence_updates, sync_token = sync_presence(
+            self, self.presence_receiver_id, sync_token
+        )
+        self.assertEqual(len(presence_updates), 1)
+
+        presence_update = presence_updates[0]  # type: UserPresenceState
+        self.assertEqual(presence_update.user_id, self.presence_sender_id)
+        self.assertEqual(presence_update.state, "online")
+
+        # Presence sender goes offline
+        send_presence_update(
+            self,
+            self.presence_sender_id,
+            self.presence_sender_tok,
+            "offline",
+            "I slink back into the darkness.",
+        )
+
+        # Trigger sending local online presence
+        self.get_success(
+            self.module_api.send_local_online_presence_to(
+                [
+                    self.presence_receiver_id,
+                ]
+            )
+        )
+
+        # Presence receiver should *not* have received offline state
+        presence_updates, sync_token = sync_presence(
+            self, self.presence_receiver_id, sync_token
+        )
+        self.assertEqual(len(presence_updates), 0)
+
+    @override_config({"send_federation": True})
+    def test_send_local_online_presence_to_federation(self):
+        """Tests that send_local_online_presence_to sends local online presence to remote users."""
+        # Create a user who will send presence updates
+        self.presence_sender_id = self.register_user("presence_sender", "monkey")
+        self.presence_sender_tok = self.login("presence_sender", "monkey")
+
+        # And a room they're a part of
+        room_id = self.helper.create_room_as(
+            self.presence_sender_id,
+            tok=self.presence_sender_tok,
+        )
+
+        # Mark them as online
+        send_presence_update(
+            self,
+            self.presence_sender_id,
+            self.presence_sender_tok,
+            "online",
+            "I'm online!",
+        )
+
+        # Make up a remote user to send presence to
+        remote_user_id = "@far_away_person:island"
+
+        # Create a join membership event for the remote user into the room.
+ # This allows presence information to flow from one user to the other. + self.get_success( + inject_member_event( + self.hs, + room_id, + sender=remote_user_id, + target=remote_user_id, + membership="join", + ) + ) + + # The remote user would have received the existing room members' presence + # when they joined the room. + # + # Thus we reset the mock, and try sending online local user + # presence again + self.hs.get_federation_transport_client().send_transaction.reset_mock() + + # Broadcast local user online presence + self.get_success( + self.module_api.send_local_online_presence_to([remote_user_id]) + ) + + # Check that a presence update was sent as part of a federation transaction + found_update = False + calls = ( + self.hs.get_federation_transport_client().send_transaction.call_args_list + ) + for call in calls: + federation_transaction = call.args[0] # type: Transaction + + # Get the sent EDUs in this transaction + edus = federation_transaction.get_dict()["edus"] + + for edu in edus: + # Make sure we're only checking presence-type EDUs + if edu["edu_type"] != EduTypes.Presence: + continue + + # EDUs can contain multiple presence updates + for presence_update in edu["content"]["push"]: + if presence_update["user_id"] == self.presence_sender_id: + found_update = True + + self.assertTrue(found_update) From 0d87c6bd121d043d5474cc20e352794ff8cf5625 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 6 Apr 2021 16:32:04 +0100 Subject: [PATCH 003/222] Don't report anything from GaugeBucketCollector metrics until data is present (#8926) This PR modifies `GaugeBucketCollector` to only report data once it has been updated, rather than initially reporting a value of 0. Fixes zero values being reported for some metrics on startup until a background job to update the metric's value runs later. --- changelog.d/8926.bugfix | 1 + synapse/metrics/__init__.py | 16 +++++++++++++--- 2 files changed, 14 insertions(+), 3 deletions(-) create mode 100644 changelog.d/8926.bugfix diff --git a/changelog.d/8926.bugfix b/changelog.d/8926.bugfix new file mode 100644 index 000000000000..aad7bd83ce36 --- /dev/null +++ b/changelog.d/8926.bugfix @@ -0,0 +1 @@ +Prevent `synapse_forward_extremities` and `synapse_excess_extremity_events` Prometheus metrics from initially reporting zero-values after startup. diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index 3b499efc07d3..13a5bc4558cb 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -214,7 +214,12 @@ class GaugeBucketCollector: Prometheus, and optimise for that case. """ - __slots__ = ("_name", "_documentation", "_bucket_bounds", "_metric") + __slots__ = ( + "_name", + "_documentation", + "_bucket_bounds", + "_metric", + ) def __init__( self, @@ -242,11 +247,16 @@ def __init__( if self._bucket_bounds[-1] != float("inf"): self._bucket_bounds.append(float("inf")) - self._metric = self._values_to_metric([]) + # We initially set this to None. 
We won't report metrics until + # this has been initialised after a successful data update + self._metric = None # type: Optional[GaugeHistogramMetricFamily] + registry.register(self) def collect(self): - yield self._metric + # Don't report metrics unless we've already collected some data + if self._metric is not None: + yield self._metric def update_data(self, values: Iterable[float]): """Update the data to be reported by the metric From 48d44ab1425ffac721b7d407823c2315cda1929a Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 8 Apr 2021 08:01:14 -0400 Subject: [PATCH 004/222] Record more information into structured logs. (#9654) Records additional request information into the structured logs, e.g. the requester, IP address, etc. --- changelog.d/9654.feature | 1 + synapse/http/site.py | 112 ++++++++++++++---- synapse/logging/context.py | 70 +++++++++-- synapse/metrics/background_process_metrics.py | 18 ++- synapse/replication/tcp/protocol.py | 5 +- tests/crypto/test_keyring.py | 23 ++-- tests/logging/test_terse_json.py | 70 ++++++++++- tests/unittest.py | 2 +- tests/util/caches/test_descriptors.py | 7 +- tests/util/test_logcontext.py | 35 ++---- 10 files changed, 255 insertions(+), 88 deletions(-) create mode 100644 changelog.d/9654.feature diff --git a/changelog.d/9654.feature b/changelog.d/9654.feature new file mode 100644 index 000000000000..a54c96cf19d1 --- /dev/null +++ b/changelog.d/9654.feature @@ -0,0 +1 @@ +Include request information in structured logging output. diff --git a/synapse/http/site.py b/synapse/http/site.py index 47754aff43ca..c0c873ce3205 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -14,7 +14,7 @@ import contextlib import logging import time -from typing import Optional, Type, Union +from typing import Optional, Tuple, Type, Union import attr from zope.interface import implementer @@ -26,7 +26,11 @@ from synapse.config.server import ListenerConfig from synapse.http import get_request_user_agent, redact_uri from synapse.http.request_metrics import RequestMetrics, requests_counter -from synapse.logging.context import LoggingContext, PreserveLoggingContext +from synapse.logging.context import ( + ContextRequest, + LoggingContext, + PreserveLoggingContext, +) from synapse.types import Requester logger = logging.getLogger(__name__) @@ -63,7 +67,7 @@ def __init__(self, channel, *args, **kw): # The requester, if authenticated. For federation requests this is the # server name, for client requests this is the Requester object. - self.requester = None # type: Optional[Union[Requester, str]] + self._requester = None # type: Optional[Union[Requester, str]] # we can't yet create the logcontext, as we don't know the method. self.logcontext = None # type: Optional[LoggingContext] @@ -93,6 +97,31 @@ def __repr__(self): self.site.site_tag, ) + @property + def requester(self) -> Optional[Union[Requester, str]]: + return self._requester + + @requester.setter + def requester(self, value: Union[Requester, str]) -> None: + # Store the requester, and update some properties based on it. + + # This should only be called once. + assert self._requester is None + + self._requester = value + + # A logging context should exist by now (and have a ContextRequest). + assert self.logcontext is not None + assert self.logcontext.request is not None + + ( + requester, + authenticated_entity, + ) = self.get_authenticated_entity() + self.logcontext.request.requester = requester + # If there's no authenticated entity, it was the requester. 
+ self.logcontext.request.authenticated_entity = authenticated_entity or requester + def get_request_id(self): return "%s-%i" % (self.get_method(), self.request_seq) @@ -126,13 +155,60 @@ def get_method(self) -> str: return self.method.decode("ascii") return method + def get_authenticated_entity(self) -> Tuple[Optional[str], Optional[str]]: + """ + Get the "authenticated" entity of the request, which might be the user + performing the action, or a user being puppeted by a server admin. + + Returns: + A tuple: + The first item is a string representing the user making the request. + + The second item is a string or None representing the user who + authenticated when making this request. See + Requester.authenticated_entity. + """ + # Convert the requester into a string that we can log + if isinstance(self._requester, str): + return self._requester, None + elif isinstance(self._requester, Requester): + requester = self._requester.user.to_string() + authenticated_entity = self._requester.authenticated_entity + + # If this is a request where the target user doesn't match the user who + # authenticated (e.g. and admin is puppetting a user) then we return both. + if self._requester.user.to_string() != authenticated_entity: + return requester, authenticated_entity + + return requester, None + elif self._requester is not None: + # This shouldn't happen, but we log it so we don't lose information + # and can see that we're doing something wrong. + return repr(self._requester), None # type: ignore[unreachable] + + return None, None + def render(self, resrc): # this is called once a Resource has been found to serve the request; in our # case the Resource in question will normally be a JsonResource. # create a LogContext for this request request_id = self.get_request_id() - self.logcontext = LoggingContext(request_id, request=request_id) + self.logcontext = LoggingContext( + request_id, + request=ContextRequest( + request_id=request_id, + ip_address=self.getClientIP(), + site_tag=self.site.site_tag, + # The requester is going to be unknown at this point. + requester=None, + authenticated_entity=None, + method=self.get_method(), + url=self.get_redacted_uri(), + protocol=self.clientproto.decode("ascii", errors="replace"), + user_agent=get_request_user_agent(self), + ), + ) # override the Server header which is set by twisted self.setHeader("Server", self.site.server_version_string) @@ -277,25 +353,6 @@ def _finished_processing(self): # to the client (nb may be negative) response_send_time = self.finish_time - self._processing_finished_time - # Convert the requester into a string that we can log - authenticated_entity = None - if isinstance(self.requester, str): - authenticated_entity = self.requester - elif isinstance(self.requester, Requester): - authenticated_entity = self.requester.authenticated_entity - - # If this is a request where the target user doesn't match the user who - # authenticated (e.g. and admin is puppetting a user) then we log both. - if self.requester.user.to_string() != authenticated_entity: - authenticated_entity = "{},{}".format( - authenticated_entity, - self.requester.user.to_string(), - ) - elif self.requester is not None: - # This shouldn't happen, but we log it so we don't lose information - # and can see that we're doing something wrong. - authenticated_entity = repr(self.requester) # type: ignore[unreachable] - user_agent = get_request_user_agent(self, "-") code = str(self.code) @@ -305,6 +362,13 @@ def _finished_processing(self): code += "!" 
log_level = logging.INFO if self._should_log_request() else logging.DEBUG + + # If this is a request where the target user doesn't match the user who + # authenticated (e.g. and admin is puppetting a user) then we log both. + requester, authenticated_entity = self.get_authenticated_entity() + if authenticated_entity: + requester = "{}.{}".format(authenticated_entity, requester) + self.site.access_logger.log( log_level, "%s - %s - {%s}" @@ -312,7 +376,7 @@ def _finished_processing(self): ' %sB %s "%s %s %s" "%s" [%d dbevts]', self.getClientIP(), self.site.site_tag, - authenticated_entity, + requester, processing_time, response_send_time, usage.ru_utime, diff --git a/synapse/logging/context.py b/synapse/logging/context.py index 03cf3c2b8ed3..e78343f5545b 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -22,7 +22,6 @@ See doc/log_contexts.rst for details on how this works. """ - import inspect import logging import threading @@ -30,6 +29,7 @@ import warnings from typing import TYPE_CHECKING, Optional, Tuple, TypeVar, Union +import attr from typing_extensions import Literal from twisted.internet import defer, threads @@ -181,6 +181,29 @@ def __sub__(self, other: "ContextResourceUsage") -> "ContextResourceUsage": return res +@attr.s(slots=True) +class ContextRequest: + """ + A bundle of attributes from the SynapseRequest object. + + This exists to: + + * Avoid a cycle between LoggingContext and SynapseRequest. + * Be a single variable that can be passed from parent LoggingContexts to + their children. + """ + + request_id = attr.ib(type=str) + ip_address = attr.ib(type=str) + site_tag = attr.ib(type=str) + requester = attr.ib(type=Optional[str]) + authenticated_entity = attr.ib(type=Optional[str]) + method = attr.ib(type=str) + url = attr.ib(type=str) + protocol = attr.ib(type=str) + user_agent = attr.ib(type=str) + + LoggingContextOrSentinel = Union["LoggingContext", "_Sentinel"] @@ -256,7 +279,7 @@ def __init__( self, name: Optional[str] = None, parent_context: "Optional[LoggingContext]" = None, - request: Optional[str] = None, + request: Optional[ContextRequest] = None, ) -> None: self.previous_context = current_context() self.name = name @@ -281,7 +304,11 @@ def __init__( self.parent_context = parent_context if self.parent_context is not None: - self.parent_context.copy_to(self) + # we track the current request_id + self.request = self.parent_context.request + + # we also track the current scope: + self.scope = self.parent_context.scope if request is not None: # the request param overrides the request from the parent context @@ -289,7 +316,7 @@ def __init__( def __str__(self) -> str: if self.request: - return str(self.request) + return self.request.request_id return "%s@%x" % (self.name, id(self)) @classmethod @@ -556,8 +583,23 @@ def filter(self, record: logging.LogRecord) -> Literal[True]: # we end up in a death spiral of infinite loops, so let's check, for # robustness' sake. if context is not None: - # Logging is interested in the request. - record.request = context.request # type: ignore + # Logging is interested in the request ID. Note that for backwards + # compatibility this is stored as the "request" on the record. + record.request = str(context) # type: ignore + + # Add some data from the HTTP request. 
+ request = context.request + if request is None: + return True + + record.ip_address = request.ip_address # type: ignore + record.site_tag = request.site_tag # type: ignore + record.requester = request.requester # type: ignore + record.authenticated_entity = request.authenticated_entity # type: ignore + record.method = request.method # type: ignore + record.url = request.url # type: ignore + record.protocol = request.protocol # type: ignore + record.user_agent = request.user_agent # type: ignore return True @@ -630,8 +672,8 @@ def set_current_context(context: LoggingContextOrSentinel) -> LoggingContextOrSe def nested_logging_context(suffix: str) -> LoggingContext: """Creates a new logging context as a child of another. - The nested logging context will have a 'request' made up of the parent context's - request, plus the given suffix. + The nested logging context will have a 'name' made up of the parent context's + name, plus the given suffix. CPU/db usage stats will be added to the parent context's on exit. @@ -641,7 +683,7 @@ def nested_logging_context(suffix: str) -> LoggingContext: # ... do stuff Args: - suffix: suffix to add to the parent context's 'request'. + suffix: suffix to add to the parent context's 'name'. Returns: LoggingContext: new logging context. @@ -653,11 +695,17 @@ def nested_logging_context(suffix: str) -> LoggingContext: ) parent_context = None prefix = "" + request = None else: assert isinstance(curr_context, LoggingContext) parent_context = curr_context - prefix = str(parent_context.request) - return LoggingContext(parent_context=parent_context, request=prefix + "-" + suffix) + prefix = str(parent_context.name) + request = parent_context.request + return LoggingContext( + prefix + "-" + suffix, + parent_context=parent_context, + request=request, + ) def preserve_fn(f): diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index b56986d8e753..e8a9096c039e 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -16,7 +16,7 @@ import logging import threading from functools import wraps -from typing import TYPE_CHECKING, Dict, Optional, Set +from typing import TYPE_CHECKING, Dict, Optional, Set, Union from prometheus_client.core import REGISTRY, Counter, Gauge @@ -199,11 +199,11 @@ async def run(): _background_process_start_count.labels(desc).inc() _background_process_in_flight_count.labels(desc).inc() - with BackgroundProcessLoggingContext(desc, "%s-%i" % (desc, count)) as context: + with BackgroundProcessLoggingContext(desc, count) as context: try: ctx = noop_context_manager() if bg_start_span: - ctx = start_active_span(desc, tags={"request_id": context.request}) + ctx = start_active_span(desc, tags={"request_id": str(context)}) with ctx: return await maybe_awaitable(func(*args, **kwargs)) except Exception: @@ -242,13 +242,19 @@ class BackgroundProcessLoggingContext(LoggingContext): processes. 
""" - __slots__ = ["_proc"] + __slots__ = ["_id", "_proc"] - def __init__(self, name: str, request: Optional[str] = None): - super().__init__(name, request=request) + def __init__(self, name: str, id: Optional[Union[int, str]] = None): + super().__init__(name) + self._id = id self._proc = _BackgroundProcess(name, self) + def __str__(self) -> str: + if self._id is not None: + return "%s-%s" % (self.name, self._id) + return "%s@%x" % (self.name, id(self)) + def start(self, rusage: "Optional[resource._RUsage]"): """Log context has started running (again).""" diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py index e829add2578d..d10d57424618 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -184,8 +184,9 @@ def __init__(self, clock: Clock, handler: "ReplicationCommandHandler"): # a logcontext which we use for processing incoming commands. We declare it as a # background process so that the CPU stats get reported to prometheus. - ctx_name = "replication-conn-%s" % self.conn_id - self._logging_context = BackgroundProcessLoggingContext(ctx_name, ctx_name) + self._logging_context = BackgroundProcessLoggingContext( + "replication-conn", self.conn_id + ) def connectionMade(self): logger.info("[%s] Connection established", self.id()) diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 30fcc4c1bfcc..946482b7e706 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -16,6 +16,7 @@ from mock import Mock +import attr import canonicaljson import signedjson.key import signedjson.sign @@ -68,6 +69,11 @@ def sign_response(self, res): signedjson.sign.sign_json(res, self.server_name, self.key) +@attr.s(slots=True) +class FakeRequest: + id = attr.ib() + + @logcontext_clean class KeyringTestCase(unittest.HomeserverTestCase): def check_context(self, val, expected): @@ -89,7 +95,7 @@ def test_verify_json_objects_for_server_awaits_previous_requests(self): first_lookup_deferred = Deferred() async def first_lookup_fetch(keys_to_fetch): - self.assertEquals(current_context().request, "context_11") + self.assertEquals(current_context().request.id, "context_11") self.assertEqual(keys_to_fetch, {"server10": {get_key_id(key1): 0}}) await make_deferred_yieldable(first_lookup_deferred) @@ -102,9 +108,7 @@ async def first_lookup_fetch(keys_to_fetch): mock_fetcher.get_keys.side_effect = first_lookup_fetch async def first_lookup(): - with LoggingContext("context_11") as context_11: - context_11.request = "context_11" - + with LoggingContext("context_11", request=FakeRequest("context_11")): res_deferreds = kr.verify_json_objects_for_server( [("server10", json1, 0, "test10"), ("server11", {}, 0, "test11")] ) @@ -130,7 +134,7 @@ async def first_lookup(): # should block rather than start a second call async def second_lookup_fetch(keys_to_fetch): - self.assertEquals(current_context().request, "context_12") + self.assertEquals(current_context().request.id, "context_12") return { "server10": { get_key_id(key1): FetchKeyResult(get_verify_key(key1), 100) @@ -142,9 +146,7 @@ async def second_lookup_fetch(keys_to_fetch): second_lookup_state = [0] async def second_lookup(): - with LoggingContext("context_12") as context_12: - context_12.request = "context_12" - + with LoggingContext("context_12", request=FakeRequest("context_12")): res_deferreds_2 = kr.verify_json_objects_for_server( [("server10", json1, 0, "test")] ) @@ -589,10 +591,7 @@ def get_key_id(key): @defer.inlineCallbacks def run_in_context(f, 
*args, **kwargs): - with LoggingContext("testctx") as ctx: - # we set the "request" prop to make it easier to follow what's going on in the - # logs. - ctx.request = "testctx" + with LoggingContext("testctx"): rv = yield f(*args, **kwargs) return rv diff --git a/tests/logging/test_terse_json.py b/tests/logging/test_terse_json.py index 48a74e2eee75..bfe0d11c9364 100644 --- a/tests/logging/test_terse_json.py +++ b/tests/logging/test_terse_json.py @@ -12,15 +12,20 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import json import logging -from io import StringIO +from io import BytesIO, StringIO + +from mock import Mock, patch + +from twisted.web.server import Request +from synapse.http.site import SynapseRequest from synapse.logging._terse_json import JsonFormatter, TerseJsonFormatter from synapse.logging.context import LoggingContext, LoggingContextFilter from tests.logging import LoggerCleanupMixin +from tests.server import FakeChannel from tests.unittest import TestCase @@ -120,7 +125,7 @@ def test_with_context(self): handler.addFilter(LoggingContextFilter()) logger = self.get_logger(handler) - with LoggingContext(request="test"): + with LoggingContext("name"): logger.info("Hello there, %s!", "wally") log = self.get_log_line() @@ -134,4 +139,61 @@ def test_with_context(self): ] self.assertCountEqual(log.keys(), expected_log_keys) self.assertEqual(log["log"], "Hello there, wally!") - self.assertEqual(log["request"], "test") + self.assertTrue(log["request"].startswith("name@")) + + def test_with_request_context(self): + """ + Information from the logging context request should be added to the JSON response. + """ + handler = logging.StreamHandler(self.output) + handler.setFormatter(JsonFormatter()) + handler.addFilter(LoggingContextFilter()) + logger = self.get_logger(handler) + + # A full request isn't needed here. + site = Mock(spec=["site_tag", "server_version_string", "getResourceFor"]) + site.site_tag = "test-site" + site.server_version_string = "Server v1" + request = SynapseRequest(FakeChannel(site, None)) + # Call requestReceived to finish instantiating the object. + request.content = BytesIO() + # Partially skip some of the internal processing of SynapseRequest. + request._started_processing = Mock() + request.request_metrics = Mock(spec=["name"]) + with patch.object(Request, "render"): + request.requestReceived(b"POST", b"/_matrix/client/versions", b"1.1") + + # Also set the requester to ensure the processing works. + request.requester = "@foo:test" + + with LoggingContext(parent_context=request.logcontext): + logger.info("Hello there, %s!", "wally") + + log = self.get_log_line() + + # The terse logger includes additional request information, if possible. 
+ expected_log_keys = [ + "log", + "level", + "namespace", + "request", + "ip_address", + "site_tag", + "requester", + "authenticated_entity", + "method", + "url", + "protocol", + "user_agent", + ] + self.assertCountEqual(log.keys(), expected_log_keys) + self.assertEqual(log["log"], "Hello there, wally!") + self.assertTrue(log["request"].startswith("POST-")) + self.assertEqual(log["ip_address"], "127.0.0.1") + self.assertEqual(log["site_tag"], "test-site") + self.assertEqual(log["requester"], "@foo:test") + self.assertEqual(log["authenticated_entity"], "@foo:test") + self.assertEqual(log["method"], "POST") + self.assertEqual(log["url"], "/_matrix/client/versions") + self.assertEqual(log["protocol"], "1.1") + self.assertEqual(log["user_agent"], "") diff --git a/tests/unittest.py b/tests/unittest.py index 58a4daa1ec86..57b6a395c791 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -471,7 +471,7 @@ def setup_test_homeserver(self, *args, **kwargs): kwargs["config"] = config_obj async def run_bg_updates(): - with LoggingContext("run_bg_updates", request="run_bg_updates-1"): + with LoggingContext("run_bg_updates"): while not await stor.db_pool.updates.has_completed_background_updates(): await stor.db_pool.updates.do_next_background_update(1) diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index afb11b9caf2d..e434e21aeee6 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -661,14 +661,13 @@ def fn(self, arg1, arg2): @descriptors.cachedList("fn", "args1") async def list_fn(self, args1, arg2): - assert current_context().request == "c1" + assert current_context().name == "c1" # we want this to behave like an asynchronous function await run_on_reactor() - assert current_context().request == "c1" + assert current_context().name == "c1" return self.mock(args1, arg2) - with LoggingContext() as c1: - c1.request = "c1" + with LoggingContext("c1") as c1: obj = Cls() obj.mock.return_value = {10: "fish", 20: "chips"} d1 = obj.list_fn([10, 20], 2) diff --git a/tests/util/test_logcontext.py b/tests/util/test_logcontext.py index 58ee918f6533..5d9c4665aa58 100644 --- a/tests/util/test_logcontext.py +++ b/tests/util/test_logcontext.py @@ -17,11 +17,10 @@ class LoggingContextTestCase(unittest.TestCase): def _check_test_key(self, value): - self.assertEquals(current_context().request, value) + self.assertEquals(current_context().name, value) def test_with_context(self): - with LoggingContext() as context_one: - context_one.request = "test" + with LoggingContext("test"): self._check_test_key("test") @defer.inlineCallbacks @@ -30,15 +29,13 @@ def test_sleep(self): @defer.inlineCallbacks def competing_callback(): - with LoggingContext() as competing_context: - competing_context.request = "competing" + with LoggingContext("competing"): yield clock.sleep(0) self._check_test_key("competing") reactor.callLater(0, competing_callback) - with LoggingContext() as context_one: - context_one.request = "one" + with LoggingContext("one"): yield clock.sleep(0) self._check_test_key("one") @@ -47,9 +44,7 @@ def _test_run_in_background(self, function): callback_completed = [False] - with LoggingContext() as context_one: - context_one.request = "one" - + with LoggingContext("one"): # fire off function, but don't wait on it. 
d2 = run_in_background(function) @@ -133,9 +128,7 @@ def blocking_function(): sentinel_context = current_context() - with LoggingContext() as context_one: - context_one.request = "one" - + with LoggingContext("one"): d1 = make_deferred_yieldable(blocking_function()) # make sure that the context was reset by make_deferred_yieldable self.assertIs(current_context(), sentinel_context) @@ -149,9 +142,7 @@ def blocking_function(): def test_make_deferred_yieldable_with_chained_deferreds(self): sentinel_context = current_context() - with LoggingContext() as context_one: - context_one.request = "one" - + with LoggingContext("one"): d1 = make_deferred_yieldable(_chained_deferred_function()) # make sure that the context was reset by make_deferred_yieldable self.assertIs(current_context(), sentinel_context) @@ -166,9 +157,7 @@ def test_make_deferred_yieldable_on_non_deferred(self): """Check that make_deferred_yieldable does the right thing when its argument isn't actually a deferred""" - with LoggingContext() as context_one: - context_one.request = "one" - + with LoggingContext("one"): d1 = make_deferred_yieldable("bum") self._check_test_key("one") @@ -177,9 +166,9 @@ def test_make_deferred_yieldable_on_non_deferred(self): self._check_test_key("one") def test_nested_logging_context(self): - with LoggingContext(request="foo"): + with LoggingContext("foo"): nested_context = nested_logging_context(suffix="bar") - self.assertEqual(nested_context.request, "foo-bar") + self.assertEqual(nested_context.name, "foo-bar") @defer.inlineCallbacks def test_make_deferred_yieldable_with_await(self): @@ -193,9 +182,7 @@ async def blocking_function(): sentinel_context = current_context() - with LoggingContext() as context_one: - context_one.request = "one" - + with LoggingContext("one"): d1 = make_deferred_yieldable(blocking_function()) # make sure that the context was reset by make_deferred_yieldable self.assertIs(current_context(), sentinel_context) From 452991527a3e76715f0571f81f036ea20de0be36 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 8 Apr 2021 08:28:32 -0400 Subject: [PATCH 005/222] MSC3083: Check for space membership during a local join of restricted rooms. (#9735) When joining a room with join rules set to 'restricted', check if the user is a member of the spaces defined in the 'allow' key of the join rules. This only applies to an experimental room version, as defined in MSC3083. --- changelog.d/9735.feature | 1 + scripts-dev/complement.sh | 2 +- synapse/handlers/room_member.py | 75 ++++++++++++++++++++++++++++++++- 3 files changed, 76 insertions(+), 2 deletions(-) create mode 100644 changelog.d/9735.feature diff --git a/changelog.d/9735.feature b/changelog.d/9735.feature new file mode 100644 index 000000000000..c2c74f13d578 --- /dev/null +++ b/changelog.d/9735.feature @@ -0,0 +1 @@ +Add experimental support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083): restricting room access via group membership. diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index 31cc20a826b9..b77187472ffe 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -46,4 +46,4 @@ if [[ -n "$1" ]]; then fi # Run the tests! 
-COMPLEMENT_BASE_IMAGE=complement-synapse go test -v -tags synapse_blacklist -count=1 $EXTRA_COMPLEMENT_ARGS ./tests +COMPLEMENT_BASE_IMAGE=complement-synapse go test -v -tags synapse_blacklist,msc3083 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 1cf12f3255bc..894ef859f4d4 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -20,7 +20,7 @@ from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple from synapse import types -from synapse.api.constants import AccountDataTypes, EventTypes, Membership +from synapse.api.constants import AccountDataTypes, EventTypes, JoinRules, Membership from synapse.api.errors import ( AuthError, Codes, @@ -29,6 +29,7 @@ SynapseError, ) from synapse.api.ratelimiting import Ratelimiter +from synapse.api.room_versions import RoomVersion from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.types import JsonDict, Requester, RoomAlias, RoomID, StateMap, UserID @@ -178,6 +179,62 @@ async def ratelimit_invite( await self._invites_per_user_limiter.ratelimit(requester, invitee_user_id) + async def _can_join_without_invite( + self, state_ids: StateMap[str], room_version: RoomVersion, user_id: str + ) -> bool: + """ + Check whether a user can join a room without an invite. + + When joining a room with restricted joined rules (as defined in MSC3083), + the membership of spaces must be checked during join. + + Args: + state_ids: The state of the room as it currently is. + room_version: The room version of the room being joined. + user_id: The user joining the room. + + Returns: + True if the user can join the room, false otherwise. + """ + # This only applies to room versions which support the new join rule. + if not room_version.msc3083_join_rules: + return True + + # If there's no join rule, then it defaults to public (so this doesn't apply). + join_rules_event_id = state_ids.get((EventTypes.JoinRules, ""), None) + if not join_rules_event_id: + return True + + # If the join rule is not restricted, this doesn't apply. + join_rules_event = await self.store.get_event(join_rules_event_id) + if join_rules_event.content.get("join_rule") != JoinRules.MSC3083_RESTRICTED: + return True + + # If allowed is of the wrong form, then only allow invited users. + allowed_spaces = join_rules_event.content.get("allow", []) + if not isinstance(allowed_spaces, list): + return False + + # Get the list of joined rooms and see if there's an overlap. + joined_rooms = await self.store.get_rooms_for_user(user_id) + + # Pull out the other room IDs, invalid data gets filtered. + for space in allowed_spaces: + if not isinstance(space, dict): + continue + + space_id = space.get("space") + if not isinstance(space_id, str): + continue + + # The user was joined to one of the spaces specified, they can join + # this room! + if space_id in joined_rooms: + return True + + # The user was not in any of the required spaces. 
+ return False + async def _local_membership_update( self, requester: Requester, @@ -235,9 +292,25 @@ async def _local_membership_update( if event.membership == Membership.JOIN: newly_joined = True + user_is_invited = False if prev_member_event_id: prev_member_event = await self.store.get_event(prev_member_event_id) newly_joined = prev_member_event.membership != Membership.JOIN + user_is_invited = prev_member_event.membership == Membership.INVITE + + # If the member is not already in the room and is not accepting an invite, + # check if they should be allowed access via membership in a space. + if ( + newly_joined + and not user_is_invited + and not await self._can_join_without_invite( + prev_state_ids, event.room_version, user_id + ) + ): + raise AuthError( + 403, + "You do not belong to any of the required spaces to join this room.", + ) # Only rate-limit if the user actually joined the room, otherwise we'll end # up blocking profile updates. From cb657eb2f85b29fc8e6ea7b692677ec63d76472b Mon Sep 17 00:00:00 2001 From: Johannes Wienke Date: Thu, 8 Apr 2021 14:49:14 +0200 Subject: [PATCH 006/222] Put opencontainers labels to the final image (#9765) They don't make any sense on the intermediate builder image. The final images needs them to be of use for anyone. Signed-off-by: Johannes Wienke --- changelog.d/9765.docker | 1 + docker/Dockerfile | 10 +++++----- 2 files changed, 6 insertions(+), 5 deletions(-) create mode 100644 changelog.d/9765.docker diff --git a/changelog.d/9765.docker b/changelog.d/9765.docker new file mode 100644 index 000000000000..f170a3671447 --- /dev/null +++ b/changelog.d/9765.docker @@ -0,0 +1 @@ +Move opencontainers labels to the final Docker image such that users can inspect them. diff --git a/docker/Dockerfile b/docker/Dockerfile index 5b7bf02776d6..4f5cd06d7294 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -18,11 +18,6 @@ ARG PYTHON_VERSION=3.8 ### FROM docker.io/python:${PYTHON_VERSION}-slim as builder -LABEL org.opencontainers.image.url='https://matrix.org/docs/projects/server/synapse' -LABEL org.opencontainers.image.documentation='https://github.com/matrix-org/synapse/blob/master/docker/README.md' -LABEL org.opencontainers.image.source='https://github.com/matrix-org/synapse.git' -LABEL org.opencontainers.image.licenses='Apache-2.0' - # install the OS build deps RUN apt-get update && apt-get install -y \ build-essential \ @@ -66,6 +61,11 @@ RUN pip install --prefix="/install" --no-deps --no-warn-script-location /synapse FROM docker.io/python:${PYTHON_VERSION}-slim +LABEL org.opencontainers.image.url='https://matrix.org/docs/projects/server/synapse' +LABEL org.opencontainers.image.documentation='https://github.com/matrix-org/synapse/blob/master/docker/README.md' +LABEL org.opencontainers.image.source='https://github.com/matrix-org/synapse.git' +LABEL org.opencontainers.image.licenses='Apache-2.0' + RUN apt-get update && apt-get install -y \ curl \ gosu \ From 5edd91caecbdb7b8fefaf016cb452eefe3f56772 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 8 Apr 2021 16:21:32 +0100 Subject: [PATCH 007/222] Fix incompatibility with tox 2.5 Apparently on tox 2.5, `usedevelop` overrides `skip_install`, so we end up trying to install the full dependencies even for the `-old` environment. 
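Concretely: an environment that sets only `skip_install=True` still
inherits `usedevelop=true` from the base `[testenv]` section, and on
tox 2.5 the project gets installed anyway. The defence, applied to each
affected environment in the hunks below, is to pin both settings
explicitly, e.g.:

    [testenv:py35-old]
    skip_install = true
    usedevelop = false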
--- changelog.d/9769.misc | 1 + tox.ini | 18 ++++++++++++------ 2 files changed, 13 insertions(+), 6 deletions(-) create mode 100644 changelog.d/9769.misc diff --git a/changelog.d/9769.misc b/changelog.d/9769.misc new file mode 100644 index 000000000000..042a50615f06 --- /dev/null +++ b/changelog.d/9769.misc @@ -0,0 +1 @@ +Fix incompatibility with `tox` 2.5. diff --git a/tox.ini b/tox.ini index 9ff70fe312c9..8224edaef94e 100644 --- a/tox.ini +++ b/tox.ini @@ -104,7 +104,8 @@ usedevelop=true # A test suite for the oldest supported versions of Python libraries, to catch # any uses of APIs not available in them. [testenv:py35-old] -skip_install=True +skip_install = true +usedevelop = false deps = # Old automat version for Twisted Automat == 0.3.0 @@ -136,7 +137,8 @@ commands = python -m synmark {posargs:} [testenv:packaging] -skip_install=True +skip_install = true +usedevelop = false deps = check-manifest commands = @@ -154,7 +156,8 @@ extras = lint commands = isort -c --df --sp setup.cfg {[base]lint_targets} [testenv:check-newsfragment] -skip_install = True +skip_install = true +usedevelop = false deps = towncrier>=18.6.0rc1 commands = python -m towncrier.check --compare-with=origin/develop @@ -163,7 +166,8 @@ commands = commands = {toxinidir}/scripts-dev/generate_sample_config --check [testenv:combine] -skip_install = True +skip_install = true +usedevelop = false deps = coverage pip>=10 ; python_version >= '3.6' @@ -173,14 +177,16 @@ commands= coverage report [testenv:cov-erase] -skip_install = True +skip_install = true +usedevelop = false deps = coverage commands= coverage erase [testenv:cov-html] -skip_install = True +skip_install = true +usedevelop = false deps = coverage commands= From 906065c75b629e0d2962359e3f5f531b1ba4fbc1 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 8 Apr 2021 16:41:35 +0100 Subject: [PATCH 008/222] unpin olddeps build from py36 --- .buildkite/scripts/test_old_deps.sh | 6 +++--- tox.ini | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.buildkite/scripts/test_old_deps.sh b/.buildkite/scripts/test_old_deps.sh index 9fe5b696b0f7..3753f41a4033 100755 --- a/.buildkite/scripts/test_old_deps.sh +++ b/.buildkite/scripts/test_old_deps.sh @@ -1,16 +1,16 @@ #!/usr/bin/env bash # this script is run by buildkite in a plain `xenial` container; it installs the -# minimal requirements for tox and hands over to the py35-old tox environment. +# minimal requirements for tox and hands over to the py3-old tox environment. set -ex apt-get update -apt-get install -y python3.5 python3.5-dev python3-pip libxml2-dev libxslt-dev xmlsec1 zlib1g-dev tox +apt-get install -y python3 python3-dev python3-pip libxml2-dev libxslt-dev xmlsec1 zlib1g-dev tox export LANG="C.UTF-8" # Prevent virtualenv from auto-updating pip to an incompatible version export VIRTUALENV_NO_DOWNLOAD=1 -exec tox -e py35-old,combine +exec tox -e py3-old,combine diff --git a/tox.ini b/tox.ini index 8224edaef94e..b2bc6f23ef2e 100644 --- a/tox.ini +++ b/tox.ini @@ -74,7 +74,7 @@ commands = # we use "env" rather than putting a value in `setenv` so that it is not # inherited by other tox environments. # - # keep this in sync with the copy in `testenv:py35-old`. + # keep this in sync with the copy in `testenv:py3-old`. 
# /usr/bin/env COVERAGE_PROCESS_START={toxinidir}/.coveragerc "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:} @@ -103,7 +103,7 @@ usedevelop=true # A test suite for the oldest supported versions of Python libraries, to catch # any uses of APIs not available in them. -[testenv:py35-old] +[testenv:py3-old] skip_install = true usedevelop = false deps = From abade34633da2002014a1dd7e4da3f558e610582 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 8 Apr 2021 12:00:47 +0100 Subject: [PATCH 009/222] Require py36 and Postgres 9.6 --- changelog.d/9766.feature | 1 + setup.py | 2 +- synapse/storage/engines/postgres.py | 4 ++-- 3 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/9766.feature diff --git a/changelog.d/9766.feature b/changelog.d/9766.feature new file mode 100644 index 000000000000..030bdb456199 --- /dev/null +++ b/changelog.d/9766.feature @@ -0,0 +1 @@ +Synapse now requires Python 3.6 or later and Postgres 9.6 or later. diff --git a/setup.py b/setup.py index 29e9971dc1d8..4e9e333c6089 100755 --- a/setup.py +++ b/setup.py @@ -123,7 +123,7 @@ def exec_file(path_segments): zip_safe=False, long_description=long_description, long_description_content_type="text/x-rst", - python_requires="~=3.5", + python_requires="~=3.6", classifiers=[ "Development Status :: 5 - Production/Stable", "Topic :: Communications :: Chat", diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py index 80a3558aec3e..d95f88b3e98b 100644 --- a/synapse/storage/engines/postgres.py +++ b/synapse/storage/engines/postgres.py @@ -47,8 +47,8 @@ def check_database(self, db_conn, allow_outdated_version: bool = False): self._version = db_conn.server_version # Are we on a supported PostgreSQL version? - if not allow_outdated_version and self._version < 90500: - raise RuntimeError("Synapse requires PostgreSQL 9.5+ or above.") + if not allow_outdated_version and self._version < 90600: + raise RuntimeError("Synapse requires PostgreSQL 9.6 or above.") with db_conn.cursor() as txn: txn.execute("SHOW SERVER_ENCODING") From 3ada9b42640137d908fa2db64e78f3f79d11dd3a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 8 Apr 2021 13:45:19 +0100 Subject: [PATCH 010/222] Drop support for sqlite<3.22 as well --- changelog.d/9766.feature | 2 +- synapse/storage/database.py | 62 ++++------------------------- synapse/storage/engines/_base.py | 8 ---- synapse/storage/engines/postgres.py | 7 ---- synapse/storage/engines/sqlite.py | 15 +++---- tests/storage/test_database.py | 12 +----- 6 files changed, 14 insertions(+), 92 deletions(-) diff --git a/changelog.d/9766.feature b/changelog.d/9766.feature index 030bdb456199..ecf49cfee1a3 100644 --- a/changelog.d/9766.feature +++ b/changelog.d/9766.feature @@ -1 +1 @@ -Synapse now requires Python 3.6 or later and Postgres 9.6 or later. +Synapse now requires Python 3.6 or later. It also requires Postgres 9.6 or later or SQLite 3.22 or later. 
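Raising the SQLite floor past 3.15 means every supported database engine can evaluate SQL row values natively, which is what allows the diff below to delete the emulated "(a > ? OR (a == ? AND b > ?))" fallback from make_tuple_comparison_clause. A hedged usage sketch of the simplified helper (the table, columns and cursor variables here are illustrative, not taken from this patch):

    # Keyset pagination: fetch rows strictly after the last
    # (stream_ordering, topological_ordering) pair already processed.
    clause, args = make_tuple_comparison_clause(
        db_engine,
        [("stream_ordering", last_stream), ("topological_ordering", last_topo)],
    )
    # With native row values this renders as:
    #   (stream_ordering,topological_ordering) > (?,?)
    sql = (
        "SELECT event_id FROM events WHERE %s"
        " ORDER BY stream_ordering, topological_ordering LIMIT 100"
    ) % (clause,)
    txn.execute(sql, args)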
diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 94590e7b458d..a2f016a7ef4d 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -2060,68 +2060,20 @@ def make_in_list_sql_clause( def make_tuple_comparison_clause( - database_engine: BaseDatabaseEngine, keys: List[Tuple[str, KV]] + _database_engine: BaseDatabaseEngine, keys: List[Tuple[str, KV]] ) -> Tuple[str, List[KV]]: """Returns a tuple comparison SQL clause - Depending what the SQL engine supports, builds a SQL clause that looks like either - "(a, b) > (?, ?)", or "(a > ?) OR (a == ? AND b > ?)". + Builds a SQL clause that looks like "(a, b) > (?, ?)" Args: - database_engine + _database_engine keys: A set of (column, value) pairs to be compared. Returns: A tuple of SQL query and the args """ - if database_engine.supports_tuple_comparison: - return ( - "(%s) > (%s)" % (",".join(k[0] for k in keys), ",".join("?" for _ in keys)), - [k[1] for k in keys], - ) - - # we want to build a clause - # (a > ?) OR - # (a == ? AND b > ?) OR - # (a == ? AND b == ? AND c > ?) - # ... - # (a == ? AND b == ? AND ... AND z > ?) - # - # or, equivalently: - # - # (a > ? OR (a == ? AND - # (b > ? OR (b == ? AND - # ... - # (y > ? OR (y == ? AND - # z > ? - # )) - # ... - # )) - # )) - # - # which itself is equivalent to (and apparently easier for the query optimiser): - # - # (a >= ? AND (a > ? OR - # (b >= ? AND (b > ? OR - # ... - # (y >= ? AND (y > ? OR - # z > ? - # )) - # ... - # )) - # )) - # - # - - clause = "" - args = [] # type: List[KV] - for k, v in keys[:-1]: - clause = clause + "(%s >= ? AND (%s > ? OR " % (k, k) - args.extend([v, v]) - - (k, v) = keys[-1] - clause += "%s > ?" % (k,) - args.append(v) - - clause += "))" * (len(keys) - 1) - return clause, args + return ( + "(%s) > (%s)" % (",".join(k[0] for k in keys), ",".join("?" for _ in keys)), + [k[1] for k in keys], + ) diff --git a/synapse/storage/engines/_base.py b/synapse/storage/engines/_base.py index cca839c70f59..21db1645d3c5 100644 --- a/synapse/storage/engines/_base.py +++ b/synapse/storage/engines/_base.py @@ -42,14 +42,6 @@ def can_native_upsert(self) -> bool: """ ... - @property - @abc.abstractmethod - def supports_tuple_comparison(self) -> bool: - """ - Do we support comparing tuples, i.e. `(a, b) > (c, d)`? - """ - ... - @property @abc.abstractmethod def supports_using_any_list(self) -> bool: diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py index d95f88b3e98b..dba8cc51d357 100644 --- a/synapse/storage/engines/postgres.py +++ b/synapse/storage/engines/postgres.py @@ -129,13 +129,6 @@ def can_native_upsert(self): """ return True - @property - def supports_tuple_comparison(self): - """ - Do we support comparing tuples, i.e. `(a, b) > (c, d)`? - """ - return True - @property def supports_using_any_list(self): """Do we support using `a = ANY(?)` and passing a list""" diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py index b87e7798daab..f4f16456f2ab 100644 --- a/synapse/storage/engines/sqlite.py +++ b/synapse/storage/engines/sqlite.py @@ -56,14 +56,6 @@ def can_native_upsert(self): """ return self.module.sqlite_version_info >= (3, 24, 0) - @property - def supports_tuple_comparison(self): - """ - Do we support comparing tuples, i.e. `(a, b) > (c, d)`? This requires - SQLite 3.15+. 
- """ - return self.module.sqlite_version_info >= (3, 15, 0) - @property def supports_using_any_list(self): """Do we support using `a = ANY(?)` and passing a list""" @@ -72,8 +64,11 @@ def supports_using_any_list(self): def check_database(self, db_conn, allow_outdated_version: bool = False): if not allow_outdated_version: version = self.module.sqlite_version_info - if version < (3, 11, 0): - raise RuntimeError("Synapse requires sqlite 3.11 or above.") + # Synapse is untested against older SQLite versions, and we don't want + # to let users upgrade to a version of Synapse with broken support for their + # sqlite version, because it risks leaving them with a half-upgraded db. + if version < (3, 22, 0): + raise RuntimeError("Synapse requires sqlite 3.22 or above.") def check_new_database(self, txn): """Gets called when setting up a brand new database. This allows us to diff --git a/tests/storage/test_database.py b/tests/storage/test_database.py index 5a77c84962d9..1bba58fc03a9 100644 --- a/tests/storage/test_database.py +++ b/tests/storage/test_database.py @@ -36,17 +36,7 @@ def _stub_db_engine(**kwargs) -> BaseDatabaseEngine: class TupleComparisonClauseTestCase(unittest.TestCase): def test_native_tuple_comparison(self): - db_engine = _stub_db_engine(supports_tuple_comparison=True) + db_engine = _stub_db_engine() clause, args = make_tuple_comparison_clause(db_engine, [("a", 1), ("b", 2)]) self.assertEqual(clause, "(a,b) > (?,?)") self.assertEqual(args, [1, 2]) - - def test_emulated_tuple_comparison(self): - db_engine = _stub_db_engine(supports_tuple_comparison=False) - clause, args = make_tuple_comparison_clause( - db_engine, [("a", 1), ("b", 2), ("c", 3)] - ) - self.assertEqual( - clause, "(a >= ? AND (a > ? OR (b >= ? AND (b > ? OR c > ?))))" - ) - self.assertEqual(args, [1, 1, 2, 2, 3]) From 9278eb701e7f5358f80e416f3330e6bc380d100f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 8 Apr 2021 12:03:12 +0100 Subject: [PATCH 011/222] drop support for stretch and xenial --- scripts-dev/build_debian_packages | 2 -- 1 file changed, 2 deletions(-) diff --git a/scripts-dev/build_debian_packages b/scripts-dev/build_debian_packages index d0685c8b35fd..bddc441df276 100755 --- a/scripts-dev/build_debian_packages +++ b/scripts-dev/build_debian_packages @@ -18,11 +18,9 @@ import threading from concurrent.futures import ThreadPoolExecutor DISTS = ( - "debian:stretch", "debian:buster", "debian:bullseye", "debian:sid", - "ubuntu:xenial", "ubuntu:bionic", "ubuntu:focal", "ubuntu:groovy", From 04ff88139a70bcc852480229672626de7e620b9f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 8 Apr 2021 12:09:48 +0100 Subject: [PATCH 012/222] Update tox.ini to remove py35 --- tox.ini | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/tox.ini b/tox.ini index b2bc6f23ef2e..998b04b22442 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,8 @@ [tox] -envlist = packaging, py35, py36, py37, py38, py39, check_codestyle, check_isort +envlist = packaging, py36, py37, py38, py39, check_codestyle, check_isort + +# we require tox>=2.3.2 for the fix to https://github.com/tox-dev/tox/issues/208 +minversion = 2.3.2 [base] deps = @@ -48,6 +51,7 @@ deps = extras = # install the optional dependendencies for tox environments without # '-noextras' in their name + # (this requires tox 3) !noextras: all test @@ -74,8 +78,6 @@ commands = # we use "env" rather than putting a value in `setenv` so that it is not # inherited by other tox environments. 
# - # keep this in sync with the copy in `testenv:py3-old`. - # /usr/bin/env COVERAGE_PROCESS_START={toxinidir}/.coveragerc "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:} # As of twisted 16.4, trial tries to import the tests as a package (previously @@ -121,11 +123,7 @@ commands = # Install Synapse itself. This won't update any libraries. pip install -e ".[test]" - # we have to duplicate the command from `testenv` rather than refer to it - # as `{[testenv]commands}`, because we run on ubuntu xenial, which has - # tox 2.3.1, and https://github.com/tox-dev/tox/issues/208. - # - /usr/bin/env COVERAGE_PROCESS_START={toxinidir}/.coveragerc "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:} + {[testenv]commands} [testenv:benchmark] deps = From 77e56deffcf685ca5d0a264059f0c326e53e99af Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 8 Apr 2021 15:26:49 +0100 Subject: [PATCH 013/222] update test_old_deps script --- .buildkite/scripts/test_old_deps.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.buildkite/scripts/test_old_deps.sh b/.buildkite/scripts/test_old_deps.sh index 3753f41a4033..9270d55f0461 100755 --- a/.buildkite/scripts/test_old_deps.sh +++ b/.buildkite/scripts/test_old_deps.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# this script is run by buildkite in a plain `xenial` container; it installs the +# this script is run by buildkite in a plain `bionic` container; it installs the # minimal requirements for tox and hands over to the py3-old tox environment. set -ex From 3a569fb2000e972efe2e145d57ffd9441ee41665 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 8 Apr 2021 17:30:01 +0100 Subject: [PATCH 014/222] Fix sharded federation sender sometimes using 100% CPU. We pull all destinations requiring catchup from the DB in batches. However, if all those destinations get filtered out (due to the federation sender being sharded), then the `last_processed` destination doesn't get updated, and we keep requesting the same set repeatedly. --- changelog.d/9770.bugfix | 1 + synapse/federation/sender/__init__.py | 6 ++++-- 2 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 changelog.d/9770.bugfix diff --git a/changelog.d/9770.bugfix b/changelog.d/9770.bugfix new file mode 100644 index 000000000000..baf93138de41 --- /dev/null +++ b/changelog.d/9770.bugfix @@ -0,0 +1 @@ +Fix bug where sharded federation senders could get stuck repeatedly querying the DB in a loop, using lots of CPU. 
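The changelog entry above summarises the failure mode; the essence of the fix in the diff below is to advance the batch cursor before filtering, not after. A minimal sketch of the pattern (names illustrative, not Synapse's actual code):

    def wake_destinations_needing_catchup(fetch_batch, should_handle, wake):
        last_processed = None
        while True:
            batch = fetch_batch(after=last_processed)
            if not batch:
                break
            # Record progress even if this shard filters out every entry;
            # otherwise the same batch is re-fetched forever, pinning a CPU.
            last_processed = batch[-1]
            for destination in batch:
                if should_handle(destination):
                    wake(destination)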
diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 98bfce22ffd6..d821dcbf6a69 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -734,16 +734,18 @@ async def _wake_destinations_needing_catchup(self) -> None: self._catchup_after_startup_timer = None break + last_processed = destinations_to_wake[-1] + destinations_to_wake = [ d for d in destinations_to_wake if self._federation_shard_config.should_handle(self._instance_name, d) ] - for last_processed in destinations_to_wake: + for destination in destinations_to_wake: logger.info( "Destination %s has outstanding catch-up, waking up.", last_processed, ) - self.wake_destination(last_processed) + self.wake_destination(destination) await self.clock.sleep(CATCH_UP_STARTUP_INTERVAL_SEC) From 24c58ebfc992eec4b37c5aead8d9eef8e7afdd16 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 8 Apr 2021 18:29:57 +0100 Subject: [PATCH 015/222] remove unused param on `make_tuple_comparison_clause` --- synapse/storage/database.py | 5 +---- synapse/storage/databases/main/client_ips.py | 1 - synapse/storage/databases/main/devices.py | 2 +- synapse/storage/databases/main/events_bg_updates.py | 1 - tests/storage/test_database.py | 3 +-- 5 files changed, 3 insertions(+), 9 deletions(-) diff --git a/synapse/storage/database.py b/synapse/storage/database.py index a2f016a7ef4d..b302cd57864c 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -2059,15 +2059,12 @@ def make_in_list_sql_clause( KV = TypeVar("KV") -def make_tuple_comparison_clause( - _database_engine: BaseDatabaseEngine, keys: List[Tuple[str, KV]] -) -> Tuple[str, List[KV]]: +def make_tuple_comparison_clause(keys: List[Tuple[str, KV]]) -> Tuple[str, List[KV]]: """Returns a tuple comparison SQL clause Builds a SQL clause that looks like "(a, b) > (?, ?)" Args: - _database_engine keys: A set of (column, value) pairs to be compared. Returns: diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py index 6d18e692b0a9..ea3c15fd0ee5 100644 --- a/synapse/storage/databases/main/client_ips.py +++ b/synapse/storage/databases/main/client_ips.py @@ -298,7 +298,6 @@ def _devices_last_seen_update_txn(txn): # times, which is fine. 
where_clause, where_args = make_tuple_comparison_clause( - self.database_engine, [("user_id", last_user_id), ("device_id", last_device_id)], ) diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index d327e9aa0b8c..9bf8ba888f76 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -985,7 +985,7 @@ async def _remove_duplicate_outbound_pokes(self, progress, batch_size): def _txn(txn): clause, args = make_tuple_comparison_clause( - self.db_pool.engine, [(x, last_row[x]) for x in KEY_COLS] + [(x, last_row[x]) for x in KEY_COLS] ) sql = """ SELECT stream_id, destination, user_id, device_id, MAX(ts) AS ts diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index 78367ea58ded..79e7df6ca9c1 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -838,7 +838,6 @@ def _calculate_chain_cover_txn( # We want to do a `(topological_ordering, stream_ordering) > (?,?)` # comparison, but that is not supported on older SQLite versions tuple_clause, tuple_args = make_tuple_comparison_clause( - self.database_engine, [ ("events.room_id", last_room_id), ("topological_ordering", last_depth), diff --git a/tests/storage/test_database.py b/tests/storage/test_database.py index 1bba58fc03a9..a906d30e73b3 100644 --- a/tests/storage/test_database.py +++ b/tests/storage/test_database.py @@ -36,7 +36,6 @@ def _stub_db_engine(**kwargs) -> BaseDatabaseEngine: class TupleComparisonClauseTestCase(unittest.TestCase): def test_native_tuple_comparison(self): - db_engine = _stub_db_engine() - clause, args = make_tuple_comparison_clause(db_engine, [("a", 1), ("b", 2)]) + clause, args = make_tuple_comparison_clause([("a", 1), ("b", 2)]) self.assertEqual(clause, "(a,b) > (?,?)") self.assertEqual(args, [1, 2]) From 2ca4e349e9d0c606d802ae15c06089080fa4f27e Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Thu, 8 Apr 2021 23:38:54 +0200 Subject: [PATCH 016/222] Bugbear: Add Mutable Parameter fixes (#9682) Part of #9366 Adds in fixes for B006 and B008, both relating to mutable parameter lint errors. 
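For readers unfamiliar with these lint codes: Python evaluates default arguments once, at function definition time, so a mutable default such as `[]` or `{}` is silently shared between calls. A minimal illustration of the hazard and of the `Optional[...] = None` idiom the diff applies throughout (example only, not code from the patch):

    def append_bad(item, acc=[]):      # B006: one shared list for all calls
        acc.append(item)
        return acc

    def append_good(item, acc=None):
        acc = acc or []                # a fresh list on each call
        acc.append(item)
        return acc

    print(append_bad(1), append_bad(2))    # [1, 2] [1, 2]
    print(append_good(1), append_good(2))  # [1] [2]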
Signed-off-by: Jonathan de Jong --- changelog.d/9682.misc | 1 + contrib/cmdclient/console.py | 5 +++- contrib/cmdclient/http.py | 24 +++++++++++++---- setup.cfg | 4 +-- synapse/appservice/scheduler.py | 6 ++--- synapse/config/ratelimiting.py | 6 +++-- synapse/events/__init__.py | 14 +++++++--- synapse/federation/units.py | 5 ++-- synapse/handlers/appservice.py | 4 +-- synapse/handlers/federation.py | 2 +- synapse/handlers/message.py | 11 +++++--- synapse/handlers/register.py | 4 ++- synapse/handlers/sync.py | 8 +++--- synapse/http/client.py | 4 +-- synapse/http/proxyagent.py | 6 +++-- synapse/logging/opentracing.py | 3 ++- synapse/module_api/__init__.py | 12 +++++---- synapse/notifier.py | 19 ++++++++------ synapse/storage/database.py | 20 +++++++++----- synapse/storage/databases/main/events.py | 7 +++-- .../storage/databases/main/group_server.py | 4 ++- synapse/storage/databases/main/state.py | 6 +++-- synapse/storage/databases/state/bg_updates.py | 5 +++- synapse/storage/databases/state/store.py | 5 ++-- synapse/storage/state.py | 26 ++++++++++++------- synapse/storage/util/id_generators.py | 11 ++++++-- synapse/util/caches/lrucache.py | 14 +++++----- .../test_matrix_federation_agent.py | 15 ++++++++--- tests/replication/_base.py | 4 +-- .../replication/slave/storage/test_events.py | 16 +++++++----- tests/rest/client/v1/test_rooms.py | 5 +++- tests/rest/client/v1/utils.py | 14 +++++++--- tests/rest/client/v2_alpha/test_relations.py | 5 ++-- tests/storage/test_id_generators.py | 14 +++++----- tests/storage/test_redaction.py | 10 +++++-- tests/test_state.py | 5 ++-- tests/test_visibility.py | 7 +++-- tests/util/test_ratelimitutils.py | 6 +++-- 38 files changed, 224 insertions(+), 113 deletions(-) create mode 100644 changelog.d/9682.misc diff --git a/changelog.d/9682.misc b/changelog.d/9682.misc new file mode 100644 index 000000000000..428a466facfb --- /dev/null +++ b/changelog.d/9682.misc @@ -0,0 +1 @@ +Introduce flake8-bugbear to the test suite and fix some of its lint violations. diff --git a/contrib/cmdclient/console.py b/contrib/cmdclient/console.py index 67e032244ecc..856dd437db91 100755 --- a/contrib/cmdclient/console.py +++ b/contrib/cmdclient/console.py @@ -24,6 +24,7 @@ import time import urllib from http import TwistedHttpClient +from typing import Optional import nacl.encoding import nacl.signing @@ -718,7 +719,7 @@ def _run_and_pprint( method, path, data=None, - query_params={"access_token": None}, + query_params: Optional[dict] = None, alt_text=None, ): """Runs an HTTP request and pretty prints the output. 
@@ -729,6 +730,8 @@ def _run_and_pprint( data: Raw JSON data if any query_params: dict of query parameters to add to the url """ + query_params = query_params or {"access_token": None} + url = self._url() + path if "access_token" in query_params: query_params["access_token"] = self._tok() diff --git a/contrib/cmdclient/http.py b/contrib/cmdclient/http.py index 851e80c25bb4..1cf913756e6a 100644 --- a/contrib/cmdclient/http.py +++ b/contrib/cmdclient/http.py @@ -16,6 +16,7 @@ import json import urllib from pprint import pformat +from typing import Optional from twisted.internet import defer, reactor from twisted.web.client import Agent, readBody @@ -85,8 +86,9 @@ def get_json(self, url, args=None): body = yield readBody(response) defer.returnValue(json.loads(body)) - def _create_put_request(self, url, json_data, headers_dict={}): + def _create_put_request(self, url, json_data, headers_dict: Optional[dict] = None): """Wrapper of _create_request to issue a PUT request""" + headers_dict = headers_dict or {} if "Content-Type" not in headers_dict: raise defer.error(RuntimeError("Must include Content-Type header for PUTs")) @@ -95,14 +97,22 @@ def _create_put_request(self, url, json_data, headers_dict={}): "PUT", url, producer=_JsonProducer(json_data), headers_dict=headers_dict ) - def _create_get_request(self, url, headers_dict={}): + def _create_get_request(self, url, headers_dict: Optional[dict] = None): """Wrapper of _create_request to issue a GET request""" - return self._create_request("GET", url, headers_dict=headers_dict) + return self._create_request("GET", url, headers_dict=headers_dict or {}) @defer.inlineCallbacks def do_request( - self, method, url, data=None, qparams=None, jsonreq=True, headers={} + self, + method, + url, + data=None, + qparams=None, + jsonreq=True, + headers: Optional[dict] = None, ): + headers = headers or {} + if qparams: url = "%s?%s" % (url, urllib.urlencode(qparams, True)) @@ -123,8 +133,12 @@ def do_request( defer.returnValue(json.loads(body)) @defer.inlineCallbacks - def _create_request(self, method, url, producer=None, headers_dict={}): + def _create_request( + self, method, url, producer=None, headers_dict: Optional[dict] = None + ): """Creates and sends a request to the given url""" + headers_dict = headers_dict or {} + headers_dict["User-Agent"] = ["Synapse Cmd Client"] retries_left = 5 diff --git a/setup.cfg b/setup.cfg index 7329eed213d7..5fdb51ac7397 100644 --- a/setup.cfg +++ b/setup.cfg @@ -18,8 +18,8 @@ ignore = # E203: whitespace before ':' (which is contrary to pep8?) # E731: do not assign a lambda expression, use a def # E501: Line too long (black enforces this for us) -# B00*: Subsection of the bugbear suite (TODO: add in remaining fixes) -ignore=W503,W504,E203,E731,E501,B006,B007,B008 +# B007: Subsection of the bugbear suite (TODO: add in remaining fixes) +ignore=W503,W504,E203,E731,E501,B007 [isort] line_length = 88 diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index 366c476f807a..5203ffe90fdd 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -49,7 +49,7 @@ components. 
""" import logging -from typing import List +from typing import List, Optional from synapse.appservice import ApplicationService, ApplicationServiceState from synapse.events import EventBase @@ -191,11 +191,11 @@ async def send( self, service: ApplicationService, events: List[EventBase], - ephemeral: List[JsonDict] = [], + ephemeral: Optional[List[JsonDict]] = None, ): try: txn = await self.store.create_appservice_txn( - service=service, events=events, ephemeral=ephemeral + service=service, events=events, ephemeral=ephemeral or [] ) service_is_up = await self._is_service_up(service) if service_is_up: diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py index 3f3997f4e53b..7a8d5851c40b 100644 --- a/synapse/config/ratelimiting.py +++ b/synapse/config/ratelimiting.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Dict +from typing import Dict, Optional from ._base import Config @@ -21,8 +21,10 @@ class RateLimitConfig: def __init__( self, config: Dict[str, float], - defaults={"per_second": 0.17, "burst_count": 3.0}, + defaults: Optional[Dict[str, float]] = None, ): + defaults = defaults or {"per_second": 0.17, "burst_count": 3.0} + self.per_second = config.get("per_second", defaults["per_second"]) self.burst_count = int(config.get("burst_count", defaults["burst_count"])) diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index 8f6b955d17b7..f9032e36977f 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -330,9 +330,11 @@ def __init__( self, event_dict: JsonDict, room_version: RoomVersion, - internal_metadata_dict: JsonDict = {}, + internal_metadata_dict: Optional[JsonDict] = None, rejected_reason: Optional[str] = None, ): + internal_metadata_dict = internal_metadata_dict or {} + event_dict = dict(event_dict) # Signatures is a dict of dicts, and this is faster than doing a @@ -386,9 +388,11 @@ def __init__( self, event_dict: JsonDict, room_version: RoomVersion, - internal_metadata_dict: JsonDict = {}, + internal_metadata_dict: Optional[JsonDict] = None, rejected_reason: Optional[str] = None, ): + internal_metadata_dict = internal_metadata_dict or {} + event_dict = dict(event_dict) # Signatures is a dict of dicts, and this is faster than doing a @@ -507,9 +511,11 @@ def _event_type_from_format_version(format_version: int) -> Type[EventBase]: def make_event_from_dict( event_dict: JsonDict, room_version: RoomVersion = RoomVersions.V1, - internal_metadata_dict: JsonDict = {}, + internal_metadata_dict: Optional[JsonDict] = None, rejected_reason: Optional[str] = None, ) -> EventBase: """Construct an EventBase from the given event dict""" event_type = _event_type_from_format_version(room_version.event_format) - return event_type(event_dict, room_version, internal_metadata_dict, rejected_reason) + return event_type( + event_dict, room_version, internal_metadata_dict or {}, rejected_reason + ) diff --git a/synapse/federation/units.py b/synapse/federation/units.py index b662c4262120..0f8bf000ac3d 100644 --- a/synapse/federation/units.py +++ b/synapse/federation/units.py @@ -18,6 +18,7 @@ """ import logging +from typing import Optional import attr @@ -98,7 +99,7 @@ class Transaction(JsonEncodedObject): "pdus", ] - def __init__(self, transaction_id=None, pdus=[], **kwargs): + def __init__(self, transaction_id=None, pdus: Optional[list] = None, **kwargs): """If we include a list of pdus then we decode then as PDU's automatically. 
""" @@ -107,7 +108,7 @@ def __init__(self, transaction_id=None, pdus=[], **kwargs): if "edus" in kwargs and not kwargs["edus"]: del kwargs["edus"] - super().__init__(transaction_id=transaction_id, pdus=pdus, **kwargs) + super().__init__(transaction_id=transaction_id, pdus=pdus or [], **kwargs) @staticmethod def create_new(pdus, **kwargs): diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 996f9e5debc8..9fb7ee335d38 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -182,7 +182,7 @@ def notify_interested_services_ephemeral( self, stream_key: str, new_token: Optional[int], - users: Collection[Union[str, UserID]] = [], + users: Optional[Collection[Union[str, UserID]]] = None, ): """This is called by the notifier in the background when a ephemeral event handled by the homeserver. @@ -215,7 +215,7 @@ def notify_interested_services_ephemeral( # We only start a new background process if necessary rather than # optimistically (to cut down on overhead). self._notify_interested_services_ephemeral( - services, stream_key, new_token, users + services, stream_key, new_token, users or [] ) @wrap_as_background_process("notify_interested_services_ephemeral") diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 5ea8a7b6034b..67888898ffb5 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1790,7 +1790,7 @@ async def _make_and_verify_event( room_id: str, user_id: str, membership: str, - content: JsonDict = {}, + content: JsonDict, params: Optional[Dict[str, Union[str, Iterable[str]]]] = None, ) -> Tuple[str, EventBase, RoomVersion]: ( diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 6069968f7f34..125dae6d2577 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -137,7 +137,7 @@ async def get_state_events( self, user_id: str, room_id: str, - state_filter: StateFilter = StateFilter.all(), + state_filter: Optional[StateFilter] = None, at_token: Optional[StreamToken] = None, is_guest: bool = False, ) -> List[dict]: @@ -164,6 +164,8 @@ async def get_state_events( AuthError (403) if the user doesn't have permission to view members of this room. """ + state_filter = state_filter or StateFilter.all() + if at_token: # FIXME this claims to get the state at a stream position, but # get_recent_events_for_room operates by topo ordering. This therefore @@ -874,7 +876,7 @@ async def handle_new_client_event( event: EventBase, context: EventContext, ratelimit: bool = True, - extra_users: List[UserID] = [], + extra_users: Optional[List[UserID]] = None, ignore_shadow_ban: bool = False, ) -> EventBase: """Processes a new event. @@ -902,6 +904,7 @@ async def handle_new_client_event( Raises: ShadowBanError if the requester has been shadow-banned. """ + extra_users = extra_users or [] # we don't apply shadow-banning to membership events here. Invites are blocked # higher up the stack, and we allow shadow-banned users to send join and leave @@ -1071,7 +1074,7 @@ async def persist_and_notify_client_event( event: EventBase, context: EventContext, ratelimit: bool = True, - extra_users: List[UserID] = [], + extra_users: Optional[List[UserID]] = None, ) -> EventBase: """Called when we have fully built the event, have already calculated the push actions for the event, and checked auth. @@ -1083,6 +1086,8 @@ async def persist_and_notify_client_event( it was de-duplicated (e.g. because we had already persisted an event with the same transaction ID.) 
""" + extra_users = extra_users or [] + assert self.storage.persistence is not None assert self._events_shard_config.should_handle( self._instance_name, event.room_id diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 9701b76d0f91..3b6660c873f7 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -169,7 +169,7 @@ async def register_user( user_type: Optional[str] = None, default_display_name: Optional[str] = None, address: Optional[str] = None, - bind_emails: Iterable[str] = [], + bind_emails: Optional[Iterable[str]] = None, by_admin: bool = False, user_agent_ips: Optional[List[Tuple[str, str]]] = None, auth_provider_id: Optional[str] = None, @@ -204,6 +204,8 @@ async def register_user( Raises: SynapseError if there was a problem registering. """ + bind_emails = bind_emails or [] + await self.check_registration_ratelimit(address) result = await self.spam_checker.check_registration_for_spam( diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index ff11266c6723..f8d88ef77be9 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -548,7 +548,7 @@ async def _load_filtered_recents( ) async def get_state_after_event( - self, event: EventBase, state_filter: StateFilter = StateFilter.all() + self, event: EventBase, state_filter: Optional[StateFilter] = None ) -> StateMap[str]: """ Get the room state after the given event @@ -558,7 +558,7 @@ async def get_state_after_event( state_filter: The state filter used to fetch state from the database. """ state_ids = await self.state_store.get_state_ids_for_event( - event.event_id, state_filter=state_filter + event.event_id, state_filter=state_filter or StateFilter.all() ) if event.is_state(): state_ids = dict(state_ids) @@ -569,7 +569,7 @@ async def get_state_at( self, room_id: str, stream_position: StreamToken, - state_filter: StateFilter = StateFilter.all(), + state_filter: Optional[StateFilter] = None, ) -> StateMap[str]: """Get the room state at a particular stream position @@ -589,7 +589,7 @@ async def get_state_at( if last_events: last_event = last_events[-1] state = await self.get_state_after_event( - last_event, state_filter=state_filter + last_event, state_filter=state_filter or StateFilter.all() ) else: diff --git a/synapse/http/client.py b/synapse/http/client.py index e691ba6d8888..f7a07f0466c5 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -297,7 +297,7 @@ class SimpleHttpClient: def __init__( self, hs: "HomeServer", - treq_args: Dict[str, Any] = {}, + treq_args: Optional[Dict[str, Any]] = None, ip_whitelist: Optional[IPSet] = None, ip_blacklist: Optional[IPSet] = None, use_proxy: bool = False, @@ -317,7 +317,7 @@ def __init__( self._ip_whitelist = ip_whitelist self._ip_blacklist = ip_blacklist - self._extra_treq_args = treq_args + self._extra_treq_args = treq_args or {} self.user_agent = hs.version_string self.clock = hs.get_clock() diff --git a/synapse/http/proxyagent.py b/synapse/http/proxyagent.py index 16ec850064dd..ea5ad14cb07c 100644 --- a/synapse/http/proxyagent.py +++ b/synapse/http/proxyagent.py @@ -27,7 +27,7 @@ from twisted.web.client import URI, BrowserLikePolicyForHTTPS, _AgentBase from twisted.web.error import SchemeNotSupported from twisted.web.http_headers import Headers -from twisted.web.iweb import IAgent +from twisted.web.iweb import IAgent, IPolicyForHTTPS from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint @@ -88,12 +88,14 @@ def __init__( self, reactor, proxy_reactor=None, - 
contextFactory=BrowserLikePolicyForHTTPS(), + contextFactory: Optional[IPolicyForHTTPS] = None, connectTimeout=None, bindAddress=None, pool=None, use_proxy=False, ): + contextFactory = contextFactory or BrowserLikePolicyForHTTPS() + _AgentBase.__init__(self, reactor, pool) if proxy_reactor is None: diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index b8081f197e87..bfe9136fd8df 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -486,7 +486,7 @@ def start_active_span_from_request( def start_active_span_from_edu( edu_content, operation_name, - references=[], + references: Optional[list] = None, tags=None, start_time=None, ignore_active_span=False, @@ -501,6 +501,7 @@ def start_active_span_from_edu( For the other args see opentracing.tracer """ + references = references or [] if opentracing is None: return noop_context_manager() diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 3ecd46c0382a..ca1bd4cdc96a 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import TYPE_CHECKING, Any, Generator, Iterable, Optional, Tuple +from typing import TYPE_CHECKING, Any, Generator, Iterable, List, Optional, Tuple from twisted.internet import defer @@ -127,7 +127,7 @@ def check_user_exists(self, user_id): return defer.ensureDeferred(self._auth_handler.check_user_exists(user_id)) @defer.inlineCallbacks - def register(self, localpart, displayname=None, emails=[]): + def register(self, localpart, displayname=None, emails: Optional[List[str]] = None): """Registers a new user with given localpart and optional displayname, emails. Also returns an access token for the new user. @@ -147,11 +147,13 @@ def register(self, localpart, displayname=None, emails=[]): logger.warning( "Using deprecated ModuleApi.register which creates a dummy user device." ) - user_id = yield self.register_user(localpart, displayname, emails) + user_id = yield self.register_user(localpart, displayname, emails or []) _, access_token = yield self.register_device(user_id) return user_id, access_token - def register_user(self, localpart, displayname=None, emails=[]): + def register_user( + self, localpart, displayname=None, emails: Optional[List[str]] = None + ): """Registers a new user with given localpart and optional displayname, emails. 
Args: @@ -170,7 +172,7 @@ def register_user(self, localpart, displayname=None, emails=[]): self._hs.get_registration_handler().register_user( localpart=localpart, default_display_name=displayname, - bind_emails=emails, + bind_emails=emails or [], ) ) diff --git a/synapse/notifier.py b/synapse/notifier.py index c178db57e31f..7ce34380af3c 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -276,7 +276,7 @@ def on_new_room_event( event: EventBase, event_pos: PersistedEventPosition, max_room_stream_token: RoomStreamToken, - extra_users: Collection[UserID] = [], + extra_users: Optional[Collection[UserID]] = None, ): """Unwraps event and calls `on_new_room_event_args`.""" self.on_new_room_event_args( @@ -286,7 +286,7 @@ def on_new_room_event( state_key=event.get("state_key"), membership=event.content.get("membership"), max_room_stream_token=max_room_stream_token, - extra_users=extra_users, + extra_users=extra_users or [], ) def on_new_room_event_args( @@ -297,7 +297,7 @@ def on_new_room_event_args( membership: Optional[str], event_pos: PersistedEventPosition, max_room_stream_token: RoomStreamToken, - extra_users: Collection[UserID] = [], + extra_users: Optional[Collection[UserID]] = None, ): """Used by handlers to inform the notifier something has happened in the room, room event wise. @@ -313,7 +313,7 @@ def on_new_room_event_args( self.pending_new_room_events.append( _PendingRoomEventEntry( event_pos=event_pos, - extra_users=extra_users, + extra_users=extra_users or [], room_id=room_id, type=event_type, state_key=state_key, @@ -382,14 +382,14 @@ def _notify_app_services_ephemeral( self, stream_key: str, new_token: Union[int, RoomStreamToken], - users: Collection[Union[str, UserID]] = [], + users: Optional[Collection[Union[str, UserID]]] = None, ): try: stream_token = None if isinstance(new_token, int): stream_token = new_token self.appservice_handler.notify_interested_services_ephemeral( - stream_key, stream_token, users + stream_key, stream_token, users or [] ) except Exception: logger.exception("Error notifying application services of event") @@ -404,13 +404,16 @@ def on_new_event( self, stream_key: str, new_token: Union[int, RoomStreamToken], - users: Collection[Union[str, UserID]] = [], - rooms: Collection[str] = [], + users: Optional[Collection[Union[str, UserID]]] = None, + rooms: Optional[Collection[str]] = None, ): """Used to inform listeners that something has happened event wise. Will wake up all listeners for the given users and rooms. """ + users = users or [] + rooms = rooms or [] + with Measure(self.clock, "on_new_event"): user_streams = set() diff --git a/synapse/storage/database.py b/synapse/storage/database.py index b302cd57864c..fa15b0ce5bc4 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -900,7 +900,7 @@ async def simple_upsert( table: str, keyvalues: Dict[str, Any], values: Dict[str, Any], - insertion_values: Dict[str, Any] = {}, + insertion_values: Optional[Dict[str, Any]] = None, desc: str = "simple_upsert", lock: bool = True, ) -> Optional[bool]: @@ -927,6 +927,8 @@ async def simple_upsert( Native upserts always return None. Emulated upserts return True if a new entry was created, False if an existing one was updated. 
""" + insertion_values = insertion_values or {} + attempts = 0 while True: try: @@ -964,7 +966,7 @@ def simple_upsert_txn( table: str, keyvalues: Dict[str, Any], values: Dict[str, Any], - insertion_values: Dict[str, Any] = {}, + insertion_values: Optional[Dict[str, Any]] = None, lock: bool = True, ) -> Optional[bool]: """ @@ -982,6 +984,8 @@ def simple_upsert_txn( Native upserts always return None. Emulated upserts return True if a new entry was created, False if an existing one was updated. """ + insertion_values = insertion_values or {} + if self.engine.can_native_upsert and table not in self._unsafe_to_upsert_tables: self.simple_upsert_txn_native_upsert( txn, table, keyvalues, values, insertion_values=insertion_values @@ -1003,7 +1007,7 @@ def simple_upsert_txn_emulated( table: str, keyvalues: Dict[str, Any], values: Dict[str, Any], - insertion_values: Dict[str, Any] = {}, + insertion_values: Optional[Dict[str, Any]] = None, lock: bool = True, ) -> bool: """ @@ -1017,6 +1021,8 @@ def simple_upsert_txn_emulated( Returns True if a new entry was created, False if an existing one was updated. """ + insertion_values = insertion_values or {} + # We need to lock the table :(, unless we're *really* careful if lock: self.engine.lock_table(txn, table) @@ -1077,7 +1083,7 @@ def simple_upsert_txn_native_upsert( table: str, keyvalues: Dict[str, Any], values: Dict[str, Any], - insertion_values: Dict[str, Any] = {}, + insertion_values: Optional[Dict[str, Any]] = None, ) -> None: """ Use the native UPSERT functionality in recent PostgreSQL versions. @@ -1090,7 +1096,7 @@ def simple_upsert_txn_native_upsert( """ allvalues = {} # type: Dict[str, Any] allvalues.update(keyvalues) - allvalues.update(insertion_values) + allvalues.update(insertion_values or {}) if not values: latter = "NOTHING" @@ -1513,7 +1519,7 @@ async def simple_select_many_batch( column: str, iterable: Iterable[Any], retcols: Iterable[str], - keyvalues: Dict[str, Any] = {}, + keyvalues: Optional[Dict[str, Any]] = None, desc: str = "simple_select_many_batch", batch_size: int = 100, ) -> List[Any]: @@ -1531,6 +1537,8 @@ async def simple_select_many_batch( desc: description of the transaction, for logging and metrics batch_size: the number of rows for each select query """ + keyvalues = keyvalues or {} + results = [] # type: List[Dict[str, Any]] if not iterable: diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 98dac19a9525..ad17123915b4 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -320,8 +320,8 @@ def _persist_events_txn( txn: LoggingTransaction, events_and_contexts: List[Tuple[EventBase, EventContext]], backfilled: bool, - state_delta_for_room: Dict[str, DeltaState] = {}, - new_forward_extremeties: Dict[str, List[str]] = {}, + state_delta_for_room: Optional[Dict[str, DeltaState]] = None, + new_forward_extremeties: Optional[Dict[str, List[str]]] = None, ): """Insert some number of room events into the necessary database tables. @@ -342,6 +342,9 @@ def _persist_events_txn( extremities. 
""" + state_delta_for_room = state_delta_for_room or {} + new_forward_extremeties = new_forward_extremeties or {} + all_events_and_contexts = events_and_contexts min_stream_order = events_and_contexts[0][0].internal_metadata.stream_ordering diff --git a/synapse/storage/databases/main/group_server.py b/synapse/storage/databases/main/group_server.py index 8f462dfc315e..bd7826f4e9b7 100644 --- a/synapse/storage/databases/main/group_server.py +++ b/synapse/storage/databases/main/group_server.py @@ -1171,7 +1171,7 @@ async def register_user_group_membership( user_id: str, membership: str, is_admin: bool = False, - content: JsonDict = {}, + content: Optional[JsonDict] = None, local_attestation: Optional[dict] = None, remote_attestation: Optional[dict] = None, is_publicised: bool = False, @@ -1192,6 +1192,8 @@ async def register_user_group_membership( is_publicised: Whether this should be publicised. """ + content = content or {} + def _register_user_group_membership_txn(txn, next_id): # TODO: Upsert? self.db_pool.simple_delete_txn( diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py index a7f371732fd7..93431efe0023 100644 --- a/synapse/storage/databases/main/state.py +++ b/synapse/storage/databases/main/state.py @@ -190,7 +190,7 @@ def _get_current_state_ids_txn(txn): # FIXME: how should this be cached? async def get_filtered_current_state_ids( - self, room_id: str, state_filter: StateFilter = StateFilter.all() + self, room_id: str, state_filter: Optional[StateFilter] = None ) -> StateMap[str]: """Get the current state event of a given type for a room based on the current_state_events table. This may not be as up-to-date as the result @@ -205,7 +205,9 @@ async def get_filtered_current_state_ids( Map from type/state_key to event ID. """ - where_clause, where_args = state_filter.make_sql_filter_clause() + where_clause, where_args = ( + state_filter or StateFilter.all() + ).make_sql_filter_clause() if not where_clause: # We delegate to the cached version diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py index 1fd333b707e1..75c09b3687fd 100644 --- a/synapse/storage/databases/state/bg_updates.py +++ b/synapse/storage/databases/state/bg_updates.py @@ -14,6 +14,7 @@ # limitations under the License. 
import logging +from typing import Optional from synapse.storage._base import SQLBaseStore from synapse.storage.database import DatabasePool @@ -73,8 +74,10 @@ def _count_state_group_hops_txn(self, txn, state_group): return count def _get_state_groups_from_groups_txn( - self, txn, groups, state_filter=StateFilter.all() + self, txn, groups, state_filter: Optional[StateFilter] = None ): + state_filter = state_filter or StateFilter.all() + results = {group: {} for group in groups} where_clause, where_args = state_filter.make_sql_filter_clause() diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index 97ec65f757e3..dfcf89d91c9b 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -15,7 +15,7 @@ import logging from collections import namedtuple -from typing import Dict, Iterable, List, Set, Tuple +from typing import Dict, Iterable, List, Optional, Set, Tuple from synapse.api.constants import EventTypes from synapse.storage._base import SQLBaseStore @@ -210,7 +210,7 @@ def _get_state_for_group_using_cache(self, cache, group, state_filter): return state_filter.filter_state(state_dict_ids), not missing_types async def _get_state_for_groups( - self, groups: Iterable[int], state_filter: StateFilter = StateFilter.all() + self, groups: Iterable[int], state_filter: Optional[StateFilter] = None ) -> Dict[int, MutableStateMap[str]]: """Gets the state at each of a list of state groups, optionally filtering by type/state_key @@ -223,6 +223,7 @@ async def _get_state_for_groups( Returns: Dict of state group to state map. """ + state_filter = state_filter or StateFilter.all() member_filter, non_member_filter = state_filter.get_member_split() diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 2e277a21c458..c1c147c62ac2 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -449,7 +449,7 @@ def _get_state_groups_from_groups( return self.stores.state._get_state_groups_from_groups(groups, state_filter) async def get_state_for_events( - self, event_ids: Iterable[str], state_filter: StateFilter = StateFilter.all() + self, event_ids: Iterable[str], state_filter: Optional[StateFilter] = None ) -> Dict[str, StateMap[EventBase]]: """Given a list of event_ids and type tuples, return a list of state dicts for each event. 
@@ -465,7 +465,7 @@ async def get_state_for_events( groups = set(event_to_groups.values()) group_to_state = await self.stores.state._get_state_for_groups( - groups, state_filter + groups, state_filter or StateFilter.all() ) state_event_map = await self.stores.main.get_events( @@ -485,7 +485,7 @@ async def get_state_for_events( return {event: event_to_state[event] for event in event_ids} async def get_state_ids_for_events( - self, event_ids: Iterable[str], state_filter: StateFilter = StateFilter.all() + self, event_ids: Iterable[str], state_filter: Optional[StateFilter] = None ) -> Dict[str, StateMap[str]]: """ Get the state dicts corresponding to a list of events, containing the event_ids @@ -502,7 +502,7 @@ async def get_state_ids_for_events( groups = set(event_to_groups.values()) group_to_state = await self.stores.state._get_state_for_groups( - groups, state_filter + groups, state_filter or StateFilter.all() ) event_to_state = { @@ -513,7 +513,7 @@ async def get_state_ids_for_events( return {event: event_to_state[event] for event in event_ids} async def get_state_for_event( - self, event_id: str, state_filter: StateFilter = StateFilter.all() + self, event_id: str, state_filter: Optional[StateFilter] = None ) -> StateMap[EventBase]: """ Get the state dict corresponding to a particular event @@ -525,11 +525,13 @@ async def get_state_for_event( Returns: A dict from (type, state_key) -> state_event """ - state_map = await self.get_state_for_events([event_id], state_filter) + state_map = await self.get_state_for_events( + [event_id], state_filter or StateFilter.all() + ) return state_map[event_id] async def get_state_ids_for_event( - self, event_id: str, state_filter: StateFilter = StateFilter.all() + self, event_id: str, state_filter: Optional[StateFilter] = None ) -> StateMap[str]: """ Get the state dict corresponding to a particular event @@ -541,11 +543,13 @@ async def get_state_ids_for_event( Returns: A dict from (type, state_key) -> state_event """ - state_map = await self.get_state_ids_for_events([event_id], state_filter) + state_map = await self.get_state_ids_for_events( + [event_id], state_filter or StateFilter.all() + ) return state_map[event_id] def _get_state_for_groups( - self, groups: Iterable[int], state_filter: StateFilter = StateFilter.all() + self, groups: Iterable[int], state_filter: Optional[StateFilter] = None ) -> Awaitable[Dict[int, MutableStateMap[str]]]: """Gets the state at each of a list of state groups, optionally filtering by type/state_key @@ -558,7 +562,9 @@ def _get_state_for_groups( Returns: Dict of state group to state map. """ - return self.stores.state._get_state_for_groups(groups, state_filter) + return self.stores.state._get_state_for_groups( + groups, state_filter or StateFilter.all() + ) async def store_state_group( self, diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index d4643c4fdf30..32d6cc16b9a6 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -17,7 +17,7 @@ import threading from collections import OrderedDict from contextlib import contextmanager -from typing import Dict, List, Optional, Set, Tuple, Union +from typing import Dict, Iterable, List, Optional, Set, Tuple, Union import attr @@ -91,7 +91,14 @@ class StreamIdGenerator: # ... persist event ... 
""" - def __init__(self, db_conn, table, column, extra_tables=[], step=1): + def __init__( + self, + db_conn, + table, + column, + extra_tables: Iterable[Tuple[str, str]] = (), + step=1, + ): assert step != 0 self._lock = threading.Lock() self._step = step diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index 60bb6ff642f2..20c8e2d9f5ec 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -57,12 +57,14 @@ def enumerate_leaves(node, depth): class _Node: __slots__ = ["prev_node", "next_node", "key", "value", "callbacks"] - def __init__(self, prev_node, next_node, key, value, callbacks=set()): + def __init__( + self, prev_node, next_node, key, value, callbacks: Optional[set] = None + ): self.prev_node = prev_node self.next_node = next_node self.key = key self.value = value - self.callbacks = callbacks + self.callbacks = callbacks or set() class LruCache(Generic[KT, VT]): @@ -176,10 +178,10 @@ def cache_len(): self.len = synchronized(cache_len) - def add_node(key, value, callbacks=set()): + def add_node(key, value, callbacks: Optional[set] = None): prev_node = list_root next_node = prev_node.next_node - node = _Node(prev_node, next_node, key, value, callbacks) + node = _Node(prev_node, next_node, key, value, callbacks or set()) prev_node.next_node = node next_node.prev_node = node cache[key] = node @@ -237,7 +239,7 @@ def cache_get( def cache_get( key: KT, default: Optional[T] = None, - callbacks: Iterable[Callable[[], None]] = [], + callbacks: Iterable[Callable[[], None]] = (), update_metrics: bool = True, ): node = cache.get(key, None) @@ -253,7 +255,7 @@ def cache_get( return default @synchronized - def cache_set(key: KT, value: VT, callbacks: Iterable[Callable[[], None]] = []): + def cache_set(key: KT, value: VT, callbacks: Iterable[Callable[[], None]] = ()): node = cache.get(key, None) if node is not None: # We sometimes store large objects, e.g. dicts, which cause diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index 4c56253da549..73e12ea6c3ca 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging +from typing import Optional from mock import Mock @@ -180,7 +181,11 @@ def _make_get_request(self, uri): _check_logcontext(context) def _handle_well_known_connection( - self, client_factory, expected_sni, content, response_headers={} + self, + client_factory, + expected_sni, + content, + response_headers: Optional[dict] = None, ): """Handle an outgoing HTTPs connection: wire it up to a server, check that the request is for a .well-known, and send the response. @@ -202,10 +207,12 @@ def _handle_well_known_connection( self.assertEqual( request.requestHeaders.getRawHeaders(b"user-agent"), [b"test-agent"] ) - self._send_well_known_response(request, content, headers=response_headers) + self._send_well_known_response(request, content, headers=response_headers or {}) return well_known_server - def _send_well_known_response(self, request, content, headers={}): + def _send_well_known_response( + self, request, content, headers: Optional[dict] = None + ): """Check that an incoming request looks like a valid .well-known request, and send back the response. 
""" @@ -213,7 +220,7 @@ def _send_well_known_response(self, request, content, headers={}): self.assertEqual(request.path, b"/.well-known/matrix/server") self.assertEqual(request.requestHeaders.getRawHeaders(b"host"), [b"testserv"]) # send back a response - for k, v in headers.items(): + for k, v in (headers or {}).items(): request.setHeader(k, v) request.write(content) request.finish() diff --git a/tests/replication/_base.py b/tests/replication/_base.py index 1d4a59286241..aff19d9fb3c5 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -266,7 +266,7 @@ def create_test_resource(self): return resource def make_worker_hs( - self, worker_app: str, extra_config: dict = {}, **kwargs + self, worker_app: str, extra_config: Optional[dict] = None, **kwargs ) -> HomeServer: """Make a new worker HS instance, correctly connecting replcation stream to the master HS. @@ -283,7 +283,7 @@ def make_worker_hs( config = self._get_worker_hs_config() config["worker_app"] = worker_app - config.update(extra_config) + config.update(extra_config or {}) worker_hs = self.setup_test_homeserver( homeserver_to_use=GenericWorkerServer, diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py index 0ceb0f935cd4..333374b183cd 100644 --- a/tests/replication/slave/storage/test_events.py +++ b/tests/replication/slave/storage/test_events.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging +from typing import Iterable, Optional from canonicaljson import encode_canonical_json @@ -332,15 +333,18 @@ def build_event( room_id=ROOM_ID, type="m.room.message", key=None, - internal={}, + internal: Optional[dict] = None, depth=None, - prev_events=[], - auth_events=[], - prev_state=[], + prev_events: Optional[list] = None, + auth_events: Optional[list] = None, + prev_state: Optional[list] = None, redacts=None, - push_actions=[], + push_actions: Iterable = frozenset(), **content ): + prev_events = prev_events or [] + auth_events = auth_events or [] + prev_state = prev_state or [] if depth is None: depth = self.event_id @@ -369,7 +373,7 @@ def build_event( if redacts is not None: event_dict["redacts"] = redacts - event = make_event_from_dict(event_dict, internal_metadata_dict=internal) + event = make_event_from_dict(event_dict, internal_metadata_dict=internal or {}) self.event_id += 1 state_handler = self.hs.get_state_handler() diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index ed65f645fc2c..715414a3107d 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -19,6 +19,7 @@ """Tests REST events for /rooms paths.""" import json +from typing import Iterable from urllib import parse as urlparse from mock import Mock @@ -207,7 +208,9 @@ def test_topic_perms(self): ) self.assertEquals(403, channel.code, msg=channel.result["body"]) - def _test_get_membership(self, room=None, members=[], expect_code=None): + def _test_get_membership( + self, room=None, members: Iterable = frozenset(), expect_code=None + ): for member in members: path = "/rooms/%s/state/m.room.member/%s" % (room, member) channel = self.make_request("GET", path) diff --git a/tests/rest/client/v1/utils.py b/tests/rest/client/v1/utils.py index 946740aa5d51..8a4dddae2b2a 100644 --- a/tests/rest/client/v1/utils.py +++ b/tests/rest/client/v1/utils.py @@ -132,7 +132,7 @@ def change_membership( src: str, targ: str, membership: str, - extra_data: dict = 
{}, + extra_data: Optional[dict] = None, tok: Optional[str] = None, expect_code: int = 200, ) -> None: @@ -156,7 +156,7 @@ def change_membership( path = path + "?access_token=%s" % tok data = {"membership": membership} - data.update(extra_data) + data.update(extra_data or {}) channel = make_request( self.hs.get_reactor(), @@ -187,7 +187,13 @@ def send(self, room_id, body=None, txn_id=None, tok=None, expect_code=200): ) def send_event( - self, room_id, type, content={}, txn_id=None, tok=None, expect_code=200 + self, + room_id, + type, + content: Optional[dict] = None, + txn_id=None, + tok=None, + expect_code=200, ): if txn_id is None: txn_id = "m%s" % (str(time.time())) @@ -201,7 +207,7 @@ def send_event( self.site, "PUT", path, - json.dumps(content).encode("utf8"), + json.dumps(content or {}).encode("utf8"), ) assert ( diff --git a/tests/rest/client/v2_alpha/test_relations.py b/tests/rest/client/v2_alpha/test_relations.py index e7bb5583fc48..21ee436b919b 100644 --- a/tests/rest/client/v2_alpha/test_relations.py +++ b/tests/rest/client/v2_alpha/test_relations.py @@ -16,6 +16,7 @@ import itertools import json import urllib +from typing import Optional from synapse.api.constants import EventTypes, RelationTypes from synapse.rest import admin @@ -681,7 +682,7 @@ def _send_relation( relation_type, event_type, key=None, - content={}, + content: Optional[dict] = None, access_token=None, parent_id=None, ): @@ -713,7 +714,7 @@ def _send_relation( "POST", "/_matrix/client/unstable/rooms/%s/send_relation/%s/%s/%s%s" % (self.room, original_id, relation_type, event_type, query), - json.dumps(content).encode("utf-8"), + json.dumps(content or {}).encode("utf-8"), access_token=access_token, ) return channel diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py index aad6bc907e43..6c389fe9acd8 100644 --- a/tests/storage/test_id_generators.py +++ b/tests/storage/test_id_generators.py @@ -12,6 +12,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from typing import List, Optional + from synapse.storage.database import DatabasePool from synapse.storage.engines import IncorrectDatabaseSetup from synapse.storage.util.id_generators import MultiWriterIdGenerator @@ -43,7 +45,7 @@ def _setup_db(self, txn): ) def _create_id_generator( - self, instance_name="master", writers=["master"] + self, instance_name="master", writers: Optional[List[str]] = None ) -> MultiWriterIdGenerator: def _create(conn): return MultiWriterIdGenerator( @@ -53,7 +55,7 @@ def _create(conn): instance_name=instance_name, tables=[("foobar", "instance_name", "stream_id")], sequence_name="foobar_seq", - writers=writers, + writers=writers or ["master"], ) return self.get_success_or_raise(self.db_pool.runWithConnection(_create)) @@ -476,7 +478,7 @@ def _setup_db(self, txn): ) def _create_id_generator( - self, instance_name="master", writers=["master"] + self, instance_name="master", writers: Optional[List[str]] = None ) -> MultiWriterIdGenerator: def _create(conn): return MultiWriterIdGenerator( @@ -486,7 +488,7 @@ def _create(conn): instance_name=instance_name, tables=[("foobar", "instance_name", "stream_id")], sequence_name="foobar_seq", - writers=writers, + writers=writers or ["master"], positive=False, ) @@ -612,7 +614,7 @@ def _setup_db(self, txn): ) def _create_id_generator( - self, instance_name="master", writers=["master"] + self, instance_name="master", writers: Optional[List[str]] = None ) -> MultiWriterIdGenerator: def _create(conn): return MultiWriterIdGenerator( @@ -625,7 +627,7 @@ def _create(conn): ("foobar2", "instance_name", "stream_id"), ], sequence_name="foobar_seq", - writers=writers, + writers=writers or ["master"], ) return self.get_success_or_raise(self.db_pool.runWithConnection(_create)) diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py index 2622207639b7..2d2f58903c64 100644 --- a/tests/storage/test_redaction.py +++ b/tests/storage/test_redaction.py @@ -12,6 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from typing import Optional from canonicaljson import json @@ -47,10 +48,15 @@ def prepare(self, reactor, clock, hs): self.depth = 1 def inject_room_member( - self, room, user, membership, replaces_state=None, extra_content={} + self, + room, + user, + membership, + replaces_state=None, + extra_content: Optional[dict] = None, ): content = {"membership": membership} - content.update(extra_content) + content.update(extra_content or {}) builder = self.event_builder_factory.for_room_version( RoomVersions.V1, { diff --git a/tests/test_state.py b/tests/test_state.py index 6227a3ba9555..1d2019699df6 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -12,6 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from typing import List, Optional from mock import Mock @@ -37,7 +38,7 @@ def create_event( state_key=None, depth=2, event_id=None, - prev_events=[], + prev_events: Optional[List[str]] = None, **kwargs ): global _next_event_id @@ -58,7 +59,7 @@ def create_event( "sender": "@user_id:example.com", "room_id": "!room_id:example.com", "depth": depth, - "prev_events": prev_events, + "prev_events": prev_events or [], } if state_key is not None: diff --git a/tests/test_visibility.py b/tests/test_visibility.py index 510b63011470..1b4dd47a8238 100644 --- a/tests/test_visibility.py +++ b/tests/test_visibility.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging +from typing import Optional from mock import Mock @@ -147,9 +148,11 @@ def inject_visibility(self, user_id, visibility): return event @defer.inlineCallbacks - def inject_room_member(self, user_id, membership="join", extra_content={}): + def inject_room_member( + self, user_id, membership="join", extra_content: Optional[dict] = None + ): content = {"membership": membership} - content.update(extra_content) + content.update(extra_content or {}) builder = self.event_builder_factory.for_room_version( RoomVersions.V1, { diff --git a/tests/util/test_ratelimitutils.py b/tests/util/test_ratelimitutils.py index 4d1aee91d537..3fed55090a7c 100644 --- a/tests/util/test_ratelimitutils.py +++ b/tests/util/test_ratelimitutils.py @@ -12,6 +12,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from typing import Optional + from synapse.config.homeserver import HomeServerConfig from synapse.util.ratelimitutils import FederationRateLimiter @@ -89,9 +91,9 @@ def _await_resolution(reactor, d): return (reactor.seconds() - start_time) * 1000 -def build_rc_config(settings={}): +def build_rc_config(settings: Optional[dict] = None): config_dict = default_config("test") - config_dict.update(settings) + config_dict.update(settings or {}) config = HomeServerConfig() config.parse_config_dict(config_dict, "", "") return config.rc_federation From 48a1f4db313026c79fbc7d9d950175693368d98b Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Fri, 9 Apr 2021 10:44:40 +0200 Subject: [PATCH 017/222] Remove old admin API `GET /_synapse/admin/v1/users/` (#9401) Related: #8334 Deprecated in: #9429 - Synapse 1.28.0 (2021-02-25) `GET /_synapse/admin/v1/users/` has no - unit tests - documentation API in v2 is available (#5925 - 12/2019, v1.7.0). API is misleading. It expects `user_id` and returns a list of all users. 
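For anyone still calling the removed endpoint, a hypothetical invocation of the v2 replacement (host and token are placeholders, and the third-party `requests` library is assumed; `from`/`limit` paginate as described in the v2 API docs referenced in the UPGRADE notes below):

    import requests

    resp = requests.get(
        "https://synapse.example.com/_synapse/admin/v2/users",
        headers={"Authorization": "Bearer <admin_access_token>"},
        params={"from": 0, "limit": 10},
    )
    print(resp.json())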
Signed-off-by: Dirk Klimpel dirk@klimpel.org --- UPGRADE.rst | 13 +++++++++++++ changelog.d/9401.removal | 1 + synapse/rest/admin/__init__.py | 2 -- synapse/rest/admin/users.py | 23 ----------------------- tests/storage/test_client_ips.py | 4 ++-- 5 files changed, 16 insertions(+), 27 deletions(-) create mode 100644 changelog.d/9401.removal diff --git a/UPGRADE.rst b/UPGRADE.rst index ba488e1041e6..665821d4ef04 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -85,6 +85,19 @@ for example: wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb +Upgrading to v1.32.0 +==================== + +Removal of old List Accounts Admin API +-------------------------------------- + +The deprecated v1 "list accounts" admin API (``GET /_synapse/admin/v1/users/``) has been removed in this version. + +The `v2 list accounts API `_ +has been available since Synapse 1.7.0 (2019-12-13), and is accessible under ``GET /_synapse/admin/v2/users``. + +The deprecation of the old endpoint was announced with Synapse 1.28.0 (released on 2021-02-25). + Upgrading to v1.29.0 ==================== diff --git a/changelog.d/9401.removal b/changelog.d/9401.removal new file mode 100644 index 000000000000..9c813e021574 --- /dev/null +++ b/changelog.d/9401.removal @@ -0,0 +1 @@ +Remove old admin API `GET /_synapse/admin/v1/users/`. \ No newline at end of file diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 8457db1e2275..5daa795df1a4 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -62,7 +62,6 @@ UserMembershipRestServlet, UserRegisterServlet, UserRestServletV2, - UsersRestServlet, UsersRestServletV2, UserTokenRestServlet, WhoisRestServlet, @@ -248,7 +247,6 @@ def register_servlets_for_client_rest_resource(hs, http_server): PurgeHistoryStatusRestServlet(hs).register(http_server) DeactivateAccountRestServlet(hs).register(http_server) PurgeHistoryRestServlet(hs).register(http_server) - UsersRestServlet(hs).register(http_server) ResetPasswordRestServlet(hs).register(http_server) SearchUsersRestServlet(hs).register(http_server) ShutdownRoomRestServlet(hs).register(http_server) diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index fa7804583a31..595898c259fb 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -45,29 +45,6 @@ logger = logging.getLogger(__name__) -class UsersRestServlet(RestServlet): - PATTERNS = admin_patterns("/users/(?P[^/]*)$") - - def __init__(self, hs: "HomeServer"): - self.hs = hs - self.store = hs.get_datastore() - self.auth = hs.get_auth() - self.admin_handler = hs.get_admin_handler() - - async def on_GET( - self, request: SynapseRequest, user_id: str - ) -> Tuple[int, List[JsonDict]]: - target_user = UserID.from_string(user_id) - await assert_requester_is_admin(self.auth, request) - - if not self.hs.is_mine(target_user): - raise SynapseError(400, "Can only users a local user") - - ret = await self.store.get_users() - - return 200, ret - - class UsersRestServletV2(RestServlet): PATTERNS = admin_patterns("/users$", "v2") diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py index 34e65260970e..a8a6ddc46610 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py @@ -390,7 +390,7 @@ def test_old_user_ips_pruned(self): class ClientIpAuthTestCase(unittest.HomeserverTestCase): servlets = [ - 
synapse.rest.admin.register_servlets_for_client_rest_resource, + synapse.rest.admin.register_servlets, login.register_servlets, ] @@ -434,7 +434,7 @@ def _runtest(self, headers, expected_ip, make_request_args): self.reactor, self.site, "GET", - "/_synapse/admin/v1/users/" + self.user_id, + "/_synapse/admin/v2/users/" + self.user_id, access_token=access_token, custom_headers=headers1.items(), **make_request_args, From 0277b8f3e6dc3d960a7192dfc8035e2cbf2559dc Mon Sep 17 00:00:00 2001 From: Dan Callahan Date: Fri, 9 Apr 2021 10:54:30 +0100 Subject: [PATCH 018/222] Proof of concept for GitHub Actions (#9661) Signed-off-by: Dan Callahan --- .github/workflows/tests.yml | 322 ++++++++++++++++++++++++++++++++++++ changelog.d/9661.misc | 1 + 2 files changed, 323 insertions(+) create mode 100644 .github/workflows/tests.yml create mode 100644 changelog.d/9661.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml new file mode 100644 index 000000000000..12c82ac620fb --- /dev/null +++ b/.github/workflows/tests.yml @@ -0,0 +1,322 @@ +name: Tests + +on: + push: + branches: ["develop", "release-*"] + pull_request: + +jobs: + lint: + runs-on: ubuntu-latest + strategy: + matrix: + toxenv: + - "check-sampleconfig" + - "check_codestyle" + - "check_isort" + - "mypy" + - "packaging" + + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-python@v2 + - run: pip install tox + - run: tox -e ${{ matrix.toxenv }} + + lint-crlf: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Check line endings + run: scripts-dev/check_line_terminators.sh + + lint-newsfile: + if: ${{ github.base_ref == 'develop' || contains(github.base_ref, 'release-') }} + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-python@v2 + - run: pip install tox + - name: Patch Buildkite-specific test script + run: | + sed -i -e 's/\$BUILDKITE_PULL_REQUEST/${{ github.event.number }}/' \ + scripts-dev/check-newsfragment + - run: scripts-dev/check-newsfragment + + lint-sdist: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-python@v2 + with: + python-version: "3.x" + - run: pip install wheel + - run: python setup.py sdist bdist_wheel + - uses: actions/upload-artifact@v2 + with: + name: Python Distributions + path: dist/* + + # Dummy step to gate other tests on without repeating the whole list + linting-done: + if: ${{ always() }} # Run this even if prior jobs were skipped + needs: [lint, lint-crlf, lint-newsfile, lint-sdist] + runs-on: ubuntu-latest + steps: + - run: "true" + + trial: + if: ${{ !failure() }} # Allow previous steps to be skipped, but not fail + needs: linting-done + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ["3.6", "3.7", "3.8", "3.9"] + database: ["sqlite"] + include: + # Newest Python without optional deps + - python-version: "3.9" + toxenv: "py-noextras,combine" + + # Oldest Python with PostgreSQL + - python-version: "3.6" + database: "postgres" + postgres-version: "9.6" + + # Newest Python with PostgreSQL + - python-version: "3.9" + database: "postgres" + postgres-version: "13" + + steps: + - uses: actions/checkout@v2 + - run: sudo apt-get -qq install xmlsec1 + - name: Set up PostgreSQL ${{ matrix.postgres-version }} + if: ${{ matrix.postgres-version }} + run: | + docker run -d -p 5432:5432 \ + -e POSTGRES_PASSWORD=postgres \ + -e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \ + postgres:${{ matrix.postgres-version }} + - uses: actions/setup-python@v2 + with: + 
python-version: ${{ matrix.python-version }} + - run: pip install tox + - name: Await PostgreSQL + if: ${{ matrix.postgres-version }} + timeout-minutes: 2 + run: until pg_isready -h localhost; do sleep 1; done + - run: tox -e py,combine + env: + TRIAL_FLAGS: "--jobs=2" + SYNAPSE_POSTGRES: ${{ matrix.database == 'postgres' || '' }} + SYNAPSE_POSTGRES_HOST: localhost + SYNAPSE_POSTGRES_USER: postgres + SYNAPSE_POSTGRES_PASSWORD: postgres + - name: Dump logs + # Note: Dumps to workflow logs instead of using actions/upload-artifact + # This keeps logs colocated with failing jobs + # It also ignores find's exit code; this is a best effort affair + run: >- + find _trial_temp -name '*.log' + -exec echo "::group::{}" \; + -exec cat {} \; + -exec echo "::endgroup::" \; + || true + + trial-olddeps: + if: ${{ !failure() }} # Allow previous steps to be skipped, but not fail + needs: linting-done + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Test with old deps + uses: docker://ubuntu:bionic # For old python and sqlite + with: + workdir: /github/workspace + entrypoint: .buildkite/scripts/test_old_deps.sh + env: + TRIAL_FLAGS: "--jobs=2" + - name: Dump logs + # Note: Dumps to workflow logs instead of using actions/upload-artifact + # This keeps logs colocated with failing jobs + # It also ignores find's exit code; this is a best effort affair + run: >- + find _trial_temp -name '*.log' + -exec echo "::group::{}" \; + -exec cat {} \; + -exec echo "::endgroup::" \; + || true + + trial-pypy: + # Very slow; only run if the branch name includes 'pypy' + if: ${{ contains(github.ref, 'pypy') && !failure() }} + needs: linting-done + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ["pypy-3.6"] + + steps: + - uses: actions/checkout@v2 + - run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev + - uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - run: pip install tox + - run: tox -e py,combine + env: + TRIAL_FLAGS: "--jobs=2" + - name: Dump logs + # Note: Dumps to workflow logs instead of using actions/upload-artifact + # This keeps logs colocated with failing jobs + # It also ignores find's exit code; this is a best effort affair + run: >- + find _trial_temp -name '*.log' + -exec echo "::group::{}" \; + -exec cat {} \; + -exec echo "::endgroup::" \; + || true + + sytest: + if: ${{ !failure() }} + needs: linting-done + runs-on: ubuntu-latest + container: + image: matrixdotorg/sytest-synapse:${{ matrix.sytest-tag }} + volumes: + - ${{ github.workspace }}:/src + env: + BUILDKITE_BRANCH: ${{ github.head_ref }} + POSTGRES: ${{ matrix.postgres && 1}} + MULTI_POSTGRES: ${{ (matrix.postgres == 'multi-postgres') && 1}} + WORKERS: ${{ matrix.workers && 1 }} + REDIS: ${{ matrix.redis && 1 }} + BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }} + + strategy: + fail-fast: false + matrix: + include: + - sytest-tag: bionic + + - sytest-tag: bionic + postgres: postgres + + - sytest-tag: testing + postgres: postgres + + - sytest-tag: bionic + postgres: multi-postgres + workers: workers + + - sytest-tag: buster + postgres: multi-postgres + workers: workers + + - sytest-tag: buster + postgres: postgres + workers: workers + redis: redis + + steps: + - uses: actions/checkout@v2 + - name: Prepare test blacklist + run: cat sytest-blacklist .buildkite/worker-blacklist > synapse-blacklist-with-workers + - name: Run SyTest + run: /bootstrap.sh synapse + working-directory: /src + - name: Dump results.tap + if: ${{ always() }} + run: cat 
/logs/results.tap + - name: Upload SyTest logs + uses: actions/upload-artifact@v2 + if: ${{ always() }} + with: + name: Sytest Logs - ${{ job.status }} - (${{ join(matrix.*, ', ') }}) + path: | + /logs/results.tap + /logs/**/*.log* + + portdb: + if: ${{ !failure() }} # Allow previous steps to be skipped, but not fail + needs: linting-done + runs-on: ubuntu-latest + strategy: + matrix: + include: + - python-version: "3.6" + postgres-version: "9.6" + + - python-version: "3.9" + postgres-version: "13" + + services: + postgres: + image: postgres:${{ matrix.postgres-version }} + ports: + - 5432:5432 + env: + POSTGRES_PASSWORD: "postgres" + POSTGRES_INITDB_ARGS: "--lc-collate C --lc-ctype C --encoding UTF8" + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + + steps: + - uses: actions/checkout@v2 + - run: sudo apt-get -qq install xmlsec1 + - uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Patch Buildkite-specific test scripts + run: | + sed -i -e 's/host="postgres"/host="localhost"/' .buildkite/scripts/create_postgres_db.py + sed -i -e 's/host: postgres/host: localhost/' .buildkite/postgres-config.yaml + sed -i -e 's|/src/||' .buildkite/{sqlite,postgres}-config.yaml + sed -i -e 's/\$TOP/\$GITHUB_WORKSPACE/' .coveragerc + - run: .buildkite/scripts/test_synapse_port_db.sh + + complement: + if: ${{ !failure() }} + needs: linting-done + runs-on: ubuntu-latest + container: + # https://github.com/matrix-org/complement/blob/master/dockerfiles/ComplementCIBuildkite.Dockerfile + image: matrixdotorg/complement:latest + env: + CI: true + ports: + - 8448:8448 + volumes: + - /var/run/docker.sock:/var/run/docker.sock + + steps: + - name: Run actions/checkout@v2 for synapse + uses: actions/checkout@v2 + with: + path: synapse + + - name: Run actions/checkout@v2 for complement + uses: actions/checkout@v2 + with: + repository: "matrix-org/complement" + path: complement + + # Build initial Synapse image + - run: docker build -t matrixdotorg/synapse:latest -f docker/Dockerfile . + working-directory: synapse + + # Build a ready-to-run Synapse image based on the initial image above. + # This new image includes a config file, keys for signing and TLS, and + # other settings to make it suitable for testing under Complement. + - run: docker build -t complement-synapse -f Synapse.Dockerfile . + working-directory: complement/dockerfiles + + # Run Complement + - run: go test -v -tags synapse_blacklist ./tests + env: + COMPLEMENT_BASE_IMAGE: complement-synapse:latest + working-directory: complement diff --git a/changelog.d/9661.misc b/changelog.d/9661.misc new file mode 100644 index 000000000000..b5beb4626c8c --- /dev/null +++ b/changelog.d/9661.misc @@ -0,0 +1 @@ +Experiment with GitHub Actions for CI. From abc814dcbf559282220c35a45b3959bb23a2ed50 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 9 Apr 2021 08:11:51 -0400 Subject: [PATCH 019/222] Enable complement tests for MSC2946. (#9771) By providing the additional build tag for `msc2946`. --- changelog.d/9771.misc | 1 + scripts-dev/complement.sh | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/9771.misc diff --git a/changelog.d/9771.misc b/changelog.d/9771.misc new file mode 100644 index 000000000000..42d651d4cc99 --- /dev/null +++ b/changelog.d/9771.misc @@ -0,0 +1 @@ +Enable Complement tests for [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946): Spaces Summary API. 
diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index b77187472ffe..1612ab522c33 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -46,4 +46,4 @@ if [[ -n "$1" ]]; then fi # Run the tests! -COMPLEMENT_BASE_IMAGE=complement-synapse go test -v -tags synapse_blacklist,msc3083 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests +COMPLEMENT_BASE_IMAGE=complement-synapse go test -v -tags synapse_blacklist,msc2946,msc3083 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests From f9464501846755e09e882c97e2d8c1490c9bf74b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 9 Apr 2021 18:12:15 +0100 Subject: [PATCH 020/222] Fix duplicate logging of exceptions in transaction processing (#9780) There's no point logging this twice. --- changelog.d/9780.bugfix | 1 + synapse/federation/transport/server.py | 10 +++------- 2 files changed, 4 insertions(+), 7 deletions(-) create mode 100644 changelog.d/9780.bugfix diff --git a/changelog.d/9780.bugfix b/changelog.d/9780.bugfix new file mode 100644 index 000000000000..70985a050f11 --- /dev/null +++ b/changelog.d/9780.bugfix @@ -0,0 +1 @@ +Fix duplicate logging of exceptions thrown during federation transaction processing. diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 5ef0556ef7f8..a9c1391d27ef 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -425,13 +425,9 @@ async def on_PUT(self, origin, content, query, transaction_id): logger.exception(e) return 400, {"error": "Invalid transaction"} - try: - code, response = await self.handler.on_incoming_transaction( - origin, transaction_data - ) - except Exception: - logger.exception("on_incoming_transaction failed") - raise + code, response = await self.handler.on_incoming_transaction( + origin, transaction_data + ) return code, response From 0b3112123da5fae4964db784e3bab0c4d83d9d62 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 9 Apr 2021 13:44:38 -0400 Subject: [PATCH 021/222] Use mock from the stdlib. 
(#9772) --- changelog.d/9772.misc | 1 + setup.cfg | 3 +-- setup.py | 2 +- synmark/suites/logging.py | 3 +-- tests/api/test_auth.py | 2 +- tests/app/test_openid_listener.py | 2 +- tests/appservice/test_appservice.py | 3 +-- tests/appservice/test_scheduler.py | 2 +- tests/crypto/test_keyring.py | 3 +-- tests/events/test_presence_router.py | 6 +++--- tests/federation/test_complexity.py | 2 +- tests/federation/test_federation_catch_up.py | 3 +-- tests/federation/test_federation_sender.py | 3 +-- tests/handlers/test_admin.py | 3 +-- tests/handlers/test_appservice.py | 2 +- tests/handlers/test_auth.py | 2 +- tests/handlers/test_cas.py | 2 +- tests/handlers/test_directory.py | 2 +- tests/handlers/test_e2e_keys.py | 2 +- tests/handlers/test_e2e_room_keys.py | 3 +-- tests/handlers/test_oidc.py | 3 +-- tests/handlers/test_password_providers.py | 3 +-- tests/handlers/test_presence.py | 2 +- tests/handlers/test_profile.py | 2 +- tests/handlers/test_register.py | 2 +- tests/handlers/test_saml.py | 3 +-- tests/handlers/test_typing.py | 3 +-- tests/handlers/test_user_directory.py | 2 +- tests/http/federation/test_matrix_federation_agent.py | 3 +-- tests/http/federation/test_srv_resolver.py | 2 +- tests/http/test_client.py | 3 +-- tests/http/test_fedclient.py | 2 +- tests/http/test_servlet.py | 3 +-- tests/http/test_simple_client.py | 2 +- tests/logging/test_terse_json.py | 3 +-- tests/module_api/test_api.py | 5 +++-- tests/push/test_http.py | 2 +- tests/replication/slave/storage/_base.py | 2 +- tests/replication/tcp/streams/test_receipts.py | 2 +- tests/replication/tcp/streams/test_typing.py | 2 +- tests/replication/test_federation_ack.py | 2 +- tests/replication/test_federation_sender_shard.py | 3 +-- tests/replication/test_pusher_shard.py | 3 +-- tests/replication/test_sharded_event_persister.py | 3 +-- tests/rest/admin/test_admin.py | 3 +-- tests/rest/admin/test_room.py | 3 +-- tests/rest/admin/test_user.py | 3 +-- tests/rest/client/test_retention.py | 2 +- tests/rest/client/test_shadow_banned.py | 2 +- tests/rest/client/test_third_party_rules.py | 3 +-- tests/rest/client/test_transactions.py | 2 +- tests/rest/client/v1/test_events.py | 2 +- tests/rest/client/v1/test_login.py | 3 +-- tests/rest/client/v1/test_presence.py | 2 +- tests/rest/client/v1/test_rooms.py | 3 +-- tests/rest/client/v1/test_typing.py | 2 +- tests/rest/client/v1/utils.py | 3 +-- tests/rest/key/v2/test_remote_key_resource.py | 3 +-- tests/rest/media/v1/test_media_storage.py | 3 +-- tests/rest/media/v1/test_url_preview.py | 3 +-- tests/scripts/test_new_matrix_user.py | 2 +- tests/server_notices/test_resource_limits_server_notices.py | 2 +- tests/storage/test_appservice.py | 3 +-- tests/storage/test_background_update.py | 2 +- tests/storage/test_base.py | 3 +-- tests/storage/test_cleanup_extrems.py | 4 +--- tests/storage/test_client_ips.py | 2 +- tests/storage/test_event_push_actions.py | 2 +- tests/storage/test_monthly_active_users.py | 2 +- tests/test_distributor.py | 2 +- tests/test_federation.py | 2 +- tests/test_phone_home.py | 3 +-- tests/test_state.py | 3 +-- tests/test_terms_auth.py | 3 +-- tests/test_utils/__init__.py | 3 +-- tests/test_visibility.py | 3 +-- tests/unittest.py | 3 +-- tests/util/caches/test_descriptors.py | 3 +-- tests/util/caches/test_ttlcache.py | 2 +- tests/util/test_file_consumer.py | 3 +-- tests/util/test_lrucache.py | 2 +- tests/utils.py | 3 +-- 82 files changed, 86 insertions(+), 126 deletions(-) create mode 100644 changelog.d/9772.misc diff --git a/changelog.d/9772.misc b/changelog.d/9772.misc new 
file mode 100644 index 000000000000..ec7d94cc2576 --- /dev/null +++ b/changelog.d/9772.misc @@ -0,0 +1 @@ +Use mock from the standard library instead of a separate package. diff --git a/setup.cfg b/setup.cfg index 5fdb51ac7397..33601b71d58c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -23,11 +23,10 @@ ignore=W503,W504,E203,E731,E501,B007 [isort] line_length = 88 -sections=FUTURE,STDLIB,COMPAT,THIRDPARTY,TWISTED,FIRSTPARTY,TESTS,LOCALFOLDER +sections=FUTURE,STDLIB,THIRDPARTY,TWISTED,FIRSTPARTY,TESTS,LOCALFOLDER default_section=THIRDPARTY known_first_party = synapse known_tests=tests -known_compat = mock known_twisted=twisted,OpenSSL multi_line_output=3 include_trailing_comma=true diff --git a/setup.py b/setup.py index 4e9e333c6089..2eb70d0bb321 100755 --- a/setup.py +++ b/setup.py @@ -110,7 +110,7 @@ def exec_file(path_segments): # Tests assume that all optional dependencies are installed. # # parameterized_class decorator was introduced in parameterized 0.7.0 -CONDITIONAL_REQUIREMENTS["test"] = ["mock>=2.0", "parameterized>=0.7.0"] +CONDITIONAL_REQUIREMENTS["test"] = ["parameterized>=0.7.0"] setup( name="matrix-synapse", diff --git a/synmark/suites/logging.py b/synmark/suites/logging.py index c306891b27f4..b3abc6b254b8 100644 --- a/synmark/suites/logging.py +++ b/synmark/suites/logging.py @@ -16,8 +16,7 @@ import logging import warnings from io import StringIO - -from mock import Mock +from unittest.mock import Mock from pyperf import perf_counter diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index 34f72ae795c7..28d77f0ca238 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock import pymacaroons diff --git a/tests/app/test_openid_listener.py b/tests/app/test_openid_listener.py index 467033e201be..33a37fe35eac 100644 --- a/tests/app/test_openid_listener.py +++ b/tests/app/test_openid_listener.py @@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock, patch +from unittest.mock import Mock, patch from parameterized import parameterized diff --git a/tests/appservice/test_appservice.py b/tests/appservice/test_appservice.py index 0bffeb115081..03a7440eec5f 100644 --- a/tests/appservice/test_appservice.py +++ b/tests/appservice/test_appservice.py @@ -13,8 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import re - -from mock import Mock +from unittest.mock import Mock from twisted.internet import defer diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py index 97f8cad0ddd4..3c27d797fb37 100644 --- a/tests/appservice/test_scheduler.py +++ b/tests/appservice/test_scheduler.py @@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from mock import Mock +from unittest.mock import Mock from twisted.internet import defer diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 946482b7e706..a56063315be0 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -13,8 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import time - -from mock import Mock +from unittest.mock import Mock import attr import canonicaljson diff --git a/tests/events/test_presence_router.py b/tests/events/test_presence_router.py index c6e547f11c4d..c996ecc221a1 100644 --- a/tests/events/test_presence_router.py +++ b/tests/events/test_presence_router.py @@ -13,8 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. from typing import Dict, Iterable, List, Optional, Set, Tuple, Union - -from mock import Mock +from unittest.mock import Mock import attr @@ -314,7 +313,8 @@ def test_send_local_online_presence_to_with_module(self): self.hs.get_federation_transport_client().send_transaction.call_args_list ) for call in calls: - federation_transaction = call.args[0] # type: Transaction + call_args = call[0] + federation_transaction = call_args[0] # type: Transaction # Get the sent EDUs in this transaction edus = federation_transaction.get_dict()["edus"] diff --git a/tests/federation/test_complexity.py b/tests/federation/test_complexity.py index 8186b8ca013c..701fa8379fff 100644 --- a/tests/federation/test_complexity.py +++ b/tests/federation/test_complexity.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock from synapse.api.errors import Codes, SynapseError from synapse.rest import admin diff --git a/tests/federation/test_federation_catch_up.py b/tests/federation/test_federation_catch_up.py index 95eac6a5a34c..802c5ad299d3 100644 --- a/tests/federation/test_federation_catch_up.py +++ b/tests/federation/test_federation_catch_up.py @@ -1,6 +1,5 @@ from typing import List, Tuple - -from mock import Mock +from unittest.mock import Mock from synapse.api.constants import EventTypes from synapse.events import EventBase diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py index ecc3faa57218..deb12433cf4f 100644 --- a/tests/federation/test_federation_sender.py +++ b/tests/federation/test_federation_sender.py @@ -13,8 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional - -from mock import Mock +from unittest.mock import Mock from signedjson import key, sign from signedjson.types import BaseKey, SigningKey diff --git a/tests/handlers/test_admin.py b/tests/handlers/test_admin.py index a01fdd083981..32669ae9ced6 100644 --- a/tests/handlers/test_admin.py +++ b/tests/handlers/test_admin.py @@ -14,8 +14,7 @@ # limitations under the License. from collections import Counter - -from mock import Mock +from unittest.mock import Mock import synapse.api.errors import synapse.handlers.admin diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index d5d3fdd99a9e..6e325b24cee1 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from mock import Mock +from unittest.mock import Mock from twisted.internet import defer diff --git a/tests/handlers/test_auth.py b/tests/handlers/test_auth.py index c9f889b5117d..321c5ba045fb 100644 --- a/tests/handlers/test_auth.py +++ b/tests/handlers/test_auth.py @@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock import pymacaroons diff --git a/tests/handlers/test_cas.py b/tests/handlers/test_cas.py index 7975af243c7c..0444b2679889 100644 --- a/tests/handlers/test_cas.py +++ b/tests/handlers/test_cas.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock from synapse.handlers.cas_handler import CasResponse diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py index 863d8737b2f8..6ae9d4f8656f 100644 --- a/tests/handlers/test_directory.py +++ b/tests/handlers/test_directory.py @@ -14,7 +14,7 @@ # limitations under the License. -from mock import Mock +from unittest.mock import Mock import synapse import synapse.api.errors diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index 5e86c5e56bf4..6915ac0205f0 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -14,7 +14,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import mock +from unittest import mock from signedjson import key as key, sign as sign diff --git a/tests/handlers/test_e2e_room_keys.py b/tests/handlers/test_e2e_room_keys.py index d7498aa51a80..07893302ecbe 100644 --- a/tests/handlers/test_e2e_room_keys.py +++ b/tests/handlers/test_e2e_room_keys.py @@ -16,8 +16,7 @@ # limitations under the License. import copy - -import mock +from unittest import mock from synapse.api.errors import SynapseError diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py index c7796fb837bc..8702ee70e0de 100644 --- a/tests/handlers/test_oidc.py +++ b/tests/handlers/test_oidc.py @@ -14,10 +14,9 @@ # limitations under the License. import json import os +from unittest.mock import ANY, Mock, patch from urllib.parse import parse_qs, urlparse -from mock import ANY, Mock, patch - import pymacaroons from synapse.handlers.sso import MappingException diff --git a/tests/handlers/test_password_providers.py b/tests/handlers/test_password_providers.py index a98a65ae67e4..e28e4159eba5 100644 --- a/tests/handlers/test_password_providers.py +++ b/tests/handlers/test_password_providers.py @@ -16,8 +16,7 @@ """Tests for the password_auth_provider interface""" from typing import Any, Type, Union - -from mock import Mock +from unittest.mock import Mock from twisted.internet import defer diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index 77330f59a987..9f16cc65fc2e 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -14,7 +14,7 @@ # limitations under the License. 
-from mock import Mock, call +from unittest.mock import Mock, call from signedjson.key import generate_signing_key diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py index 75c6a4e21cb5..d8b1bcac8b49 100644 --- a/tests/handlers/test_profile.py +++ b/tests/handlers/test_profile.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock import synapse.types from synapse.api.errors import AuthError, SynapseError diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 94b69035945b..69279a5ce9fd 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock from synapse.api.auth import Auth from synapse.api.constants import UserTypes diff --git a/tests/handlers/test_saml.py b/tests/handlers/test_saml.py index 30efd43b4060..8cfc184fefc9 100644 --- a/tests/handlers/test_saml.py +++ b/tests/handlers/test_saml.py @@ -13,8 +13,7 @@ # limitations under the License. from typing import Optional - -from mock import Mock +from unittest.mock import Mock import attr diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index 24e71381965a..9fa231a37a95 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -16,8 +16,7 @@ import json from typing import Dict - -from mock import ANY, Mock, call +from unittest.mock import ANY, Mock, call from twisted.internet import defer from twisted.web.resource import Resource diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index 98b2f5b38377..c68cb830afcd 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock from twisted.internet import defer diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index 73e12ea6c3ca..ae9d4504a8c0 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -14,8 +14,7 @@ # limitations under the License. import logging from typing import Optional - -from mock import Mock +from unittest.mock import Mock import treq from netaddr import IPSet diff --git a/tests/http/federation/test_srv_resolver.py b/tests/http/federation/test_srv_resolver.py index fee2985d350e..466ce722d9ce 100644 --- a/tests/http/federation/test_srv_resolver.py +++ b/tests/http/federation/test_srv_resolver.py @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock from twisted.internet import defer from twisted.internet.defer import Deferred diff --git a/tests/http/test_client.py b/tests/http/test_client.py index 0ce181a51e94..7e2f2a01cc07 100644 --- a/tests/http/test_client.py +++ b/tests/http/test_client.py @@ -13,8 +13,7 @@ # limitations under the License. 
from io import BytesIO - -from mock import Mock +from unittest.mock import Mock from netaddr import IPSet diff --git a/tests/http/test_fedclient.py b/tests/http/test_fedclient.py index 9c52c8fdca14..21c1297171af 100644 --- a/tests/http/test_fedclient.py +++ b/tests/http/test_fedclient.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock from netaddr import IPSet from parameterized import parameterized diff --git a/tests/http/test_servlet.py b/tests/http/test_servlet.py index 45089158ceb6..f979c96f7cdc 100644 --- a/tests/http/test_servlet.py +++ b/tests/http/test_servlet.py @@ -14,8 +14,7 @@ # limitations under the License. import json from io import BytesIO - -from mock import Mock +from unittest.mock import Mock from synapse.api.errors import SynapseError from synapse.http.servlet import ( diff --git a/tests/http/test_simple_client.py b/tests/http/test_simple_client.py index a1cf0862d4fe..cc4cae320d36 100644 --- a/tests/http/test_simple_client.py +++ b/tests/http/test_simple_client.py @@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock from netaddr import IPSet diff --git a/tests/logging/test_terse_json.py b/tests/logging/test_terse_json.py index bfe0d11c9364..215fd8b0f9e3 100644 --- a/tests/logging/test_terse_json.py +++ b/tests/logging/test_terse_json.py @@ -15,8 +15,7 @@ import json import logging from io import BytesIO, StringIO - -from mock import Mock, patch +from unittest.mock import Mock, patch from twisted.web.server import Request diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index 1d1fceeecf44..349f93560e9d 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock from synapse.api.constants import EduTypes from synapse.events import EventBase @@ -358,7 +358,8 @@ def test_send_local_online_presence_to_federation(self): self.hs.get_federation_transport_client().send_transaction.call_args_list ) for call in calls: - federation_transaction = call.args[0] # type: Transaction + call_args = call[0] + federation_transaction = call_args[0] # type: Transaction # Get the sent EDUs in this transaction edus = federation_transaction.get_dict()["edus"] diff --git a/tests/push/test_http.py b/tests/push/test_http.py index 60f0820cffbd..4074ade87a14 100644 --- a/tests/push/test_http.py +++ b/tests/push/test_http.py @@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock from twisted.internet.defer import Deferred diff --git a/tests/replication/slave/storage/_base.py b/tests/replication/slave/storage/_base.py index 56497b8476ee..83e89383f64f 100644 --- a/tests/replication/slave/storage/_base.py +++ b/tests/replication/slave/storage/_base.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from mock import Mock +from unittest.mock import Mock from tests.replication._base import BaseStreamTestCase diff --git a/tests/replication/tcp/streams/test_receipts.py b/tests/replication/tcp/streams/test_receipts.py index 56b062ecc1d6..7d848e41ffed 100644 --- a/tests/replication/tcp/streams/test_receipts.py +++ b/tests/replication/tcp/streams/test_receipts.py @@ -15,7 +15,7 @@ # type: ignore -from mock import Mock +from unittest.mock import Mock from synapse.replication.tcp.streams._base import ReceiptsStream diff --git a/tests/replication/tcp/streams/test_typing.py b/tests/replication/tcp/streams/test_typing.py index ca49d4dd3af2..4a0b34226443 100644 --- a/tests/replication/tcp/streams/test_typing.py +++ b/tests/replication/tcp/streams/test_typing.py @@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock from synapse.handlers.typing import RoomMember from synapse.replication.tcp.streams import TypingStream diff --git a/tests/replication/test_federation_ack.py b/tests/replication/test_federation_ack.py index 0d9e3bb11dba..44ad5eec57ef 100644 --- a/tests/replication/test_federation_ack.py +++ b/tests/replication/test_federation_ack.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -import mock +from unittest import mock from synapse.app.generic_worker import GenericWorkerServer from synapse.replication.tcp.commands import FederationAckCommand diff --git a/tests/replication/test_federation_sender_shard.py b/tests/replication/test_federation_sender_shard.py index 2f2d117858f0..8ca595c3eeab 100644 --- a/tests/replication/test_federation_sender_shard.py +++ b/tests/replication/test_federation_sender_shard.py @@ -13,8 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging - -from mock import Mock +from unittest.mock import Mock from synapse.api.constants import EventTypes, Membership from synapse.events.builder import EventBuilderFactory diff --git a/tests/replication/test_pusher_shard.py b/tests/replication/test_pusher_shard.py index ab2988a6ba47..1f12bde1aa47 100644 --- a/tests/replication/test_pusher_shard.py +++ b/tests/replication/test_pusher_shard.py @@ -13,8 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging - -from mock import Mock +from unittest.mock import Mock from twisted.internet import defer diff --git a/tests/replication/test_sharded_event_persister.py b/tests/replication/test_sharded_event_persister.py index c9b773fbd215..6c2e1674cb21 100644 --- a/tests/replication/test_sharded_event_persister.py +++ b/tests/replication/test_sharded_event_persister.py @@ -13,8 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging - -from mock import patch +from unittest.mock import patch from synapse.api.room_versions import RoomVersion from synapse.rest import admin diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py index 057e27372e1e..4abcbe3f553a 100644 --- a/tests/rest/admin/test_admin.py +++ b/tests/rest/admin/test_admin.py @@ -17,8 +17,7 @@ import os import urllib.parse from binascii import unhexlify - -from mock import Mock +from unittest.mock import Mock from twisted.internet.defer import Deferred diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index b55160b70afa..85f77c0a659e 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -16,8 +16,7 @@ import json import urllib.parse from typing import List, Optional - -from mock import Mock +from unittest.mock import Mock import synapse.rest.admin from synapse.api.constants import EventTypes, Membership diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 0c9ec133c2cc..e47fd3ded83d 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -19,8 +19,7 @@ import urllib.parse from binascii import unhexlify from typing import List, Optional - -from mock import Mock +from unittest.mock import Mock import synapse.rest.admin from synapse.api.constants import UserTypes diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py index aee99bb6a0aa..f892a7122894 100644 --- a/tests/rest/client/test_retention.py +++ b/tests/rest/client/test_retention.py @@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock from synapse.api.constants import EventTypes from synapse.rest import admin diff --git a/tests/rest/client/test_shadow_banned.py b/tests/rest/client/test_shadow_banned.py index d2cce44032fa..288ee128886b 100644 --- a/tests/rest/client/test_shadow_banned.py +++ b/tests/rest/client/test_shadow_banned.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock, patch +from unittest.mock import Mock, patch import synapse.rest.admin from synapse.api.constants import EventTypes diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index bf3901427707..a7ebe0c3e921 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -14,8 +14,7 @@ # limitations under the License. 
import threading from typing import Dict - -from mock import Mock +from unittest.mock import Mock from synapse.events import EventBase from synapse.module_api import ModuleApi diff --git a/tests/rest/client/test_transactions.py b/tests/rest/client/test_transactions.py index 171632e195e2..3b5747cb12b8 100644 --- a/tests/rest/client/test_transactions.py +++ b/tests/rest/client/test_transactions.py @@ -1,4 +1,4 @@ -from mock import Mock, call +from unittest.mock import Mock, call from twisted.internet import defer, reactor diff --git a/tests/rest/client/v1/test_events.py b/tests/rest/client/v1/test_events.py index 2ae896db1ec9..87a18d2cb94e 100644 --- a/tests/rest/client/v1/test_events.py +++ b/tests/rest/client/v1/test_events.py @@ -15,7 +15,7 @@ """ Tests REST events for /events paths.""" -from mock import Mock +from unittest.mock import Mock import synapse.rest.admin from synapse.rest.client.v1 import events, login, room diff --git a/tests/rest/client/v1/test_login.py b/tests/rest/client/v1/test_login.py index 988821b16f4d..c7b79ab8a706 100644 --- a/tests/rest/client/v1/test_login.py +++ b/tests/rest/client/v1/test_login.py @@ -16,10 +16,9 @@ import time import urllib.parse from typing import Any, Dict, List, Optional, Union +from unittest.mock import Mock from urllib.parse import urlencode -from mock import Mock - import pymacaroons from twisted.web.resource import Resource diff --git a/tests/rest/client/v1/test_presence.py b/tests/rest/client/v1/test_presence.py index 94a5154834ec..c136827f7993 100644 --- a/tests/rest/client/v1/test_presence.py +++ b/tests/rest/client/v1/test_presence.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock from twisted.internet import defer diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index 715414a3107d..4df20c90fda8 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -20,10 +20,9 @@ import json from typing import Iterable +from unittest.mock import Mock from urllib import parse as urlparse -from mock import Mock - import synapse.rest.admin from synapse.api.constants import EventContentFields, EventTypes, Membership from synapse.handlers.pagination import PurgeStatus diff --git a/tests/rest/client/v1/test_typing.py b/tests/rest/client/v1/test_typing.py index 329dbd06def2..0b8f5651211c 100644 --- a/tests/rest/client/v1/test_typing.py +++ b/tests/rest/client/v1/test_typing.py @@ -16,7 +16,7 @@ """Tests REST events for /rooms paths.""" -from mock import Mock +from unittest.mock import Mock from synapse.rest.client.v1 import room from synapse.types import UserID diff --git a/tests/rest/client/v1/utils.py b/tests/rest/client/v1/utils.py index 8a4dddae2b2a..a6a292b20cfa 100644 --- a/tests/rest/client/v1/utils.py +++ b/tests/rest/client/v1/utils.py @@ -21,8 +21,7 @@ import time import urllib.parse from typing import Any, Dict, Mapping, MutableMapping, Optional - -from mock import patch +from unittest.mock import patch import attr diff --git a/tests/rest/key/v2/test_remote_key_resource.py b/tests/rest/key/v2/test_remote_key_resource.py index 9d0d0ef41466..eb8687ce68c2 100644 --- a/tests/rest/key/v2/test_remote_key_resource.py +++ b/tests/rest/key/v2/test_remote_key_resource.py @@ -14,8 +14,7 @@ # limitations under the License. 
import urllib.parse from io import BytesIO, StringIO - -from mock import Mock +from unittest.mock import Mock import signedjson.key from canonicaljson import encode_canonical_json diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/rest/media/v1/test_media_storage.py index 9f77125fd445..375f0b797704 100644 --- a/tests/rest/media/v1/test_media_storage.py +++ b/tests/rest/media/v1/test_media_storage.py @@ -18,10 +18,9 @@ from binascii import unhexlify from io import BytesIO from typing import Optional +from unittest.mock import Mock from urllib import parse -from mock import Mock - import attr from parameterized import parameterized_class from PIL import Image as Image diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/v1/test_url_preview.py index 696850243391..9067463e541f 100644 --- a/tests/rest/media/v1/test_url_preview.py +++ b/tests/rest/media/v1/test_url_preview.py @@ -15,8 +15,7 @@ import json import os import re - -from mock import patch +from unittest.mock import patch from twisted.internet._resolver import HostResolution from twisted.internet.address import IPv4Address, IPv6Address diff --git a/tests/scripts/test_new_matrix_user.py b/tests/scripts/test_new_matrix_user.py index 6f56893f5e91..885b95a51f5e 100644 --- a/tests/scripts/test_new_matrix_user.py +++ b/tests/scripts/test_new_matrix_user.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock from synapse._scripts.register_new_matrix_user import request_registration diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index d40d65b06a8b..450b4ec710a2 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock from twisted.internet import defer diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index 1ce29af5fd9d..e755a4db625d 100644 --- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -15,8 +15,7 @@ import json import os import tempfile - -from mock import Mock +from unittest.mock import Mock import yaml diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py index 1b4fae0bb555..069db0edc43a 100644 --- a/tests/storage/test_background_update.py +++ b/tests/storage/test_background_update.py @@ -1,4 +1,4 @@ -from mock import Mock +from unittest.mock import Mock from synapse.storage.background_updates import BackgroundUpdater diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py index eac7e4dcd2fa..54e9e7f6fe98 100644 --- a/tests/storage/test_base.py +++ b/tests/storage/test_base.py @@ -15,8 +15,7 @@ from collections import OrderedDict - -from mock import Mock +from unittest.mock import Mock from twisted.internet import defer diff --git a/tests/storage/test_cleanup_extrems.py b/tests/storage/test_cleanup_extrems.py index 779113868880..b02fb32ced73 100644 --- a/tests/storage/test_cleanup_extrems.py +++ b/tests/storage/test_cleanup_extrems.py @@ -14,9 +14,7 @@ # limitations under the License. 
import os.path -from unittest.mock import patch - -from mock import Mock +from unittest.mock import Mock, patch import synapse.rest.admin from synapse.api.constants import EventTypes diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py index a8a6ddc46610..f7f75320ba98 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock import synapse.rest.admin from synapse.http.site import XForwardedForRequest diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py index 239f7c9faf97..0289942f88d2 100644 --- a/tests/storage/test_event_push_actions.py +++ b/tests/storage/test_event_push_actions.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock from tests.unittest import HomeserverTestCase diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py index 5858c7fcc4d2..47556791f4f1 100644 --- a/tests/storage/test_monthly_active_users.py +++ b/tests/storage/test_monthly_active_users.py @@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock from twisted.internet import defer diff --git a/tests/test_distributor.py b/tests/test_distributor.py index b57f36e6ac26..6a6cf709f615 100644 --- a/tests/test_distributor.py +++ b/tests/test_distributor.py @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock, patch +from unittest.mock import Mock, patch from synapse.util.distributor import Distributor diff --git a/tests/test_federation.py b/tests/test_federation.py index fc9aab32d063..8928597d17db 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock from twisted.internet.defer import succeed diff --git a/tests/test_phone_home.py b/tests/test_phone_home.py index e7aed092c275..0f800a075bd4 100644 --- a/tests/test_phone_home.py +++ b/tests/test_phone_home.py @@ -14,8 +14,7 @@ # limitations under the License. import resource - -import mock +from unittest import mock from synapse.app.phone_stats_home import phone_stats_home diff --git a/tests/test_state.py b/tests/test_state.py index 1d2019699df6..83383d88724c 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -13,8 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. from typing import List, Optional - -from mock import Mock +from unittest.mock import Mock from twisted.internet import defer diff --git a/tests/test_terms_auth.py b/tests/test_terms_auth.py index a743cdc3a937..0df480db9f17 100644 --- a/tests/test_terms_auth.py +++ b/tests/test_terms_auth.py @@ -13,8 +13,7 @@ # limitations under the License. 
import json - -from mock import Mock +from unittest.mock import Mock from twisted.test.proto_helpers import MemoryReactorClock diff --git a/tests/test_utils/__init__.py b/tests/test_utils/__init__.py index 43898d8142d9..b557ffd69251 100644 --- a/tests/test_utils/__init__.py +++ b/tests/test_utils/__init__.py @@ -21,8 +21,7 @@ import warnings from asyncio import Future from typing import Any, Awaitable, Callable, TypeVar - -from mock import Mock +from unittest.mock import Mock import attr diff --git a/tests/test_visibility.py b/tests/test_visibility.py index 1b4dd47a8238..e502ac197e84 100644 --- a/tests/test_visibility.py +++ b/tests/test_visibility.py @@ -14,8 +14,7 @@ # limitations under the License. import logging from typing import Optional - -from mock import Mock +from unittest.mock import Mock from twisted.internet import defer from twisted.internet.defer import succeed diff --git a/tests/unittest.py b/tests/unittest.py index 57b6a395c791..92764434bdf8 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -21,8 +21,7 @@ import logging import time from typing import Callable, Dict, Iterable, Optional, Tuple, Type, TypeVar, Union - -from mock import Mock, patch +from unittest.mock import Mock, patch from canonicaljson import json diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index e434e21aeee6..2d1f9360e0b0 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -15,8 +15,7 @@ # limitations under the License. import logging from typing import Set - -import mock +from unittest import mock from twisted.internet import defer, reactor diff --git a/tests/util/caches/test_ttlcache.py b/tests/util/caches/test_ttlcache.py index 816795c13659..23018081e5a6 100644 --- a/tests/util/caches/test_ttlcache.py +++ b/tests/util/caches/test_ttlcache.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from mock import Mock +from unittest.mock import Mock from synapse.util.caches.ttlcache import TTLCache diff --git a/tests/util/test_file_consumer.py b/tests/util/test_file_consumer.py index 2012263184f6..d1372f6bc251 100644 --- a/tests/util/test_file_consumer.py +++ b/tests/util/test_file_consumer.py @@ -16,8 +16,7 @@ import threading from io import StringIO - -from mock import NonCallableMock +from unittest.mock import NonCallableMock from twisted.internet import defer, reactor diff --git a/tests/util/test_lrucache.py b/tests/util/test_lrucache.py index a739a6aaaf8c..ce4f1cc30a57 100644 --- a/tests/util/test_lrucache.py +++ b/tests/util/test_lrucache.py @@ -14,7 +14,7 @@ # limitations under the License. -from mock import Mock +from unittest.mock import Mock from synapse.util.caches.lrucache import LruCache from synapse.util.caches.treecache import TreeCache diff --git a/tests/utils.py b/tests/utils.py index a141ee64966f..2e34fad11c85 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -21,10 +21,9 @@ import uuid import warnings from typing import Type +from unittest.mock import Mock, patch from urllib import parse as urlparse -from mock import Mock, patch - from twisted.internet import defer from synapse.api.constants import EventTypes From e300ef64b16280ce2fdb7a8a9baef68b08aa9c88 Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Mon, 12 Apr 2021 15:13:55 +0100 Subject: [PATCH 022/222] Require AppserviceRegistrationType (#9548) This change ensures that the appservice registration behaviour follows the spec. 
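For illustration only, and not part of this patch: under the new behaviour an appservice must declare the `m.login.application_service` type when registering a user, alongside its appservice token. A minimal sketch of a compliant request from Python, with placeholder values:

    import requests

    # Placeholder homeserver URL and appservice token; substitute real values.
    response = requests.post(
        "http://localhost:8008/_matrix/client/r0/register",
        params={"access_token": "<appservice_token>"},
        json={"type": "m.login.application_service", "username": "as_user_kermit"},
    )
    # Requests that omit the "type" field are now rejected with a 400,
    # as the new tests below exercise.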
We decided to do this for Dendrite, so it made sense to also make a PR for synapse to correct the behaviour. --- changelog.d/9548.removal | 1 + synapse/api/constants.py | 5 ++++ synapse/rest/client/v2_alpha/register.py | 23 ++++++++++----- tests/rest/client/v2_alpha/test_register.py | 31 ++++++++++++++++++--- tests/test_mau.py | 23 ++++++++------- 5 files changed, 60 insertions(+), 23 deletions(-) create mode 100644 changelog.d/9548.removal diff --git a/changelog.d/9548.removal b/changelog.d/9548.removal new file mode 100644 index 000000000000..1fb88236c6b5 --- /dev/null +++ b/changelog.d/9548.removal @@ -0,0 +1 @@ +Make `/_matrix/client/r0/register` expect a type of `m.login.application_service` when an Application Service registers a user, to align with [the relevant spec](https://spec.matrix.org/unstable/application-service-api/#server-admin-style-permissions). diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 6856dab06c1b..a8ae41de480d 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -73,6 +73,11 @@ class LoginType: DUMMY = "m.login.dummy" +# This is used in the `type` parameter for /register when called by +# an appservice to register a new user. +APP_SERVICE_REGISTRATION_TYPE = "m.login.application_service" + + class EventTypes: Member = "m.room.member" Create = "m.room.create" diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index c212da0cb29c..4a064849c1b8 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -13,7 +13,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import hmac import logging import random @@ -22,7 +21,7 @@ import synapse import synapse.api.auth import synapse.types -from synapse.api.constants import LoginType +from synapse.api.constants import APP_SERVICE_REGISTRATION_TYPE, LoginType from synapse.api.errors import ( Codes, InteractiveAuthIncompleteError, @@ -430,15 +429,20 @@ async def on_POST(self, request): raise SynapseError(400, "Invalid username") desired_username = body["username"] - appservice = None - if self.auth.has_access_token(request): - appservice = self.auth.get_appservice_by_req(request) - # fork off as soon as possible for ASes which have completely # different registration flows to normal users # == Application Service Registration == - if appservice: + if body.get("type") == APP_SERVICE_REGISTRATION_TYPE: + if not self.auth.has_access_token(request): + raise SynapseError( + 400, + "Appservice token must be provided when using a type of m.login.application_service", + ) + + # Verify the AS + self.auth.get_appservice_by_req(request) + # Set the desired user according to the AS API (which uses the # 'user' key not 'username'). Since this is a new addition, we'll # fallback to 'username' if they gave one. 
@@ -459,6 +463,11 @@ async def on_POST(self, request): ) return 200, result + elif self.auth.has_access_token(request): + raise SynapseError( + 400, + "An access token should not be provided on requests to /register (except if type is m.login.application_service)", + ) # == Normal User Registration == (everyone else) if not self._registration_enabled: diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 27db4f551e2e..cd60ea70811e 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -14,7 +14,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - import datetime import json import os @@ -22,7 +21,7 @@ import pkg_resources import synapse.rest.admin -from synapse.api.constants import LoginType +from synapse.api.constants import APP_SERVICE_REGISTRATION_TYPE, LoginType from synapse.api.errors import Codes from synapse.appservice import ApplicationService from synapse.rest.client.v1 import login, logout @@ -59,7 +58,9 @@ def test_POST_appservice_registration_valid(self): ) self.hs.get_datastore().services_cache.append(appservice) - request_data = json.dumps({"username": "as_user_kermit"}) + request_data = json.dumps( + {"username": "as_user_kermit", "type": APP_SERVICE_REGISTRATION_TYPE} + ) channel = self.make_request( b"POST", self.url + b"?access_token=i_am_an_app_service", request_data @@ -69,9 +70,31 @@ def test_POST_appservice_registration_valid(self): det_data = {"user_id": user_id, "home_server": self.hs.hostname} self.assertDictContainsSubset(det_data, channel.json_body) + def test_POST_appservice_registration_no_type(self): + as_token = "i_am_an_app_service" + + appservice = ApplicationService( + as_token, + self.hs.config.server_name, + id="1234", + namespaces={"users": [{"regex": r"@as_user.*", "exclusive": True}]}, + sender="@as:test", + ) + + self.hs.get_datastore().services_cache.append(appservice) + request_data = json.dumps({"username": "as_user_kermit"}) + + channel = self.make_request( + b"POST", self.url + b"?access_token=i_am_an_app_service", request_data + ) + + self.assertEquals(channel.result["code"], b"400", channel.result) + def test_POST_appservice_registration_invalid(self): self.appservice = None # no application service exists - request_data = json.dumps({"username": "kermit"}) + request_data = json.dumps( + {"username": "kermit", "type": APP_SERVICE_REGISTRATION_TYPE} + ) channel = self.make_request( b"POST", self.url + b"?access_token=i_am_an_app_service", request_data ) diff --git a/tests/test_mau.py b/tests/test_mau.py index 75d28a42dfe5..7d92a16a8d5a 100644 --- a/tests/test_mau.py +++ b/tests/test_mau.py @@ -15,9 +15,7 @@ """Tests REST events for /rooms paths.""" -import json - -from synapse.api.constants import LoginType +from synapse.api.constants import APP_SERVICE_REGISTRATION_TYPE, LoginType from synapse.api.errors import Codes, HttpResponseException, SynapseError from synapse.appservice import ApplicationService from synapse.rest.client.v2_alpha import register, sync @@ -113,7 +111,7 @@ def test_as_ignores_mau(self): ) ) - self.create_user("as_kermit4", token=as_token) + self.create_user("as_kermit4", token=as_token, appservice=True) def test_allowed_after_a_month_mau(self): # Create and sync so that the MAU counts get updated @@ -232,14 +230,15 @@ def test_tracked_but_not_limited(self): self.reactor.advance(100) 
self.assertEqual(2, self.successResultOf(count)) - def create_user(self, localpart, token=None): - request_data = json.dumps( - { - "username": localpart, - "password": "monkey", - "auth": {"type": LoginType.DUMMY}, - } - ) + def create_user(self, localpart, token=None, appservice=False): + request_data = { + "username": localpart, + "password": "monkey", + "auth": {"type": LoginType.DUMMY}, + } + + if appservice: + request_data["type"] = APP_SERVICE_REGISTRATION_TYPE channel = self.make_request( "POST", From 3efde8b69a660be22c65e0a548b4a7d7f815cb5b Mon Sep 17 00:00:00 2001 From: Dan Callahan Date: Mon, 12 Apr 2021 15:27:05 +0100 Subject: [PATCH 023/222] Add option to skip unit tests when building debs (#9793) Signed-off-by: Dan Callahan --- changelog.d/9793.misc | 1 + debian/build_virtualenv | 23 ++++++++++++++++------- debian/changelog | 6 ++++++ scripts-dev/build_debian_packages | 17 +++++++++++------ 4 files changed, 34 insertions(+), 13 deletions(-) create mode 100644 changelog.d/9793.misc diff --git a/changelog.d/9793.misc b/changelog.d/9793.misc new file mode 100644 index 000000000000..6334689d26c1 --- /dev/null +++ b/changelog.d/9793.misc @@ -0,0 +1 @@ +Add option to skip unit tests when building Debian packages. diff --git a/debian/build_virtualenv b/debian/build_virtualenv index cad7d1688398..21caad90cc58 100755 --- a/debian/build_virtualenv +++ b/debian/build_virtualenv @@ -50,15 +50,24 @@ PACKAGE_BUILD_DIR="debian/matrix-synapse-py3" VIRTUALENV_DIR="${PACKAGE_BUILD_DIR}${DH_VIRTUALENV_INSTALL_ROOT}/matrix-synapse" TARGET_PYTHON="${VIRTUALENV_DIR}/bin/python" -# we copy the tests to a temporary directory so that we can put them on the -# PYTHONPATH without putting the uninstalled synapse on the pythonpath. -tmpdir=`mktemp -d` -trap "rm -r $tmpdir" EXIT +case "$DEB_BUILD_OPTIONS" in + *nocheck*) + # Skip running tests if "nocheck" present in $DEB_BUILD_OPTIONS + ;; + + *) + # Copy tests to a temporary directory so that we can put them on the + # PYTHONPATH without putting the uninstalled synapse on the pythonpath. + tmpdir=`mktemp -d` + trap "rm -r $tmpdir" EXIT + + cp -r tests "$tmpdir" -cp -r tests "$tmpdir" + PYTHONPATH="$tmpdir" \ + "${TARGET_PYTHON}" -m twisted.trial --reporter=text -j2 tests -PYTHONPATH="$tmpdir" \ - "${TARGET_PYTHON}" -m twisted.trial --reporter=text -j2 tests + ;; +esac # build the config file "${TARGET_PYTHON}" "${VIRTUALENV_DIR}/bin/generate_config" \ diff --git a/debian/changelog b/debian/changelog index 09602ff54bdd..5d526316fcf2 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.31.0+nmu1) UNRELEASED; urgency=medium + + * Skip tests when DEB_BUILD_OPTIONS contains "nocheck". + + -- Dan Callahan Mon, 12 Apr 2021 13:07:36 +0000 + matrix-synapse-py3 (1.31.0) stable; urgency=medium * New synapse release 1.31.0. 
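With this in place, the test stage can be skipped either by exporting the standard Debian flag directly or, via the helper script updated below, with its new `--no-check` option. Both invocations here are illustrative sketches:

```sh
# Skip the unit-test stage of a local Debian package build.
DEB_BUILD_OPTIONS=nocheck dpkg-buildpackage -us -uc

# Or have the container-based helper set DEB_BUILD_OPTIONS=nocheck itself
# ("bionic" is just an example distribution).
./scripts-dev/build_debian_packages --no-check bionic
```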
diff --git a/scripts-dev/build_debian_packages b/scripts-dev/build_debian_packages index bddc441df276..3bb6e2c7ea80 100755 --- a/scripts-dev/build_debian_packages +++ b/scripts-dev/build_debian_packages @@ -41,7 +41,7 @@ class Builder(object): self._lock = threading.Lock() self._failed = False - def run_build(self, dist): + def run_build(self, dist, skip_tests=False): """Build deb for a single distribution""" if self._failed: @@ -49,13 +49,13 @@ class Builder(object): raise Exception("failed") try: - self._inner_build(dist) + self._inner_build(dist, skip_tests) except Exception as e: print("build of %s failed: %s" % (dist, e), file=sys.stderr) self._failed = True raise - def _inner_build(self, dist): + def _inner_build(self, dist, skip_tests=False): projdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) os.chdir(projdir) @@ -99,6 +99,7 @@ class Builder(object): "--volume=" + debsdir + ":/debs", "-e", "TARGET_USERID=%i" % (os.getuid(), ), "-e", "TARGET_GROUPID=%i" % (os.getgid(), ), + "-e", "DEB_BUILD_OPTIONS=%s" % ("nocheck" if skip_tests else ""), "dh-venv-builder:" + tag, ], stdout=stdout, stderr=subprocess.STDOUT) @@ -122,7 +123,7 @@ class Builder(object): self.active_containers.remove(c) -def run_builds(dists, jobs=1): +def run_builds(dists, jobs=1, skip_tests=False): builder = Builder(redirect_stdout=(jobs > 1)) def sig(signum, _frame): @@ -131,7 +132,7 @@ def run_builds(dists, jobs=1): signal.signal(signal.SIGINT, sig) with ThreadPoolExecutor(max_workers=jobs) as e: - res = e.map(builder.run_build, dists) + res = e.map(lambda dist: builder.run_build(dist, skip_tests), dists) # make sure we consume the iterable so that exceptions are raised. for r in res: @@ -146,9 +147,13 @@ if __name__ == '__main__': '-j', '--jobs', type=int, default=1, help='specify the number of builds to run in parallel', ) + parser.add_argument( + '--no-check', action='store_true', + help='skip running tests after building', + ) parser.add_argument( 'dist', nargs='*', default=DISTS, help='a list of distributions to build for. Default: %(default)s', ) args = parser.parse_args() - run_builds(dists=args.dist, jobs=args.jobs) + run_builds(dists=args.dist, jobs=args.jobs, skip_tests=args.no_check) From a7044e5c0f1ef3c1d3844d4b44458b30ca6b80d4 Mon Sep 17 00:00:00 2001 From: Dan Callahan Date: Mon, 12 Apr 2021 16:00:28 +0100 Subject: [PATCH 024/222] Drop Python 3.5 from Trove classifier metadata. (#9782) * Drop Python 3.5 from Trove classifier metadata. Signed-off-by: Dan Callahan --- changelog.d/9782.misc | 1 + setup.py | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 changelog.d/9782.misc diff --git a/changelog.d/9782.misc b/changelog.d/9782.misc new file mode 100644 index 000000000000..ecf49cfee1a3 --- /dev/null +++ b/changelog.d/9782.misc @@ -0,0 +1 @@ +Synapse now requires Python 3.6 or later. It also requires Postgres 9.6 or later or SQLite 3.22 or later. 
diff --git a/setup.py b/setup.py
index 2eb70d0bb321..4530df348ab9 100755
--- a/setup.py
+++ b/setup.py
@@ -129,7 +129,6 @@ def exec_file(path_segments):
         "Topic :: Communications :: Chat",
         "License :: OSI Approved :: Apache Software License",
         "Programming Language :: Python :: 3 :: Only",
-        "Programming Language :: Python :: 3.5",
         "Programming Language :: Python :: 3.6",
         "Programming Language :: Python :: 3.7",
         "Programming Language :: Python :: 3.8",

From 1fc97ee876c6f383a6148897d82dbc58703ea9d1 Mon Sep 17 00:00:00 2001
From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com>
Date: Tue, 13 Apr 2021 11:26:37 +0200
Subject: [PATCH 025/222] Add an admin API to manage ratelimit for a specific
 user (#9648)

---
 changelog.d/9648.feature               |   1 +
 docs/admin_api/user_admin_api.rst      | 117 +++++++++-
 synapse/rest/admin/__init__.py         |   2 +
 synapse/rest/admin/users.py            | 111 ++++++++++
 synapse/storage/databases/main/room.py |  64 +++++-
 tests/rest/admin/test_user.py          | 284 +++++++++++++++++++++++++
 6 files changed, 573 insertions(+), 6 deletions(-)
 create mode 100644 changelog.d/9648.feature

diff --git a/changelog.d/9648.feature b/changelog.d/9648.feature
new file mode 100644
index 000000000000..bc77026039cb
--- /dev/null
+++ b/changelog.d/9648.feature
@@ -0,0 +1 @@
+Add an admin API to manage ratelimit for a specific user.
\ No newline at end of file
diff --git a/docs/admin_api/user_admin_api.rst b/docs/admin_api/user_admin_api.rst
index a8a5a2628c6c..dbce9c90b6c7 100644
--- a/docs/admin_api/user_admin_api.rst
+++ b/docs/admin_api/user_admin_api.rst
@@ -202,7 +202,7 @@ The following fields are returned in the JSON response body:
 - ``users`` - An array of objects, each containing information about a user.
   User objects contain the following fields:

-  - ``name`` - string - Fully-qualified user ID (ex. `@user:server.com`).
+  - ``name`` - string - Fully-qualified user ID (ex. ``@user:server.com``).
   - ``is_guest`` - bool - Status if that user is a guest account.
   - ``admin`` - bool - Status if that user is a server administrator.
   - ``user_type`` - string - Type of the user. Normal users are type ``None``.
@@ -864,3 +864,118 @@ The following parameters should be set in the URL:

 - ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must
   be local.
+
+Override ratelimiting for users
+===============================
+
+This API allows you to override or disable ratelimiting for a specific user.
+There are specific APIs to set, get and delete a ratelimit.
+
+Get status of ratelimit
+-----------------------
+
+The API is::
+
+  GET /_synapse/admin/v1/users/<user_id>/override_ratelimit
+
+To use it, you will need to authenticate by providing an ``access_token`` for a
+server admin: see `README.rst <README.rst>`_.
+
+A response body like the following is returned:
+
+.. code:: json
+
+    {
+        "messages_per_second": 0,
+        "burst_count": 0
+    }
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must
+  be local.
+
+**Response**
+
+The following fields are returned in the JSON response body:
+
+- ``messages_per_second`` - integer - The number of actions that can
+  be performed in a second. ``0`` means that ratelimiting is disabled for this user.
+- ``burst_count`` - integer - How many actions can be performed before
+  being limited.
+
+If **no** custom ratelimit is set, an empty JSON dict is returned.
+
+.. code:: json
+
+    {}
+
+Set ratelimit
+-------------
+
+The API is::
+
+  POST /_synapse/admin/v1/users/<user_id>/override_ratelimit
+
+To use it, you will need to authenticate by providing an ``access_token`` for a
+server admin: see `README.rst <README.rst>`_.
+
+A response body like the following is returned:
+
+.. code:: json
+
+    {
+        "messages_per_second": 0,
+        "burst_count": 0
+    }
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must
+  be local.
+
+Body parameters:
+
+- ``messages_per_second`` - positive integer, optional. The number of actions that can
+  be performed in a second. Defaults to ``0``.
+- ``burst_count`` - positive integer, optional. How many actions can be performed
+  before being limited. Defaults to ``0``.
+
+To disable ratelimiting for a user, set both values to ``0``.
+
+**Response**
+
+The following fields are returned in the JSON response body:
+
+- ``messages_per_second`` - integer - The number of actions that can
+  be performed in a second.
+- ``burst_count`` - integer - How many actions can be performed before
+  being limited.
+
+Delete ratelimit
+----------------
+
+The API is::
+
+  DELETE /_synapse/admin/v1/users/<user_id>/override_ratelimit
+
+To use it, you will need to authenticate by providing an ``access_token`` for a
+server admin: see `README.rst <README.rst>`_.
+
+An empty JSON dict is returned.
+
+.. code:: json
+
+    {}
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must
+  be local.
+
diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py
index 5daa795df1a4..2dec818a5f5d 100644
--- a/synapse/rest/admin/__init__.py
+++ b/synapse/rest/admin/__init__.py
@@ -54,6 +54,7 @@
     AccountValidityRenewServlet,
     DeactivateAccountRestServlet,
     PushersRestServlet,
+    RateLimitRestServlet,
     ResetPasswordRestServlet,
     SearchUsersRestServlet,
     ShadowBanRestServlet,
@@ -239,6 +240,7 @@ def register_servlets(hs, http_server):
     ShadowBanRestServlet(hs).register(http_server)
     ForwardExtremitiesRestServlet(hs).register(http_server)
     RoomEventContextServlet(hs).register(http_server)
+    RateLimitRestServlet(hs).register(http_server)


 def register_servlets_for_client_rest_resource(hs, http_server):
diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py
index 595898c259fb..04990c71fb57 100644
--- a/synapse/rest/admin/users.py
+++ b/synapse/rest/admin/users.py
@@ -981,3 +981,114 @@ async def on_POST(
         await self.store.set_shadow_banned(UserID.from_string(user_id), True)

         return 200, {}
+
+
+class RateLimitRestServlet(RestServlet):
+    """An admin API to override ratelimiting for a user.
+
+    Example:
+        POST /_synapse/admin/v1/users/@test:example.com/override_ratelimit
+        {
+            "messages_per_second": 0,
+            "burst_count": 0
+        }
+        200 OK
+        {
+            "messages_per_second": 0,
+            "burst_count": 0
+        }
+    """
+
+    PATTERNS = admin_patterns("/users/(?P<user_id>[^/]*)/override_ratelimit")
+
+    def __init__(self, hs: "HomeServer"):
+        self.hs = hs
+        self.store = hs.get_datastore()
+        self.auth = hs.get_auth()
+
+    async def on_GET(
+        self, request: SynapseRequest, user_id: str
+    ) -> Tuple[int, JsonDict]:
+        await assert_requester_is_admin(self.auth, request)
+
+        if not self.hs.is_mine_id(user_id):
+            raise SynapseError(400, "Can only lookup local users")
+
+        if not await self.store.get_user_by_id(user_id):
+            raise NotFoundError("User not found")
+
+        ratelimit = await self.store.get_ratelimit_for_user(user_id)
+
+        if ratelimit:
+            # Convert `null` to `0` for consistency; both values have the same
+            # effect in the ratelimit handler.
+            ret = {
+                "messages_per_second": 0
+                if ratelimit.messages_per_second is None
+                else ratelimit.messages_per_second,
+                "burst_count": 0
+                if ratelimit.burst_count is None
+                else ratelimit.burst_count,
+            }
+        else:
+            ret = {}
+
+        return 200, ret
+
+    async def on_POST(
+        self, request: SynapseRequest, user_id: str
+    ) -> Tuple[int, JsonDict]:
+        await assert_requester_is_admin(self.auth, request)
+
+        if not self.hs.is_mine_id(user_id):
+            raise SynapseError(400, "Only local users can be ratelimited")
+
+        if not await self.store.get_user_by_id(user_id):
+            raise NotFoundError("User not found")
+
+        body = parse_json_object_from_request(request, allow_empty_body=True)
+
+        messages_per_second = body.get("messages_per_second", 0)
+        burst_count = body.get("burst_count", 0)
+
+        if not isinstance(messages_per_second, int) or messages_per_second < 0:
+            raise SynapseError(
+                400,
+                "%r parameter must be a positive int" % (messages_per_second,),
+                errcode=Codes.INVALID_PARAM,
+            )
+
+        if not isinstance(burst_count, int) or burst_count < 0:
+            raise SynapseError(
+                400,
+                "%r parameter must be a positive int" % (burst_count,),
+                errcode=Codes.INVALID_PARAM,
+            )
+
+        await self.store.set_ratelimit_for_user(
+            user_id, messages_per_second, burst_count
+        )
+        ratelimit = await self.store.get_ratelimit_for_user(user_id)
+        assert ratelimit is not None
+
+        ret = {
+            "messages_per_second": ratelimit.messages_per_second,
+            "burst_count": ratelimit.burst_count,
+        }
+
+        return 200, ret
+
+    async def on_DELETE(
+        self, request: SynapseRequest, user_id: str
+    ) -> Tuple[int, JsonDict]:
+        await assert_requester_is_admin(self.auth, request)
+
+        if not self.hs.is_mine_id(user_id):
+            raise SynapseError(400, "Only local users can be ratelimited")
+
+        if not await self.store.get_user_by_id(user_id):
+            raise NotFoundError("User not found")
+
+        await self.store.delete_ratelimit_for_user(user_id)
+
+        return 200, {}
diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
index 9cbcd53026d8..47fb12f3f64f 100644
--- a/synapse/storage/databases/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -521,13 +521,11 @@ def _get_rooms_paginate_txn(txn):
     )

     @cached(max_entries=10000)
-    async def get_ratelimit_for_user(self, user_id):
-        """Check if there are any overrides for ratelimiting for the given
-        user
+    async def get_ratelimit_for_user(self, user_id: str) -> Optional[RatelimitOverride]:
+        """Check if there are any overrides for ratelimiting for the given user

         Args:
-            user_id (str)
-
+            user_id: user ID of the user
         Returns:
             RatelimitOverride if there is an override, else None. If the contents
             of RatelimitOverride are None or 0 then ratelimiting has been
@@ -549,6 +547,62 @@ async def get_ratelimit_for_user(self, user_id):
         else:
             return None

+    async def set_ratelimit_for_user(
+        self, user_id: str, messages_per_second: int, burst_count: int
+    ) -> None:
+        """Sets an overridden ratelimit for a user.
+        Args:
+            user_id: user ID of the user
+            messages_per_second: The number of actions that can be performed in a second.
+            burst_count: How many actions can be performed before being limited.
+        """
+
+        def set_ratelimit_txn(txn):
+            self.db_pool.simple_upsert_txn(
+                txn,
+                table="ratelimit_override",
+                keyvalues={"user_id": user_id},
+                values={
+                    "messages_per_second": messages_per_second,
+                    "burst_count": burst_count,
+                },
+            )
+
+            self._invalidate_cache_and_stream(
+                txn, self.get_ratelimit_for_user, (user_id,)
+            )
+
+        await self.db_pool.runInteraction("set_ratelimit", set_ratelimit_txn)
+
+    async def delete_ratelimit_for_user(self, user_id: str) -> None:
+        """Delete an overridden ratelimit for a user.
+        Args:
+            user_id: user ID of the user
+        """
+
+        def delete_ratelimit_txn(txn):
+            row = self.db_pool.simple_select_one_txn(
+                txn,
+                table="ratelimit_override",
+                keyvalues={"user_id": user_id},
+                retcols=["user_id"],
+                allow_none=True,
+            )
+
+            if not row:
+                return
+
+            # They are there, delete them.
+            self.db_pool.simple_delete_one_txn(
+                txn, "ratelimit_override", keyvalues={"user_id": user_id}
+            )
+
+            self._invalidate_cache_and_stream(
+                txn, self.get_ratelimit_for_user, (user_id,)
+            )
+
+        await self.db_pool.runInteraction("delete_ratelimit", delete_ratelimit_txn)
+
     @cached()
     async def get_retention_policy_for_room(self, room_id):
         """Get the retention policy for a given room.
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index e47fd3ded83d..5070c9698469 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -3011,3 +3011,287 @@ def test_success(self):
         # Ensure the user is shadow-banned (and the cache was cleared).
         result = self.get_success(self.store.get_user_by_access_token(other_user_token))
         self.assertTrue(result.shadow_banned)
+
+
+class RateLimitTestCase(unittest.HomeserverTestCase):
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+    ]
+
+    def prepare(self, reactor, clock, hs):
+        self.store = hs.get_datastore()
+
+        self.admin_user = self.register_user("admin", "pass", admin=True)
+        self.admin_user_tok = self.login("admin", "pass")
+
+        self.other_user = self.register_user("user", "pass")
+        self.url = (
+            "/_synapse/admin/v1/users/%s/override_ratelimit"
+            % urllib.parse.quote(self.other_user)
+        )
+
+    def test_no_auth(self):
+        """
+        Try to get information about a user without authentication.
+        """
+        channel = self.make_request("GET", self.url, b"{}")
+
+        self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
+
+        channel = self.make_request("POST", self.url, b"{}")
+
+        self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
+
+        channel = self.make_request("DELETE", self.url, b"{}")
+
+        self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
+
+    def test_requester_is_no_admin(self):
+        """
+        If the user is not a server admin, an error is returned.
+ """ + other_user_token = self.login("user", "pass") + + channel = self.make_request( + "GET", + self.url, + access_token=other_user_token, + ) + + self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) + + channel = self.make_request( + "POST", + self.url, + access_token=other_user_token, + ) + + self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) + + channel = self.make_request( + "DELETE", + self.url, + access_token=other_user_token, + ) + + self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) + + def test_user_does_not_exist(self): + """ + Tests that a lookup for a user that does not exist returns a 404 + """ + url = "/_synapse/admin/v1/users/@unknown_person:test/override_ratelimit" + + channel = self.make_request( + "GET", + url, + access_token=self.admin_user_tok, + ) + + self.assertEqual(404, channel.code, msg=channel.json_body) + self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) + + channel = self.make_request( + "POST", + url, + access_token=self.admin_user_tok, + ) + + self.assertEqual(404, channel.code, msg=channel.json_body) + self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) + + channel = self.make_request( + "DELETE", + url, + access_token=self.admin_user_tok, + ) + + self.assertEqual(404, channel.code, msg=channel.json_body) + self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) + + def test_user_is_not_local(self): + """ + Tests that a lookup for a user that is not a local returns a 400 + """ + url = ( + "/_synapse/admin/v1/users/@unknown_person:unknown_domain/override_ratelimit" + ) + + channel = self.make_request( + "GET", + url, + access_token=self.admin_user_tok, + ) + + self.assertEqual(400, channel.code, msg=channel.json_body) + self.assertEqual("Can only lookup local users", channel.json_body["error"]) + + channel = self.make_request( + "POST", + url, + access_token=self.admin_user_tok, + ) + + self.assertEqual(400, channel.code, msg=channel.json_body) + self.assertEqual( + "Only local users can be ratelimited", channel.json_body["error"] + ) + + channel = self.make_request( + "DELETE", + url, + access_token=self.admin_user_tok, + ) + + self.assertEqual(400, channel.code, msg=channel.json_body) + self.assertEqual( + "Only local users can be ratelimited", channel.json_body["error"] + ) + + def test_invalid_parameter(self): + """ + If parameters are invalid, an error is returned. 
+ """ + # messages_per_second is a string + channel = self.make_request( + "POST", + self.url, + access_token=self.admin_user_tok, + content={"messages_per_second": "string"}, + ) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + + # messages_per_second is negative + channel = self.make_request( + "POST", + self.url, + access_token=self.admin_user_tok, + content={"messages_per_second": -1}, + ) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + + # burst_count is a string + channel = self.make_request( + "POST", + self.url, + access_token=self.admin_user_tok, + content={"burst_count": "string"}, + ) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + + # burst_count is negative + channel = self.make_request( + "POST", + self.url, + access_token=self.admin_user_tok, + content={"burst_count": -1}, + ) + + self.assertEqual(400, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + + def test_return_zero_when_null(self): + """ + If values in database are `null` API should return an int `0` + """ + + self.get_success( + self.store.db_pool.simple_upsert( + table="ratelimit_override", + keyvalues={"user_id": self.other_user}, + values={ + "messages_per_second": None, + "burst_count": None, + }, + ) + ) + + # request status + channel = self.make_request( + "GET", + self.url, + access_token=self.admin_user_tok, + ) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(0, channel.json_body["messages_per_second"]) + self.assertEqual(0, channel.json_body["burst_count"]) + + def test_success(self): + """ + Rate-limiting (set/update/delete) should succeed for an admin. 
+ """ + # request status + channel = self.make_request( + "GET", + self.url, + access_token=self.admin_user_tok, + ) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertNotIn("messages_per_second", channel.json_body) + self.assertNotIn("burst_count", channel.json_body) + + # set ratelimit + channel = self.make_request( + "POST", + self.url, + access_token=self.admin_user_tok, + content={"messages_per_second": 10, "burst_count": 11}, + ) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(10, channel.json_body["messages_per_second"]) + self.assertEqual(11, channel.json_body["burst_count"]) + + # update ratelimit + channel = self.make_request( + "POST", + self.url, + access_token=self.admin_user_tok, + content={"messages_per_second": 20, "burst_count": 21}, + ) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(20, channel.json_body["messages_per_second"]) + self.assertEqual(21, channel.json_body["burst_count"]) + + # request status + channel = self.make_request( + "GET", + self.url, + access_token=self.admin_user_tok, + ) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(20, channel.json_body["messages_per_second"]) + self.assertEqual(21, channel.json_body["burst_count"]) + + # delete ratelimit + channel = self.make_request( + "DELETE", + self.url, + access_token=self.admin_user_tok, + ) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertNotIn("messages_per_second", channel.json_body) + self.assertNotIn("burst_count", channel.json_body) + + # request status + channel = self.make_request( + "GET", + self.url, + access_token=self.admin_user_tok, + ) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertNotIn("messages_per_second", channel.json_body) + self.assertNotIn("burst_count", channel.json_body) From 1d5f0e3529ec5acd889037c8ebcca2820ad003d5 Mon Sep 17 00:00:00 2001 From: Dan Callahan Date: Tue, 13 Apr 2021 10:41:34 +0100 Subject: [PATCH 026/222] Bump black configuration to target py36 (#9781) Signed-off-by: Dan Callahan --- changelog.d/9781.misc | 1 + pyproject.toml | 2 +- synapse/config/tls.py | 2 +- synapse/handlers/presence.py | 2 +- synapse/http/matrixfederationclient.py | 2 +- synapse/http/site.py | 2 +- synapse/storage/database.py | 8 ++++---- tests/replication/slave/storage/test_events.py | 2 +- tests/test_state.py | 2 +- tests/test_utils/event_injection.py | 6 +++--- tests/utils.py | 2 +- 11 files changed, 16 insertions(+), 15 deletions(-) create mode 100644 changelog.d/9781.misc diff --git a/changelog.d/9781.misc b/changelog.d/9781.misc new file mode 100644 index 000000000000..d1c73fc7419a --- /dev/null +++ b/changelog.d/9781.misc @@ -0,0 +1 @@ +Update Black configuration to target Python 3.6. diff --git a/pyproject.toml b/pyproject.toml index cd880d4e39bf..8bca1fa4efd9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -35,7 +35,7 @@ showcontent = true [tool.black] -target-version = ['py35'] +target-version = ['py36'] exclude = ''' ( diff --git a/synapse/config/tls.py b/synapse/config/tls.py index ad37b93c0252..85b5db4c40a8 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -270,7 +270,7 @@ def generate_config_section( tls_certificate_path, tls_private_key_path, acme_domain, - **kwargs + **kwargs, ): """If the acme_domain is specified acme will be enabled. 
If the TLS paths are not specified the default will be certs in the diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index c817f2952d0b..0047907cd9ca 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -1071,7 +1071,7 @@ async def get_new_events( room_ids=None, include_offline=True, explicit_room_id=None, - **kwargs + **kwargs, ) -> Tuple[List[UserPresenceState], int]: # The process for getting presence events are: # 1. Get the rooms the user is in. diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 5f01ebd3d472..ab47dec8f2f6 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -272,7 +272,7 @@ async def _send_request_with_optional_trailing_slash( self, request: MatrixFederationRequest, try_trailing_slash_on_400: bool = False, - **send_request_args + **send_request_args, ) -> IResponse: """Wrapper for _send_request which can optionally retry the request upon receiving a combination of a 400 HTTP response code and a diff --git a/synapse/http/site.py b/synapse/http/site.py index c0c873ce3205..32b5e19c0926 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -497,7 +497,7 @@ def __init__( resource, server_version_string, *args, - **kwargs + **kwargs, ): Site.__init__(self, resource, *args, **kwargs) diff --git a/synapse/storage/database.py b/synapse/storage/database.py index fa15b0ce5bc4..77ef29ec71eb 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -488,7 +488,7 @@ def new_transaction( exception_callbacks: List[_CallbackListEntry], func: "Callable[..., R]", *args: Any, - **kwargs: Any + **kwargs: Any, ) -> R: """Start a new database transaction with the given connection. @@ -622,7 +622,7 @@ async def runInteraction( func: "Callable[..., R]", *args: Any, db_autocommit: bool = False, - **kwargs: Any + **kwargs: Any, ) -> R: """Starts a transaction on the database and runs a given function @@ -682,7 +682,7 @@ async def runWithConnection( func: "Callable[..., R]", *args: Any, db_autocommit: bool = False, - **kwargs: Any + **kwargs: Any, ) -> R: """Wraps the .runWithConnection() method on the underlying db_pool. @@ -775,7 +775,7 @@ async def execute( desc: str, decoder: Optional[Callable[[Cursor], R]], query: str, - *args: Any + *args: Any, ) -> R: """Runs a single query for a result set. 
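The motivation for the bump is visible in the surrounding hunks: with `target-version = ['py36']`, Black leaves a trailing comma after `**kwargs` in function signatures, which is only valid syntax on Python 3.6 and later. A minimal illustration with a hypothetical function:

```python
# Accepted on Python 3.6+; a SyntaxError on Python 3.5, which is why the
# Black target could only move after 3.5 support was dropped.
def frobnicate(
    widget,
    *args,
    **kwargs,
):
    return widget
```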
diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py index 333374b183cd..db80a0bdbdaf 100644 --- a/tests/replication/slave/storage/test_events.py +++ b/tests/replication/slave/storage/test_events.py @@ -340,7 +340,7 @@ def build_event( prev_state: Optional[list] = None, redacts=None, push_actions: Iterable = frozenset(), - **content + **content, ): prev_events = prev_events or [] auth_events = auth_events or [] diff --git a/tests/test_state.py b/tests/test_state.py index 83383d88724c..0d626f49f6f1 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -38,7 +38,7 @@ def create_event( depth=2, event_id=None, prev_events: Optional[List[str]] = None, - **kwargs + **kwargs, ): global _next_event_id diff --git a/tests/test_utils/event_injection.py b/tests/test_utils/event_injection.py index c3c4a93e1f9b..3dfbf8f8a9e4 100644 --- a/tests/test_utils/event_injection.py +++ b/tests/test_utils/event_injection.py @@ -33,7 +33,7 @@ async def inject_member_event( membership: str, target: Optional[str] = None, extra_content: Optional[dict] = None, - **kwargs + **kwargs, ) -> EventBase: """Inject a membership event into a room.""" if target is None: @@ -58,7 +58,7 @@ async def inject_event( hs: synapse.server.HomeServer, room_version: Optional[str] = None, prev_event_ids: Optional[List[str]] = None, - **kwargs + **kwargs, ) -> EventBase: """Inject a generic event into a room @@ -83,7 +83,7 @@ async def create_event( hs: synapse.server.HomeServer, room_version: Optional[str] = None, prev_event_ids: Optional[List[str]] = None, - **kwargs + **kwargs, ) -> Tuple[EventBase, EventContext]: if room_version is None: room_version = await hs.get_datastore().get_room_version_id(kwargs["room_id"]) diff --git a/tests/utils.py b/tests/utils.py index 2e34fad11c85..c78d3e5ba706 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -190,7 +190,7 @@ def setup_test_homeserver( config=None, reactor=None, homeserver_to_use: Type[HomeServer] = TestHomeServer, - **kwargs + **kwargs, ): """ Setup a homeserver suitable for running tests against. Keyword arguments From c1dbe84c3dcd643f4acedba346046b25a117e3c3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 13 Apr 2021 11:51:10 +0100 Subject: [PATCH 027/222] Add release helper script (#9713) Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> --- changelog.d/9713.misc | 1 + scripts-dev/release.py | 244 +++++++++++++++++++++++++++++++++++++++++ setup.py | 7 ++ 3 files changed, 252 insertions(+) create mode 100644 changelog.d/9713.misc create mode 100755 scripts-dev/release.py diff --git a/changelog.d/9713.misc b/changelog.d/9713.misc new file mode 100644 index 000000000000..908e7a24595b --- /dev/null +++ b/changelog.d/9713.misc @@ -0,0 +1 @@ +Add release helper script for automating part of the Synapse release process. diff --git a/scripts-dev/release.py b/scripts-dev/release.py new file mode 100755 index 000000000000..1042fa48bc8c --- /dev/null +++ b/scripts-dev/release.py @@ -0,0 +1,244 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""An interactive script for doing a release. See `run()` below.
+"""
+
+import subprocess
+import sys
+from typing import Optional
+
+import click
+import git
+from packaging import version
+from redbaron import RedBaron
+
+
+@click.command()
+def run():
+    """An interactive script to walk through the initial stages of creating a
+    release, including creating release branch, updating changelog and pushing to
+    GitHub.
+
+    Requires the dev dependencies to be installed, which can be done via:
+
+        pip install -e .[dev]
+
+    """
+
+    # Make sure we're in a git repo.
+    try:
+        repo = git.Repo()
+    except git.InvalidGitRepositoryError:
+        raise click.ClickException("Not in Synapse repo.")
+
+    if repo.is_dirty():
+        raise click.ClickException("Uncommitted changes exist.")
+
+    click.secho("Updating git repo...")
+    repo.remote().fetch()
+
+    # Parse the AST and load the `__version__` node so that we can edit it
+    # later.
+    with open("synapse/__init__.py") as f:
+        red = RedBaron(f.read())
+
+    version_node = None
+    for node in red:
+        if node.type != "assignment":
+            continue
+
+        if node.target.type != "name":
+            continue
+
+        if node.target.value != "__version__":
+            continue
+
+        version_node = node
+        break
+
+    if not version_node:
+        print("Failed to find '__version__' definition in synapse/__init__.py")
+        sys.exit(1)
+
+    # Parse the current version.
+    current_version = version.parse(version_node.value.value.strip('"'))
+    assert isinstance(current_version, version.Version)
+
+    # Figure out what sort of release we're doing and calculate the new version.
+    rc = click.confirm("RC", default=True)
+    if current_version.pre:
+        # If the current version is an RC we don't need to bump any of the
+        # version numbers (other than the RC number).
+        base_version = "{}.{}.{}".format(
+            current_version.major,
+            current_version.minor,
+            current_version.micro,
+        )
+
+        if rc:
+            new_version = "{}.{}.{}rc{}".format(
+                current_version.major,
+                current_version.minor,
+                current_version.micro,
+                current_version.pre[1] + 1,
+            )
+        else:
+            new_version = base_version
+    else:
+        # If this is a new release cycle then we need to know if it's a major
+        # version bump or a hotfix.
+        release_type = click.prompt(
+            "Release type",
+            type=click.Choice(("major", "hotfix")),
+            show_choices=True,
+            default="major",
+        )
+
+        if release_type == "major":
+            base_version = new_version = "{}.{}.{}".format(
+                current_version.major,
+                current_version.minor + 1,
+                0,
+            )
+            if rc:
+                new_version = "{}.{}.{}rc1".format(
+                    current_version.major,
+                    current_version.minor + 1,
+                    0,
+                )
+
+        else:
+            base_version = new_version = "{}.{}.{}".format(
+                current_version.major,
+                current_version.minor,
+                current_version.micro + 1,
+            )
+            if rc:
+                new_version = "{}.{}.{}rc1".format(
+                    current_version.major,
+                    current_version.minor,
+                    current_version.micro + 1,
+                )
+
+    # Confirm the calculated version is OK.
+    if not click.confirm(f"Create new version: {new_version}?", default=True):
+        click.get_current_context().abort()
+
+    # Switch to the release branch.
+    release_branch_name = f"release-v{base_version}"
+    release_branch = find_ref(repo, release_branch_name)
+    if release_branch:
+        if release_branch.is_remote():
+            # If the release branch only exists on the remote we check it out
+            # locally.
+            repo.git.checkout(release_branch_name)
+            release_branch = repo.active_branch
+    else:
+        # If a branch doesn't exist we create one. We ask which branch it
+        # should be based off, defaulting to sensible values depending on the
+        # release type.
+        if current_version.is_prerelease:
+            default = release_branch_name
+        elif release_type == "major":
+            default = "develop"
+        else:
+            default = "master"
+
+        branch_name = click.prompt(
+            "Which branch should the release be based on?", default=default
+        )
+
+        base_branch = find_ref(repo, branch_name)
+        if not base_branch:
+            print(f"Could not find base branch {branch_name}!")
+            click.get_current_context().abort()
+
+        # Check out the base branch and ensure it's up to date
+        repo.head.reference = base_branch
+        repo.head.reset(index=True, working_tree=True)
+        if not base_branch.is_remote():
+            update_branch(repo)
+
+        # Create the new release branch
+        release_branch = repo.create_head(release_branch_name, commit=base_branch)
+
+    # Switch to the release branch and ensure it's up to date.
+    repo.git.checkout(release_branch_name)
+    update_branch(repo)
+
+    # Update the `__version__` variable and write it back to the file.
+    version_node.value = '"' + new_version + '"'
+    with open("synapse/__init__.py", "w") as f:
+        f.write(red.dumps())
+
+    # Generate changelogs
+    subprocess.run("python3 -m towncrier", shell=True)
+
+    # Generate Debian changelogs if it's not an RC.
+    if not rc:
+        subprocess.run(
+            f'dch -M -v {new_version} "New synapse release {new_version}."', shell=True
+        )
+        subprocess.run('dch -M -r -D stable ""', shell=True)
+
+    # Show the user the changes and ask if they want to edit the change log.
+    repo.git.add("-u")
+    subprocess.run("git diff --cached", shell=True)
+
+    if click.confirm("Edit changelog?", default=False):
+        click.edit(filename="CHANGES.md")
+
+    # Commit the changes.
+    repo.git.add("-u")
+    repo.git.commit(f"-m {new_version}")
+
+    # We give the option to bail here in case the user wants to make sure things
+    # are OK before pushing.
+    if not click.confirm("Push branch to github?", default=True):
+        print("")
+        print("Run when ready to push:")
+        print("")
+        print(f"\tgit push -u {repo.remote().name} {repo.active_branch.name}")
+        print("")
+        sys.exit(0)
+
+    # Otherwise, push and open the changelog in the browser.
+ repo.git.push("-u", repo.remote().name, repo.active_branch.name) + + click.launch( + f"https://github.com/matrix-org/synapse/blob/{repo.active_branch.name}/CHANGES.md" + ) + + +def find_ref(repo: git.Repo, ref_name: str) -> Optional[git.HEAD]: + """Find the branch/ref, looking first locally then in the remote.""" + if ref_name in repo.refs: + return repo.refs[ref_name] + elif ref_name in repo.remote().refs: + return repo.remote().refs[ref_name] + else: + return None + + +def update_branch(repo: git.Repo): + """Ensure branch is up to date if it has a remote""" + if repo.active_branch.tracking_branch(): + repo.git.merge(repo.active_branch.tracking_branch().name) + + +if __name__ == "__main__": + run() diff --git a/setup.py b/setup.py index 4530df348ab9..e2e488761dbf 100755 --- a/setup.py +++ b/setup.py @@ -103,6 +103,13 @@ def exec_file(path_segments): "flake8", ] +CONDITIONAL_REQUIREMENTS["dev"] = CONDITIONAL_REQUIREMENTS["lint"] + [ + # The following are used by the release script + "click==7.1.2", + "redbaron==0.9.2", + "GitPython==3.1.14", +] + CONDITIONAL_REQUIREMENTS["mypy"] = ["mypy==0.812", "mypy-zope==0.2.13"] # Dependencies which are exclusively required by unit test code. This is From 3efd98aa1cb0ff8106cebb0d76fd5761c25d5250 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 13 Apr 2021 14:23:43 +0100 Subject: [PATCH 028/222] 1.32.0rc1 --- CHANGES.md | 72 ++++++++++++++++++++++++++++++++++++++++ changelog.d/8926.bugfix | 1 - changelog.d/9401.removal | 1 - changelog.d/9491.feature | 1 - changelog.d/9548.removal | 1 - changelog.d/9648.feature | 1 - changelog.d/9654.feature | 1 - changelog.d/9661.misc | 1 - changelog.d/9682.misc | 1 - changelog.d/9685.misc | 1 - changelog.d/9686.misc | 1 - changelog.d/9691.feature | 1 - changelog.d/9700.feature | 1 - changelog.d/9710.feature | 1 - changelog.d/9711.bugfix | 1 - changelog.d/9713.misc | 1 - changelog.d/9717.feature | 1 - changelog.d/9718.removal | 1 - changelog.d/9719.doc | 1 - changelog.d/9725.bugfix | 1 - changelog.d/9730.misc | 1 - changelog.d/9735.feature | 1 - changelog.d/9736.misc | 1 - changelog.d/9742.misc | 1 - changelog.d/9743.misc | 1 - changelog.d/9753.misc | 1 - changelog.d/9765.docker | 1 - changelog.d/9766.feature | 1 - changelog.d/9769.misc | 1 - changelog.d/9770.bugfix | 1 - changelog.d/9771.misc | 1 - changelog.d/9772.misc | 1 - changelog.d/9780.bugfix | 1 - changelog.d/9781.misc | 1 - changelog.d/9782.misc | 1 - changelog.d/9793.misc | 1 - synapse/__init__.py | 2 +- 37 files changed, 73 insertions(+), 36 deletions(-) delete mode 100644 changelog.d/8926.bugfix delete mode 100644 changelog.d/9401.removal delete mode 100644 changelog.d/9491.feature delete mode 100644 changelog.d/9548.removal delete mode 100644 changelog.d/9648.feature delete mode 100644 changelog.d/9654.feature delete mode 100644 changelog.d/9661.misc delete mode 100644 changelog.d/9682.misc delete mode 100644 changelog.d/9685.misc delete mode 100644 changelog.d/9686.misc delete mode 100644 changelog.d/9691.feature delete mode 100644 changelog.d/9700.feature delete mode 100644 changelog.d/9710.feature delete mode 100644 changelog.d/9711.bugfix delete mode 100644 changelog.d/9713.misc delete mode 100644 changelog.d/9717.feature delete mode 100644 changelog.d/9718.removal delete mode 100644 changelog.d/9719.doc delete mode 100644 changelog.d/9725.bugfix delete mode 100644 changelog.d/9730.misc delete mode 100644 changelog.d/9735.feature delete mode 100644 changelog.d/9736.misc delete mode 100644 changelog.d/9742.misc delete mode 100644 
changelog.d/9743.misc delete mode 100644 changelog.d/9753.misc delete mode 100644 changelog.d/9765.docker delete mode 100644 changelog.d/9766.feature delete mode 100644 changelog.d/9769.misc delete mode 100644 changelog.d/9770.bugfix delete mode 100644 changelog.d/9771.misc delete mode 100644 changelog.d/9772.misc delete mode 100644 changelog.d/9780.bugfix delete mode 100644 changelog.d/9781.misc delete mode 100644 changelog.d/9782.misc delete mode 100644 changelog.d/9793.misc diff --git a/CHANGES.md b/CHANGES.md index 27483532d038..6e7cc5374a95 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,75 @@ +Synapse 1.32.0rc1 (2021-04-13) +============================== + +**Note:** This release requires Python 3.6+ and Postgres 9.6+ or SQLite 3.22+. + +This release removes the deprecated `GET /_synapse/admin/v1/users/` admin API. Please use the [v2 API](https://github.com/matrix-org/synapse/blob/develop/docs/admin_api/user_admin_api.rst#query-user-account) instead, which has improved capabilities. + +This release requires Application Services to use type `m.login.application_services` when registering users via the `/_matrix/client/r0/register` endpoint to comply with the spec. Please ensure your Application Services are up to date. + +Features +-------- + +- Add a Synapse module for routing presence updates between users. ([\#9491](https://github.com/matrix-org/synapse/issues/9491)) +- Add an admin API to manage ratelimit for a specific user. ([\#9648](https://github.com/matrix-org/synapse/issues/9648)) +- Include request information in structured logging output. ([\#9654](https://github.com/matrix-org/synapse/issues/9654)) +- Add `order_by` to the admin API `GET /_synapse/admin/v2/users`. Contributed by @dklimpel. ([\#9691](https://github.com/matrix-org/synapse/issues/9691)) +- Replace the `room_invite_state_types` configuration setting with `room_prejoin_state`. ([\#9700](https://github.com/matrix-org/synapse/issues/9700)) +- Add experimental support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083): restricting room access via group membership. ([\#9717](https://github.com/matrix-org/synapse/issues/9717), [\#9735](https://github.com/matrix-org/synapse/issues/9735)) +- Update experimental support for Spaces: include `m.room.create` in the room state sent with room-invites. ([\#9710](https://github.com/matrix-org/synapse/issues/9710)) +- Synapse now requires Python 3.6 or later. It also requires Postgres 9.6 or later or SQLite 3.22 or later. ([\#9766](https://github.com/matrix-org/synapse/issues/9766)) + + +Bugfixes +-------- + +- Prevent `synapse_forward_extremities` and `synapse_excess_extremity_events` Prometheus metrics from initially reporting zero-values after startup. ([\#8926](https://github.com/matrix-org/synapse/issues/8926)) +- Fix recently added ratelimits to correctly honour the application service `rate_limited` flag. ([\#9711](https://github.com/matrix-org/synapse/issues/9711)) +- Fix longstanding bug which caused `duplicate key value violates unique constraint "remote_media_cache_thumbnails_media_origin_media_id_thumbna_key"` errors. ([\#9725](https://github.com/matrix-org/synapse/issues/9725)) +- Fix bug where sharded federation senders could get stuck repeatedly querying the DB in a loop, using lots of CPU. ([\#9770](https://github.com/matrix-org/synapse/issues/9770)) +- Fix duplicate logging of exceptions thrown during federation transaction processing. 
([\#9780](https://github.com/matrix-org/synapse/issues/9780)) + + +Updates to the Docker image +--------------------------- + +- Move opencontainers labels to the final Docker image such that users can inspect them. ([\#9765](https://github.com/matrix-org/synapse/issues/9765)) + + +Improved Documentation +---------------------- + +- Make the `allowed_local_3pids` regex example in the sample config stricter. ([\#9719](https://github.com/matrix-org/synapse/issues/9719)) + + +Deprecations and Removals +------------------------- + +- Remove old admin API `GET /_synapse/admin/v1/users/`. ([\#9401](https://github.com/matrix-org/synapse/issues/9401)) +- Make `/_matrix/client/r0/register` expect a type of `m.login.application_service` when an Application Service registers a user, to align with [the relevant spec](https://spec.matrix.org/unstable/application-service-api/#server-admin-style-permissions). ([\#9548](https://github.com/matrix-org/synapse/issues/9548)) +- Replace deprecated `imp` module with successor `importlib`. Contributed by Cristina Muñoz. ([\#9718](https://github.com/matrix-org/synapse/issues/9718)) + + +Internal Changes +---------------- + +- Experiment with GitHub Actions for CI. ([\#9661](https://github.com/matrix-org/synapse/issues/9661)) +- Introduce flake8-bugbear to the test suite and fix some of its lint violations. ([\#9682](https://github.com/matrix-org/synapse/issues/9682)) +- Update `scripts-dev/complement.sh` to use a local checkout of Complement, allow running a subset of tests and have it use Synapse's Complement test blacklist. ([\#9685](https://github.com/matrix-org/synapse/issues/9685)) +- Improve Jaeger tracing for `to_device` messages. ([\#9686](https://github.com/matrix-org/synapse/issues/9686)) +- Add release helper script for automating part of the Synapse release process. ([\#9713](https://github.com/matrix-org/synapse/issues/9713)) +- Add type hints to expiring cache. ([\#9730](https://github.com/matrix-org/synapse/issues/9730)) +- Convert various testcases to `HomeserverTestCase`. ([\#9736](https://github.com/matrix-org/synapse/issues/9736)) +- Start linting mypy with `no_implicit_optional`. ([\#9742](https://github.com/matrix-org/synapse/issues/9742)) +- Add missing type hints to federation handler and server. ([\#9743](https://github.com/matrix-org/synapse/issues/9743)) +- Check that a `ConfigError` is raised, rather than simply `Exception`, when appropriate in homeserver config file generation tests. ([\#9753](https://github.com/matrix-org/synapse/issues/9753)) +- Fix incompatibility with `tox` 2.5. ([\#9769](https://github.com/matrix-org/synapse/issues/9769)) +- Enable Complement tests for [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946): Spaces Summary API. ([\#9771](https://github.com/matrix-org/synapse/issues/9771)) +- Use mock from the standard library instead of a separate package. ([\#9772](https://github.com/matrix-org/synapse/issues/9772)) +- Update Black configuration to target Python 3.6. ([\#9781](https://github.com/matrix-org/synapse/issues/9781)) +- Add option to skip unit tests when building Debian packages. 
([\#9793](https://github.com/matrix-org/synapse/issues/9793)) + + Synapse 1.31.0 (2021-04-06) =========================== diff --git a/changelog.d/8926.bugfix b/changelog.d/8926.bugfix deleted file mode 100644 index aad7bd83ce36..000000000000 --- a/changelog.d/8926.bugfix +++ /dev/null @@ -1 +0,0 @@ -Prevent `synapse_forward_extremities` and `synapse_excess_extremity_events` Prometheus metrics from initially reporting zero-values after startup. diff --git a/changelog.d/9401.removal b/changelog.d/9401.removal deleted file mode 100644 index 9c813e021574..000000000000 --- a/changelog.d/9401.removal +++ /dev/null @@ -1 +0,0 @@ -Remove old admin API `GET /_synapse/admin/v1/users/`. \ No newline at end of file diff --git a/changelog.d/9491.feature b/changelog.d/9491.feature deleted file mode 100644 index 8b56a95a44e2..000000000000 --- a/changelog.d/9491.feature +++ /dev/null @@ -1 +0,0 @@ -Add a Synapse module for routing presence updates between users. diff --git a/changelog.d/9548.removal b/changelog.d/9548.removal deleted file mode 100644 index 1fb88236c6b5..000000000000 --- a/changelog.d/9548.removal +++ /dev/null @@ -1 +0,0 @@ -Make `/_matrix/client/r0/register` expect a type of `m.login.application_service` when an Application Service registers a user, to align with [the relevant spec](https://spec.matrix.org/unstable/application-service-api/#server-admin-style-permissions). diff --git a/changelog.d/9648.feature b/changelog.d/9648.feature deleted file mode 100644 index bc77026039cb..000000000000 --- a/changelog.d/9648.feature +++ /dev/null @@ -1 +0,0 @@ -Add an admin API to manage ratelimit for a specific user. \ No newline at end of file diff --git a/changelog.d/9654.feature b/changelog.d/9654.feature deleted file mode 100644 index a54c96cf19d1..000000000000 --- a/changelog.d/9654.feature +++ /dev/null @@ -1 +0,0 @@ -Include request information in structured logging output. diff --git a/changelog.d/9661.misc b/changelog.d/9661.misc deleted file mode 100644 index b5beb4626c8c..000000000000 --- a/changelog.d/9661.misc +++ /dev/null @@ -1 +0,0 @@ -Experiment with GitHub Actions for CI. diff --git a/changelog.d/9682.misc b/changelog.d/9682.misc deleted file mode 100644 index 428a466facfb..000000000000 --- a/changelog.d/9682.misc +++ /dev/null @@ -1 +0,0 @@ -Introduce flake8-bugbear to the test suite and fix some of its lint violations. diff --git a/changelog.d/9685.misc b/changelog.d/9685.misc deleted file mode 100644 index 0506d8af0c75..000000000000 --- a/changelog.d/9685.misc +++ /dev/null @@ -1 +0,0 @@ -Update `scripts-dev/complement.sh` to use a local checkout of Complement, allow running a subset of tests and have it use Synapse's Complement test blacklist. \ No newline at end of file diff --git a/changelog.d/9686.misc b/changelog.d/9686.misc deleted file mode 100644 index bb2335acf906..000000000000 --- a/changelog.d/9686.misc +++ /dev/null @@ -1 +0,0 @@ -Improve Jaeger tracing for `to_device` messages. diff --git a/changelog.d/9691.feature b/changelog.d/9691.feature deleted file mode 100644 index 3c711db4f56e..000000000000 --- a/changelog.d/9691.feature +++ /dev/null @@ -1 +0,0 @@ -Add `order_by` to the admin API `GET /_synapse/admin/v2/users`. Contributed by @dklimpel. \ No newline at end of file diff --git a/changelog.d/9700.feature b/changelog.d/9700.feature deleted file mode 100644 index 037de8367f93..000000000000 --- a/changelog.d/9700.feature +++ /dev/null @@ -1 +0,0 @@ -Replace the `room_invite_state_types` configuration setting with `room_prejoin_state`. 
diff --git a/changelog.d/9710.feature b/changelog.d/9710.feature deleted file mode 100644 index fce308cc41df..000000000000 --- a/changelog.d/9710.feature +++ /dev/null @@ -1 +0,0 @@ -Experimental Spaces support: include `m.room.create` in the room state sent with room-invites. diff --git a/changelog.d/9711.bugfix b/changelog.d/9711.bugfix deleted file mode 100644 index 4ca3438d46b4..000000000000 --- a/changelog.d/9711.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix recently added ratelimits to correctly honour the application service `rate_limited` flag. diff --git a/changelog.d/9713.misc b/changelog.d/9713.misc deleted file mode 100644 index 908e7a24595b..000000000000 --- a/changelog.d/9713.misc +++ /dev/null @@ -1 +0,0 @@ -Add release helper script for automating part of the Synapse release process. diff --git a/changelog.d/9717.feature b/changelog.d/9717.feature deleted file mode 100644 index c2c74f13d578..000000000000 --- a/changelog.d/9717.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083): restricting room access via group membership. diff --git a/changelog.d/9718.removal b/changelog.d/9718.removal deleted file mode 100644 index 6de7814217d4..000000000000 --- a/changelog.d/9718.removal +++ /dev/null @@ -1 +0,0 @@ -Replace deprecated `imp` module with successor `importlib`. Contributed by Cristina Muñoz. diff --git a/changelog.d/9719.doc b/changelog.d/9719.doc deleted file mode 100644 index f018606dd6e1..000000000000 --- a/changelog.d/9719.doc +++ /dev/null @@ -1 +0,0 @@ -Make the allowed_local_3pids regex example in the sample config stricter. diff --git a/changelog.d/9725.bugfix b/changelog.d/9725.bugfix deleted file mode 100644 index 71283685c80d..000000000000 --- a/changelog.d/9725.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix longstanding bug which caused `duplicate key value violates unique constraint "remote_media_cache_thumbnails_media_origin_media_id_thumbna_key"` errors. diff --git a/changelog.d/9730.misc b/changelog.d/9730.misc deleted file mode 100644 index 8063059b0b37..000000000000 --- a/changelog.d/9730.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to expiring cache. diff --git a/changelog.d/9735.feature b/changelog.d/9735.feature deleted file mode 100644 index c2c74f13d578..000000000000 --- a/changelog.d/9735.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083): restricting room access via group membership. diff --git a/changelog.d/9736.misc b/changelog.d/9736.misc deleted file mode 100644 index 1e445e4344e0..000000000000 --- a/changelog.d/9736.misc +++ /dev/null @@ -1 +0,0 @@ -Convert various testcases to `HomeserverTestCase`. diff --git a/changelog.d/9742.misc b/changelog.d/9742.misc deleted file mode 100644 index 681ab04df8ab..000000000000 --- a/changelog.d/9742.misc +++ /dev/null @@ -1 +0,0 @@ -Start linting mypy with `no_implicit_optional`. \ No newline at end of file diff --git a/changelog.d/9743.misc b/changelog.d/9743.misc deleted file mode 100644 index c2f75c1df911..000000000000 --- a/changelog.d/9743.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints to federation handler and server. diff --git a/changelog.d/9753.misc b/changelog.d/9753.misc deleted file mode 100644 index 31184fe0bd1a..000000000000 --- a/changelog.d/9753.misc +++ /dev/null @@ -1 +0,0 @@ -Check that a `ConfigError` is raised, rather than simply `Exception`, when appropriate in homeserver config file generation tests. 
\ No newline at end of file diff --git a/changelog.d/9765.docker b/changelog.d/9765.docker deleted file mode 100644 index f170a3671447..000000000000 --- a/changelog.d/9765.docker +++ /dev/null @@ -1 +0,0 @@ -Move opencontainers labels to the final Docker image such that users can inspect them. diff --git a/changelog.d/9766.feature b/changelog.d/9766.feature deleted file mode 100644 index ecf49cfee1a3..000000000000 --- a/changelog.d/9766.feature +++ /dev/null @@ -1 +0,0 @@ -Synapse now requires Python 3.6 or later. It also requires Postgres 9.6 or later or SQLite 3.22 or later. diff --git a/changelog.d/9769.misc b/changelog.d/9769.misc deleted file mode 100644 index 042a50615f06..000000000000 --- a/changelog.d/9769.misc +++ /dev/null @@ -1 +0,0 @@ -Fix incompatibility with `tox` 2.5. diff --git a/changelog.d/9770.bugfix b/changelog.d/9770.bugfix deleted file mode 100644 index baf93138de41..000000000000 --- a/changelog.d/9770.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where sharded federation senders could get stuck repeatedly querying the DB in a loop, using lots of CPU. diff --git a/changelog.d/9771.misc b/changelog.d/9771.misc deleted file mode 100644 index 42d651d4cc99..000000000000 --- a/changelog.d/9771.misc +++ /dev/null @@ -1 +0,0 @@ -Enable Complement tests for [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946): Spaces Summary API. diff --git a/changelog.d/9772.misc b/changelog.d/9772.misc deleted file mode 100644 index ec7d94cc2576..000000000000 --- a/changelog.d/9772.misc +++ /dev/null @@ -1 +0,0 @@ -Use mock from the standard library instead of a separate package. diff --git a/changelog.d/9780.bugfix b/changelog.d/9780.bugfix deleted file mode 100644 index 70985a050f11..000000000000 --- a/changelog.d/9780.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix duplicate logging of exceptions thrown during federation transaction processing. diff --git a/changelog.d/9781.misc b/changelog.d/9781.misc deleted file mode 100644 index d1c73fc7419a..000000000000 --- a/changelog.d/9781.misc +++ /dev/null @@ -1 +0,0 @@ -Update Black configuration to target Python 3.6. diff --git a/changelog.d/9782.misc b/changelog.d/9782.misc deleted file mode 100644 index ecf49cfee1a3..000000000000 --- a/changelog.d/9782.misc +++ /dev/null @@ -1 +0,0 @@ -Synapse now requires Python 3.6 or later. It also requires Postgres 9.6 or later or SQLite 3.22 or later. diff --git a/changelog.d/9793.misc b/changelog.d/9793.misc deleted file mode 100644 index 6334689d26c1..000000000000 --- a/changelog.d/9793.misc +++ /dev/null @@ -1 +0,0 @@ -Add option to skip unit tests when building Debian packages. diff --git a/synapse/__init__.py b/synapse/__init__.py index 1d2883acf67d..125a73d37813 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -48,7 +48,7 @@ except ImportError: pass -__version__ = "1.31.0" +__version__ = "1.32.0rc1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From d9bd181a3fd1431af784c525e7fe552e0bcad48e Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 13 Apr 2021 14:39:06 +0100 Subject: [PATCH 029/222] Update changelog for v1.32.0 --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 6e7cc5374a95..41908f84be0b 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -47,12 +47,12 @@ Deprecations and Removals - Remove old admin API `GET /_synapse/admin/v1/users/`. 
([\#9401](https://github.com/matrix-org/synapse/issues/9401)) - Make `/_matrix/client/r0/register` expect a type of `m.login.application_service` when an Application Service registers a user, to align with [the relevant spec](https://spec.matrix.org/unstable/application-service-api/#server-admin-style-permissions). ([\#9548](https://github.com/matrix-org/synapse/issues/9548)) -- Replace deprecated `imp` module with successor `importlib`. Contributed by Cristina Muñoz. ([\#9718](https://github.com/matrix-org/synapse/issues/9718)) Internal Changes ---------------- +- Replace deprecated `imp` module with successor `importlib`. Contributed by Cristina Muñoz. ([\#9718](https://github.com/matrix-org/synapse/issues/9718)) - Experiment with GitHub Actions for CI. ([\#9661](https://github.com/matrix-org/synapse/issues/9661)) - Introduce flake8-bugbear to the test suite and fix some of its lint violations. ([\#9682](https://github.com/matrix-org/synapse/issues/9682)) - Update `scripts-dev/complement.sh` to use a local checkout of Complement, allow running a subset of tests and have it use Synapse's Complement test blacklist. ([\#9685](https://github.com/matrix-org/synapse/issues/9685)) From f16c6cf59aad485cac83ce9d6d87842ae53adedf Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 14 Apr 2021 12:06:19 +0100 Subject: [PATCH 030/222] Add note to docker docs explaining platform support (#9801) Context is in https://github.com/matrix-org/synapse/issues/9764#issuecomment-818615894. I struggled to find a more official link for this. The problem occurs when using WSL1 instead of WSL2, which some Windows platforms (at least Server 2019) still don't have. Docker have updated their documentation to paint a much happier picture now given WSL2's support. The last sentence here can probably be removed once WSL1 is no longer around... though that will likely not be for a very long time. --- changelog.d/9801.doc | 1 + docker/README.md | 9 ++++++--- 2 files changed, 7 insertions(+), 3 deletions(-) create mode 100644 changelog.d/9801.doc diff --git a/changelog.d/9801.doc b/changelog.d/9801.doc new file mode 100644 index 000000000000..8b8b9d01d493 --- /dev/null +++ b/changelog.d/9801.doc @@ -0,0 +1 @@ +Add a note to the docker docs mentioning that we mirror upstream's supported Docker platforms. diff --git a/docker/README.md b/docker/README.md index 3a7dc585e7b5..b65bcea636e8 100644 --- a/docker/README.md +++ b/docker/README.md @@ -2,9 +2,12 @@ This Docker image will run Synapse as a single process. By default it uses a sqlite database; for production use you should connect it to a separate -postgres database. +postgres database. The image also does *not* provide a TURN server. -The image also does *not* provide a TURN server. +This image should work on all platforms that are supported by Docker upstream. +Note that Docker's WSL1-backend Linux Containers on Windows +platform is [experimental](https://github.com/docker/for-win/issues/6470) and +is not supported by this image. ## Volumes @@ -208,4 +211,4 @@ healthcheck: ## Using jemalloc Jemalloc is embedded in the image and will be used instead of the default allocator.
-You can read about jemalloc by reading the Synapse [README](../README.md) \ No newline at end of file +You can read about jemalloc by reading the Synapse [README](../README.md) From 7e460ec2a566b19bbcda63bc04b1e422127a99b3 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 14 Apr 2021 13:54:49 +0100 Subject: [PATCH 031/222] Add a dockerfile for running a set of Synapse worker processes (#9162) This PR adds a Dockerfile and some supporting files to the `docker/` directory. The Dockerfile's intention is to spin up a container with: * A Synapse main process. * Any desired worker processes, defined by a `SYNAPSE_WORKER_TYPES` environment variable supplied at runtime. * A redis for worker communication. * An nginx for routing traffic. * A supervisord to start all worker processes and monitor them if any go down. Note that **this is not currently intended to be used in production**. If you'd like to use Synapse workers with Docker, instead make use of the official image, with one worker per container. The purpose of this dockerfile is currently to allow testing Synapse in worker mode with the [Complement](https://github.com/matrix-org/complement/) test suite. `configure_workers_and_start.py` is where most of the magic happens in this PR. It reads from environment variables (documented in the file) and creates all necessary config files for the processes. It is the entrypoint of the Dockerfile, and thus is run any time the docker container is spun up, recreating all config files in case you want to use a different set of workers. One can specify which workers they'd like to use by setting the `SYNAPSE_WORKER_TYPES` environment variable (as a comma-separated list of arbitrary worker names) or by setting it to `*` for all worker processes. We will be using the latter in CI. Huge thanks to @MatMaul for helping get this all working :tada: This PR is paired with its equivalent on the Complement side: https://github.com/matrix-org/complement/pull/62. Note, for the purpose of testing this PR before it's merged: You'll need to (re)build the base Synapse docker image for everything to work (`matrixdotorg/synapse:latest`). Then build the worker-based docker image on top (`matrixdotorg/synapse:workers`). --- changelog.d/9162.misc | 1 + docker/Dockerfile-workers | 23 + docker/README-testing.md | 140 ++++++ docker/README.md | 12 +- docker/conf-workers/nginx.conf.j2 | 27 ++ docker/conf-workers/shared.yaml.j2 | 9 + docker/conf-workers/supervisord.conf.j2 | 41 ++ docker/conf-workers/worker.yaml.j2 | 26 ++ docker/conf/homeserver.yaml | 4 +- docker/conf/log.config | 32 +- docker/configure_workers_and_start.py | 558 ++++++++++++++++++++++++ 11 files changed, 867 insertions(+), 6 deletions(-) create mode 100644 changelog.d/9162.misc create mode 100644 docker/Dockerfile-workers create mode 100644 docker/README-testing.md create mode 100644 docker/conf-workers/nginx.conf.j2 create mode 100644 docker/conf-workers/shared.yaml.j2 create mode 100644 docker/conf-workers/supervisord.conf.j2 create mode 100644 docker/conf-workers/worker.yaml.j2 create mode 100755 docker/configure_workers_and_start.py diff --git a/changelog.d/9162.misc b/changelog.d/9162.misc new file mode 100644 index 000000000000..1083da8a7af8 --- /dev/null +++ b/changelog.d/9162.misc @@ -0,0 +1 @@ +Add a dockerfile for running Synapse in worker-mode under Complement.
\ No newline at end of file diff --git a/docker/Dockerfile-workers b/docker/Dockerfile-workers new file mode 100644 index 000000000000..969cf9728658 --- /dev/null +++ b/docker/Dockerfile-workers @@ -0,0 +1,23 @@ +# Inherit from the official Synapse docker image +FROM matrixdotorg/synapse + +# Install deps +RUN apt-get update +RUN apt-get install -y supervisor redis nginx + +# Remove the default nginx sites +RUN rm /etc/nginx/sites-enabled/default + +# Copy Synapse worker, nginx and supervisord configuration template files +COPY ./docker/conf-workers/* /conf/ + +# Expose nginx listener port +EXPOSE 8080/tcp + +# Volume for user-editable config files, logs etc. +VOLUME ["/data"] + +# A script to read environment variables and create the necessary +# files to run the desired worker configuration. Will start supervisord. +COPY ./docker/configure_workers_and_start.py /configure_workers_and_start.py +ENTRYPOINT ["/configure_workers_and_start.py"] diff --git a/docker/README-testing.md b/docker/README-testing.md new file mode 100644 index 000000000000..6a5baf9e2835 --- /dev/null +++ b/docker/README-testing.md @@ -0,0 +1,140 @@ +# Running tests against a dockerised Synapse + +It's possible to run integration tests against Synapse +using [Complement](https://github.com/matrix-org/complement). Complement is a Matrix Spec +compliance test suite for homeservers, and supports any homeserver docker image configured +to listen on ports 8008/8448. This document contains instructions for building Synapse +docker images that can be run inside Complement for testing purposes. + +Note that running Synapse's unit tests from within the docker image is not supported. + +## Testing with SQLite and single-process Synapse + +> Note that `scripts-dev/complement.sh` is a script that will automatically build +> and run an SQLite-based, single-process instance of Synapse against Complement. + +The instructions below will set up Complement testing for a single-process, +SQLite-based Synapse deployment. + +Start by building the base Synapse docker image. If you wish to run tests with the latest +release of Synapse, instead of your current checkout, you can skip this step. From the +root of the repository: + +```sh +docker build -t matrixdotorg/synapse -f docker/Dockerfile . +``` + +This will build an image with the tag `matrixdotorg/synapse`. + +Next, build the Synapse image for Complement. You will need a local checkout +of Complement. Change to the root of your Complement checkout and run: + +```sh +docker build -t complement-synapse -f "dockerfiles/Synapse.Dockerfile" dockerfiles +``` + +This will build an image with the tag `complement-synapse`, which can be handed to +Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Refer to +[Complement's documentation](https://github.com/matrix-org/complement/#running) for +how to run the tests, as well as the various available command line flags. + +## Testing with PostgreSQL and single or multi-process Synapse + +The above docker image only supports running Synapse with SQLite and in a +single-process topology. The following instructions are used to build a Synapse image for +Complement that supports either single or multi-process topology with a PostgreSQL +database backend. + +As with the single-process image, build the base Synapse docker image. If you wish to run +tests with the latest release of Synapse, instead of your current checkout, you can skip +this step.
From the root of the repository: + +```sh +docker build -t matrixdotorg/synapse -f docker/Dockerfile . +``` + +This will build an image with the tag `matrixdotorg/synapse`. + +Next, we build a new image with worker support based on `matrixdotorg/synapse:latest`. +Again, from the root of the repository: + +```sh +docker build -t matrixdotorg/synapse-workers -f docker/Dockerfile-workers . +``` + +This will build an image with the tag `matrixdotorg/synapse-workers`. + +It's worth noting at this point that this image is fully functional, and +can be used for local testing. See instructions for using the container +under +[Running the Dockerfile-worker image standalone](#running-the-dockerfile-worker-image-standalone) +below. + +Finally, build the Synapse image for Complement, which is based on +`matrixdotorg/synapse-workers`. You will need a local checkout of Complement. Change to +the root of your Complement checkout and run: + +```sh +docker build -t matrixdotorg/complement-synapse-workers -f dockerfiles/SynapseWorkers.Dockerfile dockerfiles +``` + +This will build an image with the tag `matrixdotorg/complement-synapse-workers`, which can be handed to +Complement for testing via the `COMPLEMENT_BASE_IMAGE` environment variable. Refer to +[Complement's documentation](https://github.com/matrix-org/complement/#running) for +how to run the tests, as well as the various available command line flags. + +## Running the Dockerfile-worker image standalone + +For manual testing of a multi-process Synapse instance in Docker, +[Dockerfile-workers](Dockerfile-workers) is a Dockerfile that will produce an image +bundling all necessary components together for a workerised homeserver instance. + +This includes any desired Synapse worker processes, an nginx to route traffic accordingly, +a redis for worker communication and a supervisord instance to start up and monitor all +processes. You will need to provide your own postgres container to connect to, and TLS +is not handled by the container. + +Once you've built the image using the above instructions, you can run it. Be sure +you've set up a volume according to the [usual Synapse docker instructions](README.md). +Then run something along the lines of: + +``` +docker run -d --name synapse \ + --mount type=volume,src=synapse-data,dst=/data \ + -p 8008:8008 \ + -e SYNAPSE_SERVER_NAME=my.matrix.host \ + -e SYNAPSE_REPORT_STATS=no \ + -e POSTGRES_HOST=postgres \ + -e POSTGRES_USER=postgres \ + -e POSTGRES_PASSWORD=somesecret \ + -e SYNAPSE_WORKER_TYPES=synchrotron,media_repository,user_dir \ + -e SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK=1 \ + matrixdotorg/synapse-workers +``` + +...substituting `POSTGRES*` variables for those that match a postgres host you have +available (usually a running postgres docker container). + +The `SYNAPSE_WORKER_TYPES` environment variable is a comma-separated list of workers to +use when running the container. All possible worker names are defined by the keys of the +`WORKERS_CONFIG` variable in [this script](configure_workers_and_start.py), which the +Dockerfile makes use of to generate appropriate worker, nginx and supervisord config +files. + +Sharding is supported for a subset of workers, in line with the +[worker documentation](../docs/workers.md). To run multiple instances of a given worker +type, simply specify the type multiple times in `SYNAPSE_WORKER_TYPES` +(e.g. `SYNAPSE_WORKER_TYPES=event_creator,event_creator...`); each instance is then +given a numbered name, as sketched below.
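+A minimal sketch of that naming scheme (an illustration, not part of the patch;
+the `derive_worker_names` helper is hypothetical and simply mirrors the counter
+logic that `configure_workers_and_start.py` applies inline while generating
+each worker's config):
+
+```python
+# Sketch: duplicate entries in SYNAPSE_WORKER_TYPES become numbered
+# worker names, e.g. event_creator1, event_creator2.
+from typing import Dict, List
+
+
+def derive_worker_names(worker_types_env: str) -> List[str]:
+    counters: Dict[str, int] = {}
+    names: List[str] = []
+    for worker_type in worker_types_env.split(","):
+        worker_type = worker_type.strip()
+        counters[worker_type] = counters.get(worker_type, 0) + 1
+        names.append(worker_type + str(counters[worker_type]))
+    return names
+
+
+print(derive_worker_names("event_creator,event_creator,synchrotron"))
+# ['event_creator1', 'event_creator2', 'synchrotron1']
+```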
+ +Otherwise, `SYNAPSE_WORKER_TYPES` can either be left empty or unset to spawn no workers +(leaving only the main process). The container is configured to use redis-based worker +mode. + +Logs for workers and the main process are logged to stdout and can be viewed with +standard `docker logs` tooling. Worker logs contain their worker name +after the timestamp. + +Setting `SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK=1` will cause worker logs to be written to +`<data_dir>/logs/<worker_name>.log`. Logs are kept for 1 week and rotate every day at +00:00, according to the container's clock. Logging for the main process must still be +configured by modifying the homeserver's log config in your Synapse data volume. diff --git a/docker/README.md b/docker/README.md index b65bcea636e8..a7d1e670fe91 100644 --- a/docker/README.md +++ b/docker/README.md @@ -11,7 +11,7 @@ is not supported by this image. ## Volumes -By default, the image expects a single volume, located at ``/data``, that will hold: +By default, the image expects a single volume, located at `/data`, that will hold: * configuration files; * uploaded media and thumbnails; @@ -19,11 +19,11 @@ By default, the image expects a single volume, located at `/data`, that will h * the appservices configuration. You are free to use separate volumes depending on storage endpoints at your -disposal. For instance, ``/data/media`` could be stored on a large but low +disposal. For instance, `/data/media` could be stored on a large but low performance hdd storage while other files could be stored on high performance endpoints. -In order to setup an application service, simply create an ``appservices`` +In order to set up an application service, simply create an `appservices` directory in the data volume and write the application service Yaml configuration file there. Multiple application services are supported. @@ -56,6 +56,8 @@ The following environment variables are supported in `generate` mode: * `SYNAPSE_SERVER_NAME` (mandatory): the server public hostname. * `SYNAPSE_REPORT_STATS` (mandatory, `yes` or `no`): whether to enable anonymous statistics reporting. +* `SYNAPSE_HTTP_PORT`: the port Synapse should listen on for http traffic. + Defaults to `8008`. * `SYNAPSE_CONFIG_DIR`: where additional config files (such as the log config and event signing key) will be stored. Defaults to `/data`. * `SYNAPSE_CONFIG_PATH`: path to the file to be generated. Defaults to @@ -76,6 +78,8 @@ docker run -d --name synapse \ matrixdotorg/synapse:latest ``` + +(assuming 8008 is the port Synapse is configured to listen on for http traffic.) + You can then check that it has started correctly with: ``` @@ -211,4 +215,4 @@ healthcheck: ## Using jemalloc Jemalloc is embedded in the image and will be used instead of the default allocator. -You can read about jemalloc by reading the Synapse [README](../README.md) +You can read about jemalloc by reading the Synapse [README](../README.md). diff --git a/docker/conf-workers/nginx.conf.j2 b/docker/conf-workers/nginx.conf.j2 new file mode 100644 index 000000000000..1081979e06a0 --- /dev/null +++ b/docker/conf-workers/nginx.conf.j2 @@ -0,0 +1,27 @@ +# This file contains the base config for the reverse proxy, as part of ../Dockerfile-workers. +# configure_workers_and_start.py uses and amends this file depending on the workers +# that have been selected.
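+# For illustration, with two synchrotron instances configured the generation
+# script would render the upstream placeholder below to something like
+# (hypothetical ports, assigned incrementally from 18009):
+#
+#   upstream synchrotron {
+#       server localhost:18009;
+#       server localhost:18010;
+#   }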
+ +{{ upstream_directives }} + +server { + # Listen on an unoccupied port number + listen 8008; + listen [::]:8008; + + server_name localhost; + + # Nginx by default only allows file uploads up to 1M in size + # Increase client_max_body_size to match max_upload_size defined in homeserver.yaml + client_max_body_size 100M; + +{{ worker_locations }} + + # Send all other traffic to the main process + location ~* ^(\/_matrix|\/_synapse) { + proxy_pass http://localhost:8080; + proxy_set_header X-Forwarded-For $remote_addr; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Host $host; + } +} diff --git a/docker/conf-workers/shared.yaml.j2 b/docker/conf-workers/shared.yaml.j2 new file mode 100644 index 000000000000..f94b8c6aca0f --- /dev/null +++ b/docker/conf-workers/shared.yaml.j2 @@ -0,0 +1,9 @@ +# This file contains the base for the shared homeserver config file between Synapse workers, +# as part of ./Dockerfile-workers. +# configure_workers_and_start.py uses and amends this file depending on the workers +# that have been selected. + +redis: + enabled: true + +{{ shared_worker_config }} \ No newline at end of file diff --git a/docker/conf-workers/supervisord.conf.j2 b/docker/conf-workers/supervisord.conf.j2 new file mode 100644 index 000000000000..0de2c6143b5a --- /dev/null +++ b/docker/conf-workers/supervisord.conf.j2 @@ -0,0 +1,41 @@ +# This file contains the base config for supervisord, as part of ../Dockerfile-workers. +# configure_workers_and_start.py uses and amends this file depending on the workers +# that have been selected. +[supervisord] +nodaemon=true +user=root + +[program:nginx] +command=/usr/sbin/nginx -g "daemon off;" +priority=500 +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stderr +stderr_logfile_maxbytes=0 +username=www-data +autorestart=true + +[program:redis] +command=/usr/bin/redis-server /etc/redis/redis.conf --daemonize no +priority=1 +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stderr +stderr_logfile_maxbytes=0 +username=redis +autorestart=true + +[program:synapse_main] +command=/usr/local/bin/python -m synapse.app.homeserver --config-path="{{ main_config_path }}" --config-path=/conf/workers/shared.yaml +priority=10 +# Log startup failures to supervisord's stdout/err +# Regular synapse logs will still go in the configured data directory +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stderr +stderr_logfile_maxbytes=0 +autorestart=unexpected +exitcodes=0 + +# Additional process blocks +{{ worker_config }} \ No newline at end of file diff --git a/docker/conf-workers/worker.yaml.j2 b/docker/conf-workers/worker.yaml.j2 new file mode 100644 index 000000000000..42131afc9583 --- /dev/null +++ b/docker/conf-workers/worker.yaml.j2 @@ -0,0 +1,26 @@ +# This is a configuration template for a single worker instance, and is +# used by Dockerfile-workers. +# Values will change depending on which workers are selected when +# running that image. + +worker_app: "{{ app }}" +worker_name: "{{ name }}" + +# The replication listener on the main synapse process.
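+# (configure_workers_and_start.py adds that listener to the shared config as an
+# HTTP listener on 127.0.0.1:9093 exposing the `replication` resource, which is
+# what the two settings below point at.)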
+worker_replication_host: 127.0.0.1 +worker_replication_http_port: 9093 + +worker_listeners: + - type: http + port: {{ port }} +{% if listener_resources %} + resources: + - names: +{%- for resource in listener_resources %} + - {{ resource }} +{%- endfor %} +{% endif %} + +worker_log_config: {{ worker_log_config_filepath }} + +{{ worker_extra_conf }} diff --git a/docker/conf/homeserver.yaml b/docker/conf/homeserver.yaml index a792899540ee..2b23d7f428d7 100644 --- a/docker/conf/homeserver.yaml +++ b/docker/conf/homeserver.yaml @@ -40,7 +40,9 @@ listeners: compress: false {% endif %} - - port: 8008 + # Allow configuring in case we want to reverse proxy 8008 + # using another process in the same container + - port: {{ SYNAPSE_HTTP_PORT or 8008 }} tls: false bind_addresses: ['::'] type: http diff --git a/docker/conf/log.config b/docker/conf/log.config index 491bbcc87ad7..34572bc0f36a 100644 --- a/docker/conf/log.config +++ b/docker/conf/log.config @@ -2,9 +2,34 @@ version: 1 formatters: precise: - format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s' +{% if worker_name %} + format: '%(asctime)s - worker:{{ worker_name }} - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s' +{% else %} + format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s' +{% endif %} handlers: + file: + class: logging.handlers.TimedRotatingFileHandler + formatter: precise + filename: {{ LOG_FILE_PATH or "homeserver.log" }} + when: "midnight" + backupCount: 6 # Does not include the current log file. + encoding: utf8 + + # Default to buffering writes to log file for efficiency. This means that + # there will be a delay for INFO/DEBUG logs to get written, but WARNING/ERROR + # logs will still be flushed immediately. + buffer: + class: logging.handlers.MemoryHandler + target: file + # The capacity is the number of log lines that are buffered before + # being written to disk. Increasing this will lead to better + # performance, at the expense of it taking longer for log lines to + # be written to disk. + capacity: 10 + flushLevel: 30 # Flush for WARNING logs as well + console: class: logging.StreamHandler formatter: precise @@ -17,6 +42,11 @@ loggers: root: level: {{ SYNAPSE_LOG_LEVEL or "INFO" }} + +{% if LOG_FILE_PATH %} + handlers: [console, buffer] +{% else %} handlers: [console] +{% endif %} disable_existing_loggers: false diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py new file mode 100755 index 000000000000..4be6afc65d9a --- /dev/null +++ b/docker/configure_workers_and_start.py @@ -0,0 +1,558 @@ +#!/usr/bin/env python +# Copyright 2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script reads environment variables and generates shared Synapse worker, +# nginx and supervisord configs depending on the workers requested. +# +# The environment variables it reads are: +# * SYNAPSE_SERVER_NAME: The desired server_name of the homeserver.
+# * SYNAPSE_REPORT_STATS: Whether to report stats. +# * SYNAPSE_WORKER_TYPES: A comma-separated list of worker names as specified in WORKERS_CONFIG +# below. Leave empty for no workers, or set to '*' for all possible workers. +# +# NOTE: According to Complement's ENTRYPOINT expectations for a homeserver image (as defined +# in the project's README), this script may be run multiple times, and functionality should +# continue to work if so. + +import os +import subprocess +import sys + +import jinja2 +import yaml + +MAIN_PROCESS_HTTP_LISTENER_PORT = 8080 + + +WORKERS_CONFIG = { + "pusher": { + "app": "synapse.app.pusher", + "listener_resources": [], + "endpoint_patterns": [], + "shared_extra_conf": {"start_pushers": False}, + "worker_extra_conf": "", + }, + "user_dir": { + "app": "synapse.app.user_dir", + "listener_resources": ["client"], + "endpoint_patterns": [ + "^/_matrix/client/(api/v1|r0|unstable)/user_directory/search$" + ], + "shared_extra_conf": {"update_user_directory": False}, + "worker_extra_conf": "", + }, + "media_repository": { + "app": "synapse.app.media_repository", + "listener_resources": ["media"], + "endpoint_patterns": [ + "^/_matrix/media/", + "^/_synapse/admin/v1/purge_media_cache$", + "^/_synapse/admin/v1/room/.*/media.*$", + "^/_synapse/admin/v1/user/.*/media.*$", + "^/_synapse/admin/v1/media/.*$", + "^/_synapse/admin/v1/quarantine_media/.*$", + ], + "shared_extra_conf": {"enable_media_repo": False}, + "worker_extra_conf": "enable_media_repo: true", + }, + "appservice": { + "app": "synapse.app.appservice", + "listener_resources": [], + "endpoint_patterns": [], + "shared_extra_conf": {"notify_appservices": False}, + "worker_extra_conf": "", + }, + "federation_sender": { + "app": "synapse.app.federation_sender", + "listener_resources": [], + "endpoint_patterns": [], + "shared_extra_conf": {"send_federation": False}, + "worker_extra_conf": "", + }, + "synchrotron": { + "app": "synapse.app.generic_worker", + "listener_resources": ["client"], + "endpoint_patterns": [ + "^/_matrix/client/(v2_alpha|r0)/sync$", + "^/_matrix/client/(api/v1|v2_alpha|r0)/events$", + "^/_matrix/client/(api/v1|r0)/initialSync$", + "^/_matrix/client/(api/v1|r0)/rooms/[^/]+/initialSync$", + ], + "shared_extra_conf": {}, + "worker_extra_conf": "", + }, + "federation_reader": { + "app": "synapse.app.generic_worker", + "listener_resources": ["federation"], + "endpoint_patterns": [ + "^/_matrix/federation/(v1|v2)/event/", + "^/_matrix/federation/(v1|v2)/state/", + "^/_matrix/federation/(v1|v2)/state_ids/", + "^/_matrix/federation/(v1|v2)/backfill/", + "^/_matrix/federation/(v1|v2)/get_missing_events/", + "^/_matrix/federation/(v1|v2)/publicRooms", + "^/_matrix/federation/(v1|v2)/query/", + "^/_matrix/federation/(v1|v2)/make_join/", + "^/_matrix/federation/(v1|v2)/make_leave/", + "^/_matrix/federation/(v1|v2)/send_join/", + "^/_matrix/federation/(v1|v2)/send_leave/", + "^/_matrix/federation/(v1|v2)/invite/", + "^/_matrix/federation/(v1|v2)/query_auth/", + "^/_matrix/federation/(v1|v2)/event_auth/", + "^/_matrix/federation/(v1|v2)/exchange_third_party_invite/", + "^/_matrix/federation/(v1|v2)/user/devices/", + "^/_matrix/federation/(v1|v2)/get_groups_publicised$", + "^/_matrix/key/v2/query", + ], + "shared_extra_conf": {}, + "worker_extra_conf": "", + }, + "federation_inbound": { + "app": "synapse.app.generic_worker", + "listener_resources": ["federation"], + "endpoint_patterns": ["^/_matrix/federation/(v1|v2)/send/"], + "shared_extra_conf": {}, + "worker_extra_conf": "", + }, + "event_persister": {
"app": "synapse.app.generic_worker", + "listener_resources": ["replication"], + "endpoint_patterns": [], + "shared_extra_conf": {}, + "worker_extra_conf": "", + }, + "background_worker": { + "app": "synapse.app.generic_worker", + "listener_resources": [], + "endpoint_patterns": [], + # This worker cannot be sharded. Therefore there should only ever be one background + # worker, and it should be named background_worker1 + "shared_extra_conf": {"run_background_tasks_on": "background_worker1"}, + "worker_extra_conf": "", + }, + "event_creator": { + "app": "synapse.app.generic_worker", + "listener_resources": ["client"], + "endpoint_patterns": [ + "^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/redact", + "^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send", + "^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$", + "^/_matrix/client/(api/v1|r0|unstable)/join/", + "^/_matrix/client/(api/v1|r0|unstable)/profile/", + ], + "shared_extra_conf": {}, + "worker_extra_conf": "", + }, + "frontend_proxy": { + "app": "synapse.app.frontend_proxy", + "listener_resources": ["client", "replication"], + "endpoint_patterns": ["^/_matrix/client/(api/v1|r0|unstable)/keys/upload"], + "shared_extra_conf": {}, + "worker_extra_conf": ( + "worker_main_http_uri: http://127.0.0.1:%d" + % (MAIN_PROCESS_HTTP_LISTENER_PORT,), + ), + }, +} + +# Templates for sections that may be inserted multiple times in config files +SUPERVISORD_PROCESS_CONFIG_BLOCK = """ +[program:synapse_{name}] +command=/usr/local/bin/python -m {app} \ + --config-path="{config_path}" \ + --config-path=/conf/workers/shared.yaml \ + --config-path=/conf/workers/{name}.yaml +autorestart=unexpected +priority=500 +exitcodes=0 +stdout_logfile=/dev/stdout +stdout_logfile_maxbytes=0 +stderr_logfile=/dev/stderr +stderr_logfile_maxbytes=0 +""" + +NGINX_LOCATION_CONFIG_BLOCK = """ + location ~* {endpoint} { + proxy_pass {upstream}; + proxy_set_header X-Forwarded-For $remote_addr; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Host $host; + } +""" + +NGINX_UPSTREAM_CONFIG_BLOCK = """ +upstream {upstream_worker_type} { +{body} +} +""" + + +# Utility functions +def log(txt: str): + """Log something to the stdout. + + Args: + txt: The text to log. + """ + print(txt) + + +def error(txt: str): + """Log something and exit with an error code. + + Args: + txt: The text to log in error. + """ + log(txt) + sys.exit(2) + + +def convert(src: str, dst: str, **template_vars): + """Generate a file from a template + + Args: + src: Path to the input file. + dst: Path to write to. + template_vars: The arguments to replace placeholder variables in the template with. + """ + # Read the template file + with open(src) as infile: + template = infile.read() + + # Generate a string from the template. We disable autoescape to prevent template + # variables from being escaped. + rendered = jinja2.Template(template, autoescape=False).render(**template_vars) + + # Write the generated contents to a file + # + # We use append mode in case the files have already been written to by something else + # (for instance, as part of the instructions in a dockerfile). 
+ with open(dst, "a") as outfile: + # In case the existing file doesn't end with a newline + outfile.write("\n") + + outfile.write(rendered) + + +def add_sharding_to_shared_config( + shared_config: dict, + worker_type: str, + worker_name: str, + worker_port: int, +) -> None: + """Given a dictionary representing a config file shared across all workers, + append sharded worker information to it for the current worker_type instance. + + Args: + shared_config: The config dict that all worker instances share (after being converted to YAML) + worker_type: The type of worker (one of those defined in WORKERS_CONFIG). + worker_name: The name of the worker instance. + worker_port: The HTTP replication port that the worker instance is listening on. + """ + # The instance_map config field marks the workers that write to various replication streams + instance_map = shared_config.setdefault("instance_map", {}) + + # Worker-type specific sharding config + if worker_type == "pusher": + shared_config.setdefault("pusher_instances", []).append(worker_name) + + elif worker_type == "federation_sender": + shared_config.setdefault("federation_sender_instances", []).append(worker_name) + + elif worker_type == "event_persister": + # Event persisters write to the events stream, so we need to update + # the list of event stream writers + shared_config.setdefault("stream_writers", {}).setdefault("events", []).append( + worker_name + ) + + # Map of stream writer instance names to host/ports combos + instance_map[worker_name] = { + "host": "localhost", + "port": worker_port, + } + + elif worker_type == "media_repository": + # The first configured media worker will run the media background jobs + shared_config.setdefault("media_instance_running_background_jobs", worker_name) + + +def generate_base_homeserver_config(): + """Starts Synapse and generates a basic homeserver config, which will later be + modified for worker support. + + Raises: CalledProcessError if calling start.py returns a non-zero exit code. + """ + # start.py already does this for us, so just call that. + # note that this script is copied in by the official, monolith dockerfile + os.environ["SYNAPSE_HTTP_PORT"] = str(MAIN_PROCESS_HTTP_LISTENER_PORT) + subprocess.check_output(["/usr/local/bin/python", "/start.py", "migrate_config"]) + + +def generate_worker_files(environ, config_path: str, data_dir: str): + """Read the desired list of workers from environment variables and generate + shared homeserver, nginx and supervisord configs. + + Args: + environ: _Environ[str] + config_path: Where to output the generated Synapse main worker config file. + data_dir: The location of the synapse data directory. Where log and + user-facing config files live. + """ + # Note that yaml cares about indentation, so care should be taken to insert lines + # into files at the correct indentation below. + + # shared_config is the contents of a Synapse config file that will be shared amongst + # the main Synapse process as well as all workers. + # It is intended mainly for disabling functionality when certain workers are spun up, + # and adding a replication listener. + + # First read the original config file and extract the listeners block. Then we'll add + # another listener for replication. Later we'll write out the result.
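+    # This replication listener is what each worker's worker_replication_host /
+    # worker_replication_http_port settings (127.0.0.1:9093, see worker.yaml.j2)
+    # connect to.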
+ listeners = [ + { + "port": 9093, + "bind_address": "127.0.0.1", + "type": "http", + "resources": [{"names": ["replication"]}], + } + ] + with open(config_path) as file_stream: + original_config = yaml.safe_load(file_stream) + original_listeners = original_config.get("listeners") + if original_listeners: + listeners += original_listeners + + # The shared homeserver config. The contents of which will be inserted into the + # base shared worker jinja2 template. + # + # This config file will be passed to all workers, including Synapse's main process. + shared_config = {"listeners": listeners} + + # The supervisord config. The contents of which will be inserted into the + # base supervisord jinja2 template. + # + # Supervisord will be in charge of running everything, from redis to nginx to Synapse + # and all of its worker processes. Load the config template, which defines a few + # services that are necessary to run. + supervisord_config = "" + + # Upstreams for load-balancing purposes. This dict maps a worker type to the + # ports of each worker. For example: + # { + # worker_type: {1234, 1235, ...} + # } + # and will be used to construct 'upstream' nginx directives. + nginx_upstreams = {} + + # A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what will be + # placed after the proxy_pass directive. The main benefit to representing this data as a + # dict over a str is that we can easily deduplicate endpoints across multiple instances + # of the same worker. + # + # An nginx site config that will be amended depending on the workers that are + # spun up. To be placed in /etc/nginx/conf.d. + nginx_locations = {} + + # Read the desired worker configuration from the environment + worker_types = environ.get("SYNAPSE_WORKER_TYPES") + if worker_types is None: + # No workers, just the main process + worker_types = [] + else: + # Split type names by comma + worker_types = worker_types.split(",") + + # Create the worker configuration directory if it doesn't already exist + os.makedirs("/conf/workers", exist_ok=True) + + # Start worker ports from this arbitrary port + worker_port = 18009 + + # A counter of worker_type -> int. Used for determining the name for a given + # worker type when generating its config file, as each worker's name is just + # worker_type + instance # + worker_type_counter = {} + + # For each worker type specified by the user, create config values + for worker_type in worker_types: + worker_type = worker_type.strip() + + worker_config = WORKERS_CONFIG.get(worker_type) + if worker_config: + worker_config = worker_config.copy() + else: + log(worker_type + " is an unknown worker type! It will be ignored") + continue + + new_worker_count = worker_type_counter.setdefault(worker_type, 0) + 1 + worker_type_counter[worker_type] = new_worker_count + + # Name workers by their type concatenated with an incrementing number + # e.g.
federation_reader1 + worker_name = worker_type + str(new_worker_count) + worker_config.update( + {"name": worker_name, "port": worker_port, "config_path": config_path} + ) + + # Update the shared config with any worker-type specific options + shared_config.update(worker_config["shared_extra_conf"]) + + # Check if more than one instance of this worker type has been specified + worker_type_total_count = worker_types.count(worker_type) + if worker_type_total_count > 1: + # Update the shared config with sharding-related options if necessary + add_sharding_to_shared_config( + shared_config, worker_type, worker_name, worker_port + ) + + # Enable the worker in supervisord + supervisord_config += SUPERVISORD_PROCESS_CONFIG_BLOCK.format_map(worker_config) + + # Add nginx location blocks for this worker's endpoints (if any are defined) + for pattern in worker_config["endpoint_patterns"]: + # Determine whether we need to load-balance this worker + if worker_type_total_count > 1: + # Create or add to a load-balanced upstream for this worker + nginx_upstreams.setdefault(worker_type, set()).add(worker_port) + + # Upstreams are named after the worker_type + upstream = "http://" + worker_type + else: + upstream = "http://localhost:%d" % (worker_port,) + + # Note that this endpoint should proxy to this upstream + nginx_locations[pattern] = upstream + + # Write out the worker's logging config file + + # Check whether we should write worker logs to disk, in addition to the console + extra_log_template_args = {} + if environ.get("SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK"): + extra_log_template_args["LOG_FILE_PATH"] = "{dir}/logs/{name}.log".format( + dir=data_dir, name=worker_name + ) + + # Render and write the file + log_config_filepath = "/conf/workers/{name}.log.config".format(name=worker_name) + convert( + "/conf/log.config", + log_config_filepath, + worker_name=worker_name, + **extra_log_template_args, + ) + + # Then a worker config file + convert( + "/conf/worker.yaml.j2", + "/conf/workers/{name}.yaml".format(name=worker_name), + **worker_config, + worker_log_config_filepath=log_config_filepath, + ) + + worker_port += 1 + + # Build the nginx location config blocks + nginx_location_config = "" + for endpoint, upstream in nginx_locations.items(): + nginx_location_config += NGINX_LOCATION_CONFIG_BLOCK.format( + endpoint=endpoint, + upstream=upstream, + ) + + # Determine the load-balancing upstreams to configure + nginx_upstream_config = "" + for upstream_worker_type, upstream_worker_ports in nginx_upstreams.items(): + body = "" + for port in upstream_worker_ports: + body += " server localhost:%d;\n" % (port,) + + # Add to the list of configured upstreams + nginx_upstream_config += NGINX_UPSTREAM_CONFIG_BLOCK.format( + upstream_worker_type=upstream_worker_type, + body=body, + ) + + # Finally, we'll write out the config files. 
+ + # Shared homeserver config + convert( + "/conf/shared.yaml.j2", + "/conf/workers/shared.yaml", + shared_worker_config=yaml.dump(shared_config), + ) + + # Nginx config + convert( + "/conf/nginx.conf.j2", + "/etc/nginx/conf.d/matrix-synapse.conf", + worker_locations=nginx_location_config, + upstream_directives=nginx_upstream_config, + ) + + # Supervisord config + convert( + "/conf/supervisord.conf.j2", + "/etc/supervisor/conf.d/supervisord.conf", + main_config_path=config_path, + worker_config=supervisord_config, + ) + + # Ensure the logging directory exists + log_dir = data_dir + "/logs" + if not os.path.exists(log_dir): + os.mkdir(log_dir) + + +def start_supervisord(): + """Starts up supervisord which then starts and monitors all other necessary processes + + Raises: CalledProcessError if calling supervisord returns a non-zero exit code. + """ + subprocess.run(["/usr/bin/supervisord"], stdin=subprocess.PIPE) + + +def main(args, environ): + config_dir = environ.get("SYNAPSE_CONFIG_DIR", "/data") + config_path = environ.get("SYNAPSE_CONFIG_PATH", config_dir + "/homeserver.yaml") + data_dir = environ.get("SYNAPSE_DATA_DIR", "/data") + + # override SYNAPSE_NO_TLS, we don't support TLS in worker mode, + # this needs to be handled by a frontend proxy + environ["SYNAPSE_NO_TLS"] = "yes" + + # Generate the base homeserver config if one does not yet exist + if not os.path.exists(config_path): + log("Generating base homeserver config") + generate_base_homeserver_config() + + # This script may be run multiple times (mostly by Complement, see note at top of file). + # Don't re-configure workers in this instance. + mark_filepath = "/conf/workers_have_been_configured" + if not os.path.exists(mark_filepath): + # Always regenerate all other config files + generate_worker_files(environ, config_path, data_dir) + + # Mark workers as being configured + with open(mark_filepath, "w") as f: + f.write("") + + # Start supervisord, which will start Synapse, all of the configured worker + # processes, redis, nginx etc. according to the config we created above. + start_supervisord() + + +if __name__ == "__main__": + main(sys.argv, os.environ) From 4b965c862dc66c0da5d3240add70e9b5f0aa720b Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Wed, 14 Apr 2021 16:34:27 +0200 Subject: [PATCH 032/222] Remove redundant "coding: utf-8" lines (#9786) Part of #9744 Removes all redundant `# -*- coding: utf-8 -*-` lines from files, as Python 3 automatically reads source code as UTF-8 now.
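As a minimal illustration of that premise (an example, not part of the patch): under PEP 3120, Python 3 treats source files as UTF-8 by default, so non-ASCII literals parse without any coding declaration.

```python
# No "# -*- coding: utf-8 -*-" marker is needed: Python 3 assumes UTF-8
# source encoding by default (PEP 3120).
contributor = "Cristina Muñoz"  # non-ASCII literal, parsed correctly
print(contributor)
```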
`Signed-off-by: Jonathan de Jong ` --- .buildkite/scripts/create_postgres_db.py | 1 - changelog.d/9786.misc | 1 + contrib/cmdclient/http.py | 1 - contrib/experiments/test_messaging.py | 1 - scripts-dev/mypy_synapse_plugin.py | 1 - scripts-dev/sign_json | 1 - scripts-dev/update_database | 1 - scripts/export_signing_key | 1 - scripts/generate_log_config | 1 - scripts/generate_signing_key.py | 1 - scripts/move_remote_media_to_new_store.py | 1 - scripts/register_new_matrix_user | 1 - scripts/synapse_port_db | 1 - stubs/frozendict.pyi | 1 - stubs/txredisapi.pyi | 1 - synapse/__init__.py | 1 - synapse/_scripts/register_new_matrix_user.py | 1 - synapse/api/__init__.py | 1 - synapse/api/auth.py | 1 - synapse/api/auth_blocking.py | 1 - synapse/api/constants.py | 1 - synapse/api/errors.py | 1 - synapse/api/filtering.py | 1 - synapse/api/presence.py | 1 - synapse/api/room_versions.py | 1 - synapse/api/urls.py | 1 - synapse/app/__init__.py | 1 - synapse/app/_base.py | 1 - synapse/app/admin_cmd.py | 1 - synapse/app/appservice.py | 1 - synapse/app/client_reader.py | 1 - synapse/app/event_creator.py | 1 - synapse/app/federation_reader.py | 1 - synapse/app/federation_sender.py | 1 - synapse/app/frontend_proxy.py | 1 - synapse/app/generic_worker.py | 1 - synapse/app/homeserver.py | 1 - synapse/app/media_repository.py | 1 - synapse/app/pusher.py | 1 - synapse/app/synchrotron.py | 1 - synapse/app/user_dir.py | 1 - synapse/appservice/__init__.py | 1 - synapse/appservice/api.py | 1 - synapse/appservice/scheduler.py | 1 - synapse/config/__init__.py | 1 - synapse/config/__main__.py | 1 - synapse/config/_base.py | 1 - synapse/config/_util.py | 1 - synapse/config/auth.py | 1 - synapse/config/cache.py | 1 - synapse/config/cas.py | 1 - synapse/config/consent_config.py | 1 - synapse/config/database.py | 1 - synapse/config/emailconfig.py | 1 - synapse/config/experimental.py | 1 - synapse/config/federation.py | 1 - synapse/config/groups.py | 1 - synapse/config/homeserver.py | 1 - synapse/config/jwt_config.py | 1 - synapse/config/key.py | 1 - synapse/config/logger.py | 1 - synapse/config/metrics.py | 1 - synapse/config/oidc_config.py | 1 - synapse/config/password_auth_providers.py | 1 - synapse/config/push.py | 1 - synapse/config/redis.py | 1 - synapse/config/registration.py | 1 - synapse/config/repository.py | 1 - synapse/config/room.py | 1 - synapse/config/room_directory.py | 1 - synapse/config/saml2_config.py | 1 - synapse/config/server.py | 1 - synapse/config/server_notices_config.py | 1 - synapse/config/spam_checker.py | 1 - synapse/config/sso.py | 1 - synapse/config/stats.py | 1 - synapse/config/third_party_event_rules.py | 1 - synapse/config/tls.py | 1 - synapse/config/tracer.py | 1 - synapse/config/user_directory.py | 1 - synapse/config/workers.py | 1 - synapse/crypto/__init__.py | 1 - synapse/crypto/event_signing.py | 1 - synapse/crypto/keyring.py | 1 - synapse/event_auth.py | 1 - synapse/events/__init__.py | 1 - synapse/events/builder.py | 1 - synapse/events/presence_router.py | 1 - synapse/events/snapshot.py | 1 - synapse/events/spamcheck.py | 1 - synapse/events/third_party_rules.py | 1 - synapse/events/utils.py | 1 - synapse/events/validator.py | 1 - synapse/federation/__init__.py | 1 - synapse/federation/federation_base.py | 1 - synapse/federation/federation_client.py | 1 - synapse/federation/federation_server.py | 1 - synapse/federation/persistence.py | 1 - synapse/federation/send_queue.py | 1 - synapse/federation/sender/__init__.py | 1 - synapse/federation/sender/per_destination_queue.py | 1 - 
synapse/federation/sender/transaction_manager.py | 1 - synapse/federation/transport/__init__.py | 1 - synapse/federation/transport/client.py | 1 - synapse/federation/transport/server.py | 1 - synapse/federation/units.py | 1 - synapse/groups/attestations.py | 1 - synapse/groups/groups_server.py | 1 - synapse/handlers/__init__.py | 1 - synapse/handlers/_base.py | 1 - synapse/handlers/account_data.py | 1 - synapse/handlers/account_validity.py | 1 - synapse/handlers/acme.py | 1 - synapse/handlers/acme_issuing_service.py | 1 - synapse/handlers/admin.py | 1 - synapse/handlers/appservice.py | 1 - synapse/handlers/auth.py | 1 - synapse/handlers/cas_handler.py | 1 - synapse/handlers/deactivate_account.py | 1 - synapse/handlers/device.py | 1 - synapse/handlers/devicemessage.py | 1 - synapse/handlers/directory.py | 1 - synapse/handlers/e2e_keys.py | 1 - synapse/handlers/e2e_room_keys.py | 1 - synapse/handlers/events.py | 1 - synapse/handlers/federation.py | 1 - synapse/handlers/groups_local.py | 1 - synapse/handlers/identity.py | 1 - synapse/handlers/initial_sync.py | 1 - synapse/handlers/message.py | 1 - synapse/handlers/oidc_handler.py | 1 - synapse/handlers/pagination.py | 1 - synapse/handlers/password_policy.py | 1 - synapse/handlers/presence.py | 1 - synapse/handlers/profile.py | 1 - synapse/handlers/read_marker.py | 1 - synapse/handlers/receipts.py | 1 - synapse/handlers/register.py | 1 - synapse/handlers/room.py | 1 - synapse/handlers/room_list.py | 1 - synapse/handlers/room_member.py | 1 - synapse/handlers/room_member_worker.py | 1 - synapse/handlers/saml_handler.py | 1 - synapse/handlers/search.py | 1 - synapse/handlers/set_password.py | 1 - synapse/handlers/space_summary.py | 1 - synapse/handlers/sso.py | 1 - synapse/handlers/state_deltas.py | 1 - synapse/handlers/stats.py | 1 - synapse/handlers/sync.py | 1 - synapse/handlers/typing.py | 1 - synapse/handlers/ui_auth/__init__.py | 1 - synapse/handlers/ui_auth/checkers.py | 1 - synapse/handlers/user_directory.py | 1 - synapse/http/__init__.py | 1 - synapse/http/additional_resource.py | 1 - synapse/http/client.py | 1 - synapse/http/connectproxyclient.py | 1 - synapse/http/federation/__init__.py | 1 - synapse/http/federation/matrix_federation_agent.py | 1 - synapse/http/federation/srv_resolver.py | 1 - synapse/http/federation/well_known_resolver.py | 1 - synapse/http/matrixfederationclient.py | 1 - synapse/http/proxyagent.py | 1 - synapse/http/request_metrics.py | 1 - synapse/http/server.py | 1 - synapse/http/servlet.py | 1 - synapse/logging/__init__.py | 1 - synapse/logging/_remote.py | 1 - synapse/logging/_structured.py | 1 - synapse/logging/_terse_json.py | 1 - synapse/logging/filter.py | 1 - synapse/logging/formatter.py | 1 - synapse/logging/opentracing.py | 1 - synapse/logging/scopecontextmanager.py | 1 - synapse/logging/utils.py | 1 - synapse/metrics/__init__.py | 1 - synapse/metrics/_exposition.py | 1 - synapse/metrics/background_process_metrics.py | 1 - synapse/module_api/__init__.py | 1 - synapse/module_api/errors.py | 1 - synapse/notifier.py | 1 - synapse/push/__init__.py | 1 - synapse/push/action_generator.py | 1 - synapse/push/bulk_push_rule_evaluator.py | 1 - synapse/push/clientformat.py | 1 - synapse/push/emailpusher.py | 1 - synapse/push/httppusher.py | 1 - synapse/push/mailer.py | 1 - synapse/push/presentable_names.py | 1 - synapse/push/push_rule_evaluator.py | 1 - synapse/push/push_tools.py | 1 - synapse/push/pusher.py | 1 - synapse/push/pusherpool.py | 1 - synapse/replication/__init__.py | 1 - 
synapse/replication/http/__init__.py | 1 - synapse/replication/http/_base.py | 1 - synapse/replication/http/account_data.py | 1 - synapse/replication/http/devices.py | 1 - synapse/replication/http/federation.py | 1 - synapse/replication/http/login.py | 1 - synapse/replication/http/membership.py | 1 - synapse/replication/http/presence.py | 1 - synapse/replication/http/push.py | 1 - synapse/replication/http/register.py | 1 - synapse/replication/http/send_event.py | 1 - synapse/replication/http/streams.py | 1 - synapse/replication/slave/__init__.py | 1 - synapse/replication/slave/storage/__init__.py | 1 - synapse/replication/slave/storage/_base.py | 1 - synapse/replication/slave/storage/_slaved_id_tracker.py | 1 - synapse/replication/slave/storage/account_data.py | 1 - synapse/replication/slave/storage/appservice.py | 1 - synapse/replication/slave/storage/client_ips.py | 1 - synapse/replication/slave/storage/deviceinbox.py | 1 - synapse/replication/slave/storage/devices.py | 1 - synapse/replication/slave/storage/directory.py | 1 - synapse/replication/slave/storage/events.py | 1 - synapse/replication/slave/storage/filtering.py | 1 - synapse/replication/slave/storage/groups.py | 1 - synapse/replication/slave/storage/keys.py | 1 - synapse/replication/slave/storage/presence.py | 1 - synapse/replication/slave/storage/profile.py | 1 - synapse/replication/slave/storage/push_rule.py | 1 - synapse/replication/slave/storage/pushers.py | 1 - synapse/replication/slave/storage/receipts.py | 1 - synapse/replication/slave/storage/registration.py | 1 - synapse/replication/slave/storage/room.py | 1 - synapse/replication/slave/storage/transactions.py | 1 - synapse/replication/tcp/__init__.py | 1 - synapse/replication/tcp/client.py | 1 - synapse/replication/tcp/commands.py | 1 - synapse/replication/tcp/external_cache.py | 1 - synapse/replication/tcp/handler.py | 1 - synapse/replication/tcp/protocol.py | 1 - synapse/replication/tcp/redis.py | 1 - synapse/replication/tcp/resource.py | 1 - synapse/replication/tcp/streams/__init__.py | 1 - synapse/replication/tcp/streams/_base.py | 1 - synapse/replication/tcp/streams/events.py | 1 - synapse/replication/tcp/streams/federation.py | 1 - synapse/rest/__init__.py | 1 - synapse/rest/admin/__init__.py | 1 - synapse/rest/admin/_base.py | 1 - synapse/rest/admin/devices.py | 1 - synapse/rest/admin/event_reports.py | 1 - synapse/rest/admin/groups.py | 1 - synapse/rest/admin/media.py | 1 - synapse/rest/admin/purge_room_servlet.py | 1 - synapse/rest/admin/rooms.py | 1 - synapse/rest/admin/server_notice_servlet.py | 1 - synapse/rest/admin/statistics.py | 1 - synapse/rest/admin/users.py | 1 - synapse/rest/client/__init__.py | 1 - synapse/rest/client/transactions.py | 1 - synapse/rest/client/v1/__init__.py | 1 - synapse/rest/client/v1/directory.py | 1 - synapse/rest/client/v1/events.py | 1 - synapse/rest/client/v1/initial_sync.py | 1 - synapse/rest/client/v1/login.py | 1 - synapse/rest/client/v1/logout.py | 1 - synapse/rest/client/v1/presence.py | 1 - synapse/rest/client/v1/profile.py | 1 - synapse/rest/client/v1/push_rule.py | 1 - synapse/rest/client/v1/pusher.py | 1 - synapse/rest/client/v1/room.py | 1 - synapse/rest/client/v1/voip.py | 1 - synapse/rest/client/v2_alpha/__init__.py | 1 - synapse/rest/client/v2_alpha/_base.py | 1 - synapse/rest/client/v2_alpha/account.py | 1 - synapse/rest/client/v2_alpha/account_data.py | 1 - synapse/rest/client/v2_alpha/account_validity.py | 1 - synapse/rest/client/v2_alpha/auth.py | 1 - synapse/rest/client/v2_alpha/capabilities.py | 1 - 
synapse/rest/client/v2_alpha/devices.py | 1 - synapse/rest/client/v2_alpha/filter.py | 1 - synapse/rest/client/v2_alpha/groups.py | 1 - synapse/rest/client/v2_alpha/keys.py | 1 - synapse/rest/client/v2_alpha/notifications.py | 1 - synapse/rest/client/v2_alpha/openid.py | 1 - synapse/rest/client/v2_alpha/password_policy.py | 1 - synapse/rest/client/v2_alpha/read_marker.py | 1 - synapse/rest/client/v2_alpha/receipts.py | 1 - synapse/rest/client/v2_alpha/register.py | 1 - synapse/rest/client/v2_alpha/relations.py | 1 - synapse/rest/client/v2_alpha/report_event.py | 1 - synapse/rest/client/v2_alpha/room_keys.py | 1 - synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py | 1 - synapse/rest/client/v2_alpha/sendtodevice.py | 1 - synapse/rest/client/v2_alpha/shared_rooms.py | 1 - synapse/rest/client/v2_alpha/sync.py | 1 - synapse/rest/client/v2_alpha/tags.py | 1 - synapse/rest/client/v2_alpha/thirdparty.py | 1 - synapse/rest/client/v2_alpha/tokenrefresh.py | 1 - synapse/rest/client/v2_alpha/user_directory.py | 1 - synapse/rest/client/versions.py | 1 - synapse/rest/consent/consent_resource.py | 1 - synapse/rest/health.py | 1 - synapse/rest/key/__init__.py | 1 - synapse/rest/key/v2/__init__.py | 1 - synapse/rest/key/v2/local_key_resource.py | 1 - synapse/rest/media/v1/__init__.py | 1 - synapse/rest/media/v1/_base.py | 1 - synapse/rest/media/v1/config_resource.py | 1 - synapse/rest/media/v1/download_resource.py | 1 - synapse/rest/media/v1/filepath.py | 1 - synapse/rest/media/v1/media_repository.py | 1 - synapse/rest/media/v1/media_storage.py | 1 - synapse/rest/media/v1/preview_url_resource.py | 1 - synapse/rest/media/v1/storage_provider.py | 1 - synapse/rest/media/v1/thumbnail_resource.py | 1 - synapse/rest/media/v1/thumbnailer.py | 1 - synapse/rest/media/v1/upload_resource.py | 1 - synapse/rest/synapse/__init__.py | 1 - synapse/rest/synapse/client/__init__.py | 1 - synapse/rest/synapse/client/new_user_consent.py | 1 - synapse/rest/synapse/client/oidc/__init__.py | 1 - synapse/rest/synapse/client/oidc/callback_resource.py | 1 - synapse/rest/synapse/client/password_reset.py | 1 - synapse/rest/synapse/client/pick_idp.py | 1 - synapse/rest/synapse/client/pick_username.py | 1 - synapse/rest/synapse/client/saml2/__init__.py | 1 - synapse/rest/synapse/client/saml2/metadata_resource.py | 1 - synapse/rest/synapse/client/saml2/response_resource.py | 1 - synapse/rest/synapse/client/sso_register.py | 1 - synapse/rest/well_known.py | 1 - synapse/secrets.py | 1 - synapse/server.py | 1 - synapse/server_notices/consent_server_notices.py | 1 - synapse/server_notices/resource_limits_server_notices.py | 1 - synapse/server_notices/server_notices_manager.py | 1 - synapse/server_notices/server_notices_sender.py | 1 - synapse/server_notices/worker_server_notices_sender.py | 1 - synapse/spam_checker_api/__init__.py | 1 - synapse/state/__init__.py | 1 - synapse/state/v1.py | 1 - synapse/state/v2.py | 1 - synapse/storage/__init__.py | 1 - synapse/storage/_base.py | 1 - synapse/storage/background_updates.py | 1 - synapse/storage/database.py | 1 - synapse/storage/databases/__init__.py | 1 - synapse/storage/databases/main/__init__.py | 1 - synapse/storage/databases/main/account_data.py | 1 - synapse/storage/databases/main/appservice.py | 1 - synapse/storage/databases/main/cache.py | 1 - synapse/storage/databases/main/censor_events.py | 1 - synapse/storage/databases/main/client_ips.py | 1 - synapse/storage/databases/main/deviceinbox.py | 1 - synapse/storage/databases/main/devices.py | 1 - 
 synapse/storage/databases/main/directory.py | 1 -
 synapse/storage/databases/main/e2e_room_keys.py | 1 -
 synapse/storage/databases/main/end_to_end_keys.py | 1 -
 synapse/storage/databases/main/event_federation.py | 1 -
 synapse/storage/databases/main/event_push_actions.py | 1 -
 synapse/storage/databases/main/events.py | 1 -
 synapse/storage/databases/main/events_bg_updates.py | 1 -
 synapse/storage/databases/main/events_forward_extremities.py | 1 -
 synapse/storage/databases/main/events_worker.py | 1 -
 synapse/storage/databases/main/filtering.py | 1 -
 synapse/storage/databases/main/group_server.py | 1 -
 synapse/storage/databases/main/keys.py | 1 -
 synapse/storage/databases/main/media_repository.py | 1 -
 synapse/storage/databases/main/metrics.py | 1 -
 synapse/storage/databases/main/monthly_active_users.py | 1 -
 synapse/storage/databases/main/presence.py | 1 -
 synapse/storage/databases/main/profile.py | 1 -
 synapse/storage/databases/main/purge_events.py | 1 -
 synapse/storage/databases/main/push_rule.py | 1 -
 synapse/storage/databases/main/pusher.py | 1 -
 synapse/storage/databases/main/receipts.py | 1 -
 synapse/storage/databases/main/registration.py | 1 -
 synapse/storage/databases/main/rejections.py | 1 -
 synapse/storage/databases/main/relations.py | 1 -
 synapse/storage/databases/main/room.py | 1 -
 synapse/storage/databases/main/roommember.py | 1 -
 .../main/schema/delta/50/make_event_content_nullable.py | 1 -
 .../databases/main/schema/delta/57/local_current_membership.py | 1 -
 synapse/storage/databases/main/search.py | 1 -
 synapse/storage/databases/main/signatures.py | 1 -
 synapse/storage/databases/main/state.py | 1 -
 synapse/storage/databases/main/state_deltas.py | 1 -
 synapse/storage/databases/main/stats.py | 1 -
 synapse/storage/databases/main/stream.py | 1 -
 synapse/storage/databases/main/tags.py | 1 -
 synapse/storage/databases/main/transactions.py | 1 -
 synapse/storage/databases/main/ui_auth.py | 1 -
 synapse/storage/databases/main/user_directory.py | 1 -
 synapse/storage/databases/main/user_erasure_store.py | 1 -
 synapse/storage/databases/state/__init__.py | 1 -
 synapse/storage/databases/state/bg_updates.py | 1 -
 synapse/storage/databases/state/store.py | 1 -
 synapse/storage/engines/__init__.py | 1 -
 synapse/storage/engines/_base.py | 1 -
 synapse/storage/engines/postgres.py | 1 -
 synapse/storage/engines/sqlite.py | 1 -
 synapse/storage/keys.py | 1 -
 synapse/storage/persist_events.py | 1 -
 synapse/storage/prepare_database.py | 1 -
 synapse/storage/purge_events.py | 1 -
 synapse/storage/push_rule.py | 1 -
 synapse/storage/relations.py | 1 -
 synapse/storage/roommember.py | 1 -
 synapse/storage/state.py | 1 -
 synapse/storage/types.py | 1 -
 synapse/storage/util/__init__.py | 1 -
 synapse/storage/util/id_generators.py | 1 -
 synapse/storage/util/sequence.py | 1 -
 synapse/streams/__init__.py | 1 -
 synapse/streams/config.py | 1 -
 synapse/streams/events.py | 1 -
 synapse/types.py | 1 -
 synapse/util/__init__.py | 1 -
 synapse/util/async_helpers.py | 1 -
 synapse/util/caches/__init__.py | 1 -
 synapse/util/caches/cached_call.py | 1 -
 synapse/util/caches/deferred_cache.py | 1 -
 synapse/util/caches/descriptors.py | 1 -
 synapse/util/caches/dictionary_cache.py | 1 -
 synapse/util/caches/expiringcache.py | 1 -
 synapse/util/caches/lrucache.py | 1 -
 synapse/util/caches/response_cache.py | 1 -
 synapse/util/caches/stream_change_cache.py | 1 -
 synapse/util/caches/ttlcache.py | 1 -
 synapse/util/daemonize.py | 1 -
 synapse/util/distributor.py | 1 -
 synapse/util/file_consumer.py | 1 -
 synapse/util/frozenutils.py | 1 -
 synapse/util/hash.py | 2 --
 synapse/util/iterutils.py | 1 -
 synapse/util/jsonobject.py | 1 -
 synapse/util/macaroons.py | 1 -
 synapse/util/metrics.py | 1 -
 synapse/util/module_loader.py | 1 -
 synapse/util/msisdn.py | 1 -
 synapse/util/patch_inline_callbacks.py | 1 -
 synapse/util/ratelimitutils.py | 1 -
 synapse/util/retryutils.py | 1 -
 synapse/util/rlimit.py | 1 -
 synapse/util/stringutils.py | 1 -
 synapse/util/templates.py | 1 -
 synapse/util/threepids.py | 1 -
 synapse/util/versionstring.py | 1 -
 synapse/util/wheel_timer.py | 1 -
 synapse/visibility.py | 1 -
 synctl | 1 -
 synmark/__init__.py | 1 -
 synmark/__main__.py | 1 -
 synmark/suites/logging.py | 1 -
 synmark/suites/lrucache.py | 1 -
 synmark/suites/lrucache_evict.py | 1 -
 tests/__init__.py | 1 -
 tests/api/test_auth.py | 1 -
 tests/api/test_filtering.py | 1 -
 tests/app/test_frontend_proxy.py | 1 -
 tests/app/test_openid_listener.py | 1 -
 tests/appservice/__init__.py | 1 -
 tests/appservice/test_appservice.py | 1 -
 tests/appservice/test_scheduler.py | 1 -
 tests/config/__init__.py | 1 -
 tests/config/test_base.py | 1 -
 tests/config/test_cache.py | 1 -
 tests/config/test_database.py | 1 -
 tests/config/test_generate.py | 1 -
 tests/config/test_load.py | 1 -
 tests/config/test_ratelimiting.py | 1 -
 tests/config/test_room_directory.py | 1 -
 tests/config/test_server.py | 1 -
 tests/config/test_tls.py | 1 -
 tests/config/test_util.py | 1 -
 tests/crypto/__init__.py | 1 -
 tests/crypto/test_event_signing.py | 1 -
 tests/crypto/test_keyring.py | 1 -
 tests/events/test_presence_router.py | 1 -
 tests/events/test_snapshot.py | 1 -
 tests/events/test_utils.py | 1 -
 tests/federation/test_complexity.py | 1 -
 tests/federation/test_federation_sender.py | 1 -
 tests/federation/test_federation_server.py | 1 -
 tests/federation/transport/test_server.py | 1 -
 tests/handlers/test_admin.py | 1 -
 tests/handlers/test_appservice.py | 1 -
 tests/handlers/test_auth.py | 1 -
 tests/handlers/test_device.py | 1 -
 tests/handlers/test_directory.py | 1 -
 tests/handlers/test_e2e_keys.py | 1 -
 tests/handlers/test_e2e_room_keys.py | 1 -
 tests/handlers/test_federation.py | 1 -
 tests/handlers/test_message.py | 1 -
 tests/handlers/test_oidc.py | 1 -
 tests/handlers/test_password_providers.py | 1 -
 tests/handlers/test_presence.py | 1 -
 tests/handlers/test_profile.py | 1 -
 tests/handlers/test_register.py | 1 -
 tests/handlers/test_stats.py | 1 -
 tests/handlers/test_sync.py | 1 -
 tests/handlers/test_typing.py | 1 -
 tests/handlers/test_user_directory.py | 1 -
 tests/http/__init__.py | 1 -
 tests/http/federation/__init__.py | 1 -
 tests/http/federation/test_matrix_federation_agent.py | 1 -
 tests/http/federation/test_srv_resolver.py | 1 -
 tests/http/test_additional_resource.py | 1 -
 tests/http/test_endpoint.py | 1 -
 tests/http/test_fedclient.py | 1 -
 tests/http/test_proxyagent.py | 1 -
 tests/http/test_servlet.py | 1 -
 tests/http/test_simple_client.py | 1 -
 tests/logging/__init__.py | 1 -
 tests/logging/test_remote_handler.py | 1 -
 tests/logging/test_terse_json.py | 1 -
 tests/module_api/test_api.py | 1 -
 tests/push/test_email.py | 1 -
 tests/push/test_http.py | 1 -
 tests/push/test_push_rule_evaluator.py | 1 -
 tests/replication/__init__.py | 1 -
 tests/replication/_base.py | 1 -
 tests/replication/slave/__init__.py | 1 -
 tests/replication/slave/storage/__init__.py | 1 -
 tests/replication/tcp/__init__.py | 1 -
 tests/replication/tcp/streams/__init__.py | 1 -
 tests/replication/tcp/streams/test_account_data.py | 1 -
 tests/replication/tcp/streams/test_events.py | 1 -
 tests/replication/tcp/streams/test_federation.py | 1 -
 tests/replication/tcp/streams/test_receipts.py | 1 -
 tests/replication/tcp/streams/test_typing.py | 1 -
 tests/replication/tcp/test_commands.py | 1 -
 tests/replication/tcp/test_remote_server_up.py | 1 -
 tests/replication/test_auth.py | 1 -
 tests/replication/test_client_reader_shard.py | 1 -
 tests/replication/test_federation_ack.py | 1 -
 tests/replication/test_federation_sender_shard.py | 1 -
 tests/replication/test_multi_media_repo.py | 1 -
 tests/replication/test_pusher_shard.py | 1 -
 tests/replication/test_sharded_event_persister.py | 1 -
 tests/rest/__init__.py | 1 -
 tests/rest/admin/__init__.py | 1 -
 tests/rest/admin/test_admin.py | 1 -
 tests/rest/admin/test_device.py | 1 -
 tests/rest/admin/test_event_reports.py | 1 -
 tests/rest/admin/test_media.py | 1 -
 tests/rest/admin/test_room.py | 1 -
 tests/rest/admin/test_statistics.py | 1 -
 tests/rest/admin/test_user.py | 1 -
 tests/rest/client/__init__.py | 1 -
 tests/rest/client/test_consent.py | 1 -
 tests/rest/client/test_ephemeral_message.py | 1 -
 tests/rest/client/test_identity.py | 1 -
 tests/rest/client/test_power_levels.py | 1 -
 tests/rest/client/test_redactions.py | 1 -
 tests/rest/client/test_retention.py | 1 -
 tests/rest/client/test_third_party_rules.py | 1 -
 tests/rest/client/v1/__init__.py | 1 -
 tests/rest/client/v1/test_directory.py | 1 -
 tests/rest/client/v1/test_events.py | 1 -
 tests/rest/client/v1/test_login.py | 1 -
 tests/rest/client/v1/test_presence.py | 1 -
 tests/rest/client/v1/test_profile.py | 1 -
 tests/rest/client/v1/test_push_rule_attrs.py | 1 -
 tests/rest/client/v1/test_rooms.py | 1 -
 tests/rest/client/v1/test_typing.py | 1 -
 tests/rest/client/v1/utils.py | 1 -
 tests/rest/client/v2_alpha/test_account.py | 1 -
 tests/rest/client/v2_alpha/test_auth.py | 1 -
 tests/rest/client/v2_alpha/test_capabilities.py | 1 -
 tests/rest/client/v2_alpha/test_filter.py | 1 -
 tests/rest/client/v2_alpha/test_password_policy.py | 1 -
 tests/rest/client/v2_alpha/test_register.py | 1 -
 tests/rest/client/v2_alpha/test_relations.py | 1 -
 tests/rest/client/v2_alpha/test_shared_rooms.py | 1 -
 tests/rest/client/v2_alpha/test_sync.py | 1 -
 tests/rest/client/v2_alpha/test_upgrade_room.py | 1 -
 tests/rest/key/v2/test_remote_key_resource.py | 1 -
 tests/rest/media/__init__.py | 1 -
 tests/rest/media/v1/__init__.py | 1 -
 tests/rest/media/v1/test_base.py | 1 -
 tests/rest/media/v1/test_media_storage.py | 1 -
 tests/rest/media/v1/test_url_preview.py | 1 -
 tests/rest/test_health.py | 1 -
 tests/rest/test_well_known.py | 1 -
 tests/scripts/test_new_matrix_user.py | 1 -
 tests/server_notices/test_consent.py | 1 -
 tests/server_notices/test_resource_limits_server_notices.py | 1 -
 tests/state/test_v2.py | 1 -
 tests/storage/test__base.py | 1 -
 tests/storage/test_account_data.py | 1 -
 tests/storage/test_appservice.py | 1 -
 tests/storage/test_base.py | 1 -
 tests/storage/test_cleanup_extrems.py | 1 -
 tests/storage/test_client_ips.py | 1 -
 tests/storage/test_database.py | 1 -
 tests/storage/test_devices.py | 1 -
 tests/storage/test_directory.py | 1 -
 tests/storage/test_e2e_room_keys.py | 1 -
 tests/storage/test_end_to_end_keys.py | 1 -
 tests/storage/test_event_chain.py | 1 -
 tests/storage/test_event_federation.py | 1 -
 tests/storage/test_event_metrics.py | 1 -
 tests/storage/test_event_push_actions.py | 1 -
 tests/storage/test_events.py | 1 -
 tests/storage/test_id_generators.py | 1 -
 tests/storage/test_keys.py | 1 -
 tests/storage/test_main.py | 1 -
 tests/storage/test_monthly_active_users.py | 1 -
 tests/storage/test_profile.py | 1 -
 tests/storage/test_purge.py | 1 -
 tests/storage/test_redaction.py | 1 -
 tests/storage/test_registration.py | 1 -
 tests/storage/test_room.py | 1 -
 tests/storage/test_roommember.py | 1 -
 tests/storage/test_state.py | 1 -
 tests/storage/test_transactions.py | 1 -
 tests/storage/test_user_directory.py | 1 -
 tests/test_distributor.py | 1 -
 tests/test_event_auth.py | 1 -
 tests/test_federation.py | 1 -
 tests/test_mau.py | 1 -
 tests/test_metrics.py | 1 -
 tests/test_phone_home.py | 1 -
 tests/test_preview.py | 1 -
 tests/test_state.py | 1 -
 tests/test_test_utils.py | 1 -
 tests/test_types.py | 1 -
 tests/test_utils/__init__.py | 1 -
 tests/test_utils/event_injection.py | 1 -
 tests/test_utils/html_parsers.py | 1 -
 tests/test_utils/logging_setup.py | 1 -
 tests/test_visibility.py | 1 -
 tests/unittest.py | 1 -
 tests/util/__init__.py | 1 -
 tests/util/caches/__init__.py | 1 -
 tests/util/caches/test_cached_call.py | 1 -
 tests/util/caches/test_deferred_cache.py | 1 -
 tests/util/caches/test_descriptors.py | 1 -
 tests/util/caches/test_ttlcache.py | 1 -
 tests/util/test_async_utils.py | 1 -
 tests/util/test_dict_cache.py | 1 -
 tests/util/test_expiring_cache.py | 1 -
 tests/util/test_file_consumer.py | 1 -
 tests/util/test_itertools.py | 1 -
 tests/util/test_linearizer.py | 1 -
 tests/util/test_logformatter.py | 1 -
 tests/util/test_lrucache.py | 1 -
 tests/util/test_ratelimitutils.py | 1 -
 tests/util/test_retryutils.py | 1 -
 tests/util/test_rwlock.py | 1 -
 tests/util/test_stringutils.py | 1 -
 tests/util/test_threepids.py | 1 -
 tests/util/test_treecache.py | 1 -
 tests/util/test_wheel_timer.py | 1 -
 tests/utils.py | 1 -
 651 files changed, 1 insertion(+), 651 deletions(-)
 create mode 100644 changelog.d/9786.misc

diff --git a/.buildkite/scripts/create_postgres_db.py b/.buildkite/scripts/create_postgres_db.py
index 956339de5cf1..cc829db2161d 100755
--- a/.buildkite/scripts/create_postgres_db.py
+++ b/.buildkite/scripts/create_postgres_db.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*-
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/changelog.d/9786.misc b/changelog.d/9786.misc
new file mode 100644
index 000000000000..cf265db749e2
--- /dev/null
+++ b/changelog.d/9786.misc
@@ -0,0 +1 @@
+Apply `pyupgrade` across the codebase.
\ No newline at end of file
diff --git a/contrib/cmdclient/http.py b/contrib/cmdclient/http.py
index 1cf913756e6a..1310f078e3ac 100644
--- a/contrib/cmdclient/http.py
+++ b/contrib/cmdclient/http.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/contrib/experiments/test_messaging.py b/contrib/experiments/test_messaging.py
index 7fbc7d8fc6fd..31b8a6822504 100644
--- a/contrib/experiments/test_messaging.py
+++ b/contrib/experiments/test_messaging.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/scripts-dev/mypy_synapse_plugin.py b/scripts-dev/mypy_synapse_plugin.py
index 18df68305b88..1217e148747d 100644
--- a/scripts-dev/mypy_synapse_plugin.py
+++ b/scripts-dev/mypy_synapse_plugin.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/scripts-dev/sign_json b/scripts-dev/sign_json
index 44553fb79aa8..4a43d3f2b058 100755
--- a/scripts-dev/sign_json
+++ b/scripts-dev/sign_json
@@ -1,6 +1,5 @@
 #!/usr/bin/env python
 #
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/scripts-dev/update_database b/scripts-dev/update_database
index 56365e2b58bf..87f709b6ed43 100755
--- a/scripts-dev/update_database
+++ b/scripts-dev/update_database
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*-
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/scripts/export_signing_key b/scripts/export_signing_key
index 8aec9d802bf4..0ed167ea855c 100755
--- a/scripts/export_signing_key
+++ b/scripts/export_signing_key
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*-
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/scripts/generate_log_config b/scripts/generate_log_config
index a13a5634a30e..e72a0dafb769 100755
--- a/scripts/generate_log_config
+++ b/scripts/generate_log_config
@@ -1,6 +1,5 @@
 #!/usr/bin/env python3

-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/scripts/generate_signing_key.py b/scripts/generate_signing_key.py
index 16d7c4f38284..07df25a8099d 100755
--- a/scripts/generate_signing_key.py
+++ b/scripts/generate_signing_key.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*-
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/scripts/move_remote_media_to_new_store.py b/scripts/move_remote_media_to_new_store.py
index 8477955a906d..875aa4781f49 100755
--- a/scripts/move_remote_media_to_new_store.py
+++ b/scripts/move_remote_media_to_new_store.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*-
 # Copyright 2017 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/scripts/register_new_matrix_user b/scripts/register_new_matrix_user
index 8b9d30877de9..00104b9d62cb 100755
--- a/scripts/register_new_matrix_user
+++ b/scripts/register_new_matrix_user
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db
index 58edf6af6c8c..b7c1ffc956f7 100755
--- a/scripts/synapse_port_db
+++ b/scripts/synapse_port_db
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 # Copyright 2018 New Vector Ltd
 # Copyright 2019 The Matrix.org Foundation C.I.C.
diff --git a/stubs/frozendict.pyi b/stubs/frozendict.pyi
index 0368ba47038b..24c6f3af77b1 100644
--- a/stubs/frozendict.pyi
+++ b/stubs/frozendict.pyi
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/stubs/txredisapi.pyi b/stubs/txredisapi.pyi
index 080ca40287d2..c1a06ae022f6 100644
--- a/stubs/txredisapi.pyi
+++ b/stubs/txredisapi.pyi
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 125a73d37813..c2709c2ad48f 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 # Copyright 2018-9 New Vector Ltd
 #
diff --git a/synapse/_scripts/register_new_matrix_user.py b/synapse/_scripts/register_new_matrix_user.py
index dfe26dea6dfc..dae986c78869 100644
--- a/synapse/_scripts/register_new_matrix_user.py
+++ b/synapse/_scripts/register_new_matrix_user.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 # Copyright 2018 New Vector
 #
diff --git a/synapse/api/__init__.py b/synapse/api/__init__.py
index bfebb0f644f4..5e83dba2ed6f 100644
--- a/synapse/api/__init__.py
+++ b/synapse/api/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/api/auth.py b/synapse/api/auth.py
index 7d9930ae7b7c..6c13f53957c4 100644
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014 - 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/api/auth_blocking.py b/synapse/api/auth_blocking.py
index d8088f524ac7..a8df60cb890b 100644
--- a/synapse/api/auth_blocking.py
+++ b/synapse/api/auth_blocking.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index a8ae41de480d..31a59bceec20 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 # Copyright 2017 Vector Creations Ltd
 # Copyright 2018-2019 New Vector Ltd
diff --git a/synapse/api/errors.py b/synapse/api/errors.py
index 2a789ea3e823..0231c79079e1 100644
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 # Copyright 2018 New Vector Ltd
 #
diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py
index 5caf336fd0cb..ce49a0ad58d1 100644
--- a/synapse/api/filtering.py
+++ b/synapse/api/filtering.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 # Copyright 2017 Vector Creations Ltd
 # Copyright 2018-2019 New Vector Ltd
diff --git a/synapse/api/presence.py b/synapse/api/presence.py
index b9a8e294609e..a3bf0348d115 100644
--- a/synapse/api/presence.py
+++ b/synapse/api/presence.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py
index 87038d436d28..c9f9596ada92 100644
--- a/synapse/api/room_versions.py
+++ b/synapse/api/room_versions.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/api/urls.py b/synapse/api/urls.py
index 6379c86ddea0..4b1f213c75f8 100644
--- a/synapse/api/urls.py
+++ b/synapse/api/urls.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 # Copyright 2018 New Vector Ltd
 #
diff --git a/synapse/app/__init__.py b/synapse/app/__init__.py
index d1a2cd5e192b..f9940491e8e4 100644
--- a/synapse/app/__init__.py
+++ b/synapse/app/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/app/_base.py b/synapse/app/_base.py
index 3912c8994cf0..2113c4f37002 100644
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2017 New Vector Ltd
 # Copyright 2019-2021 The Matrix.org Foundation C.I.C
 #
diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py
index 9f99651aa219..eb256db749da 100644
--- a/synapse/app/admin_cmd.py
+++ b/synapse/app/admin_cmd.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*-
 # Copyright 2019 Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/app/appservice.py b/synapse/app/appservice.py
index add43147b31f..2d50060ffb0b 100644
--- a/synapse/app/appservice.py
+++ b/synapse/app/appservice.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*-
 # Copyright 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py
index add43147b31f..2d50060ffb0b 100644
--- a/synapse/app/client_reader.py
+++ b/synapse/app/client_reader.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*-
 # Copyright 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/app/event_creator.py b/synapse/app/event_creator.py
index e9c098c4e7de..57af28f10a57 100644
--- a/synapse/app/event_creator.py
+++ b/synapse/app/event_creator.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/app/federation_reader.py b/synapse/app/federation_reader.py
index add43147b31f..2d50060ffb0b 100644
--- a/synapse/app/federation_reader.py
+++ b/synapse/app/federation_reader.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*-
 # Copyright 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py
index add43147b31f..2d50060ffb0b 100644
--- a/synapse/app/federation_sender.py
+++ b/synapse/app/federation_sender.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*-
 # Copyright 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py
index add43147b31f..2d50060ffb0b 100644
--- a/synapse/app/frontend_proxy.py
+++ b/synapse/app/frontend_proxy.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*-
 # Copyright 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py
index d1c207923399..e35e17492c63 100644
--- a/synapse/app/generic_worker.py
+++ b/synapse/app/generic_worker.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*-
 # Copyright 2016 OpenMarket Ltd
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py
index 3bfe9d507ff9..679b7f428929 100644
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 # Copyright 2019 New Vector Ltd
 #
diff --git a/synapse/app/media_repository.py b/synapse/app/media_repository.py
index add43147b31f..2d50060ffb0b 100644
--- a/synapse/app/media_repository.py
+++ b/synapse/app/media_repository.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*-
 # Copyright 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/app/pusher.py b/synapse/app/pusher.py
index add43147b31f..2d50060ffb0b 100644
--- a/synapse/app/pusher.py
+++ b/synapse/app/pusher.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*-
 # Copyright 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/app/synchrotron.py b/synapse/app/synchrotron.py
index add43147b31f..2d50060ffb0b 100644
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*-
 # Copyright 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/app/user_dir.py b/synapse/app/user_dir.py
index 503d44f687d7..a368efb354ed 100644
--- a/synapse/app/user_dir.py
+++ b/synapse/app/user_dir.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*-
 # Copyright 2017 Vector Creations Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py
index 0bfc5e445f5f..6504c6bd3f59 100644
--- a/synapse/appservice/__init__.py
+++ b/synapse/appservice/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py
index 9d3bbe3b8b05..fe04d7a67293 100644
--- a/synapse/appservice/api.py
+++ b/synapse/appservice/api.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py
index 5203ffe90fdd..6a2ce99b55dc 100644
--- a/synapse/appservice/scheduler.py
+++ b/synapse/appservice/scheduler.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/config/__init__.py b/synapse/config/__init__.py
index 1e76e9559df6..d2f889159e75 100644
--- a/synapse/config/__init__.py
+++ b/synapse/config/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/config/__main__.py b/synapse/config/__main__.py
index 65043d5b5b5f..b5b6735a8faa 100644
--- a/synapse/config/__main__.py
+++ b/synapse/config/__main__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/config/_base.py b/synapse/config/_base.py
index ba9cd63cf2d5..08e2c2c543a0 100644
--- a/synapse/config/_base.py
+++ b/synapse/config/_base.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 # Copyright 2017-2018 New Vector Ltd
 # Copyright 2019 The Matrix.org Foundation C.I.C.
diff --git a/synapse/config/_util.py b/synapse/config/_util.py
index 8fce7f6bb133..3edb4b71068f 100644
--- a/synapse/config/_util.py
+++ b/synapse/config/_util.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/config/auth.py b/synapse/config/auth.py
index 9aabaadf9e54..e10d641a9655 100644
--- a/synapse/config/auth.py
+++ b/synapse/config/auth.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
diff --git a/synapse/config/cache.py b/synapse/config/cache.py
index 4e8abbf88aeb..41b9b3f51f78 100644
--- a/synapse/config/cache.py
+++ b/synapse/config/cache.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/config/cas.py b/synapse/config/cas.py
index dbf50859659f..901f4123e187 100644
--- a/synapse/config/cas.py
+++ b/synapse/config/cas.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/config/consent_config.py b/synapse/config/consent_config.py
index c47f364b146d..30d07cc219bc 100644
--- a/synapse/config/consent_config.py
+++ b/synapse/config/consent_config.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/config/database.py b/synapse/config/database.py
index e7889b9c20a3..79a02706b4f6 100644
--- a/synapse/config/database.py
+++ b/synapse/config/database.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py
index 52505ac5d2b5..c587939c7a7a 100644
--- a/synapse/config/emailconfig.py
+++ b/synapse/config/emailconfig.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2015-2016 OpenMarket Ltd
 # Copyright 2017-2018 New Vector Ltd
 # Copyright 2019 The Matrix.org Foundation C.I.C.
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py
index eb96ecda74ea..a693fba87779 100644
--- a/synapse/config/experimental.py
+++ b/synapse/config/experimental.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2021 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/config/federation.py b/synapse/config/federation.py
index 55e4db54425d..090ba047fa23 100644
--- a/synapse/config/federation.py
+++ b/synapse/config/federation.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/config/groups.py b/synapse/config/groups.py
index 7b7860ea713f..15c2e64bda2f 100644
--- a/synapse/config/groups.py
+++ b/synapse/config/groups.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2017 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py
index 64a2429f7787..130953506825 100644
--- a/synapse/config/homeserver.py
+++ b/synapse/config/homeserver.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 # Copyright 2018 New Vector Ltd
 #
diff --git a/synapse/config/jwt_config.py b/synapse/config/jwt_config.py
index f30330abb6d5..9e07e7300814 100644
--- a/synapse/config/jwt_config.py
+++ b/synapse/config/jwt_config.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2015 Niklas Riekenbrauck
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/config/key.py b/synapse/config/key.py
index 350ff1d6654c..94a9063043a2 100644
--- a/synapse/config/key.py
+++ b/synapse/config/key.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 #
diff --git a/synapse/config/logger.py b/synapse/config/logger.py
index 999aecce5c78..b174e0df6d54 100644
--- a/synapse/config/logger.py
+++ b/synapse/config/logger.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/config/metrics.py b/synapse/config/metrics.py
index 2b289f4208d0..7ac82edb0ed1 100644
--- a/synapse/config/metrics.py
+++ b/synapse/config/metrics.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 #
diff --git a/synapse/config/oidc_config.py b/synapse/config/oidc_config.py
index 05733ec41dcb..5fb94376fdb0 100644
--- a/synapse/config/oidc_config.py
+++ b/synapse/config/oidc_config.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 Quentin Gliech
 # Copyright 2020-2021 The Matrix.org Foundation C.I.C.
 #
diff --git a/synapse/config/password_auth_providers.py b/synapse/config/password_auth_providers.py
index 85d07c4f8f2a..1cf69734bb5f 100644
--- a/synapse/config/password_auth_providers.py
+++ b/synapse/config/password_auth_providers.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2016 Openmarket
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/config/push.py b/synapse/config/push.py
index 7831a2ef7921..6ef8491caf4b 100644
--- a/synapse/config/push.py
+++ b/synapse/config/push.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 # Copyright 2017 New Vector Ltd
 #
diff --git a/synapse/config/redis.py b/synapse/config/redis.py
index 1373302335b3..33104af73472 100644
--- a/synapse/config/redis.py
+++ b/synapse/config/redis.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/config/registration.py b/synapse/config/registration.py
index f27d1e14acba..f8a2768af835 100644
--- a/synapse/config/registration.py
+++ b/synapse/config/registration.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/config/repository.py b/synapse/config/repository.py
index 061c4ec83fc7..146bc55d6f77 100644
--- a/synapse/config/repository.py
+++ b/synapse/config/repository.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014, 2015 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/config/room.py b/synapse/config/room.py
index 692d7a19361e..d889d90dbc7b 100644
--- a/synapse/config/room.py
+++ b/synapse/config/room.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/config/room_directory.py b/synapse/config/room_directory.py
index 2dd719c388ac..56981cac79c2 100644
--- a/synapse/config/room_directory.py
+++ b/synapse/config/room_directory.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/config/saml2_config.py b/synapse/config/saml2_config.py
index 6db9cb5ced3c..55a7838b1004 100644
--- a/synapse/config/saml2_config.py
+++ b/synapse/config/saml2_config.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 #
diff --git a/synapse/config/server.py b/synapse/config/server.py
index 8decc9d10d76..02b86b11a5ee 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 # Copyright 2017-2018 New Vector Ltd
 # Copyright 2019 The Matrix.org Foundation C.I.C.
diff --git a/synapse/config/server_notices_config.py b/synapse/config/server_notices_config.py
index 57f69dc8e27d..48bf3241b659 100644
--- a/synapse/config/server_notices_config.py
+++ b/synapse/config/server_notices_config.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/config/spam_checker.py b/synapse/config/spam_checker.py
index 3d05abc1586a..447ba3303b3a 100644
--- a/synapse/config/spam_checker.py
+++ b/synapse/config/spam_checker.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2017 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/config/sso.py b/synapse/config/sso.py
index 243cc681e88d..af645c930d0d 100644
--- a/synapse/config/sso.py
+++ b/synapse/config/sso.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/config/stats.py b/synapse/config/stats.py
index 2258329a52d8..3d44b5120106 100644
--- a/synapse/config/stats.py
+++ b/synapse/config/stats.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/config/third_party_event_rules.py b/synapse/config/third_party_event_rules.py
index c04e1c4e077e..f502ff539e27 100644
--- a/synapse/config/third_party_event_rules.py
+++ b/synapse/config/third_party_event_rules.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/config/tls.py b/synapse/config/tls.py
index 85b5db4c40a8..b0418697583e 100644
--- a/synapse/config/tls.py
+++ b/synapse/config/tls.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/config/tracer.py b/synapse/config/tracer.py
index 727a1e700838..db22b5b19fb6 100644
--- a/synapse/config/tracer.py
+++ b/synapse/config/tracer.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 The Matrix.org Foundation C.I.C.d
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/config/user_directory.py b/synapse/config/user_directory.py
index 8d05ef173c05..4cbf79eeed4c 100644
--- a/synapse/config/user_directory.py
+++ b/synapse/config/user_directory.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2017 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/config/workers.py b/synapse/config/workers.py
index ac92375a85e6..b2540163d1b9 100644
--- a/synapse/config/workers.py
+++ b/synapse/config/workers.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/crypto/__init__.py b/synapse/crypto/__init__.py
index bfebb0f644f4..5e83dba2ed6f 100644
--- a/synapse/crypto/__init__.py
+++ b/synapse/crypto/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/crypto/event_signing.py b/synapse/crypto/event_signing.py
index 8fb116ae182c..0f2b632e4738 100644
--- a/synapse/crypto/event_signing.py
+++ b/synapse/crypto/event_signing.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 #
 # Copyright 2014-2016 OpenMarket Ltd
 # Copyright 2020 The Matrix.org Foundation C.I.C.
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index d5fb51513b59..40073dc7c2b1 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 # Copyright 2017, 2018 New Vector Ltd
 #
diff --git a/synapse/event_auth.py b/synapse/event_auth.py
index 9863953f5c5d..5234e3f81e3f 100644
--- a/synapse/event_auth.py
+++ b/synapse/event_auth.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014 - 2016 OpenMarket Ltd
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py
index f9032e36977f..c8b52cbc7a09 100644
--- a/synapse/events/__init__.py
+++ b/synapse/events/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 # Copyright 2019 New Vector Ltd
 # Copyright 2020 The Matrix.org Foundation C.I.C.
diff --git a/synapse/events/builder.py b/synapse/events/builder.py
index c1c0426f6ea0..5793553a8883 100644
--- a/synapse/events/builder.py
+++ b/synapse/events/builder.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/events/presence_router.py b/synapse/events/presence_router.py
index 24cd389d80c6..6c37c8a7a430 100644
--- a/synapse/events/presence_router.py
+++ b/synapse/events/presence_router.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2021 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py
index 7295df74fed6..f8d898c3b1d1 100644
--- a/synapse/events/snapshot.py
+++ b/synapse/events/snapshot.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py
index a9185987a237..c727b48c1ef5 100644
--- a/synapse/events/spamcheck.py
+++ b/synapse/events/spamcheck.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2017 New Vector Ltd
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 #
diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py
index 9767d2394050..f7944fd8344f 100644
--- a/synapse/events/third_party_rules.py
+++ b/synapse/events/third_party_rules.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/events/utils.py b/synapse/events/utils.py
index 0f8a3b5ad839..7d7cd9aaee5a 100644
--- a/synapse/events/utils.py
+++ b/synapse/events/utils.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/events/validator.py b/synapse/events/validator.py
index f8f3b1a31e0b..fa6987d7cbac 100644
--- a/synapse/events/validator.py
+++ b/synapse/events/validator.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/federation/__init__.py b/synapse/federation/__init__.py
index f5f0bdfca3ec..46300cba2564 100644
--- a/synapse/federation/__init__.py
+++ b/synapse/federation/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py
index 383737520afa..949dcd46141f 100644
--- a/synapse/federation/federation_base.py
+++ b/synapse/federation/federation_base.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py
index 55533d75014c..f93335edaa1c 100644
--- a/synapse/federation/federation_client.py
+++ b/synapse/federation/federation_client.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py
index b9f8d966a621..3ff6479cfbde 100644
--- a/synapse/federation/federation_server.py
+++ b/synapse/federation/federation_server.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 # Copyright 2018 New Vector Ltd
 # Copyright 2019 Matrix.org Federation C.I.C
diff --git a/synapse/federation/persistence.py b/synapse/federation/persistence.py
index ce5fc758f0e6..2f9c9bc2cdc8 100644
--- a/synapse/federation/persistence.py
+++ b/synapse/federation/persistence.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py
index 0c18c49abb70..e3f0bc2471f4 100644
--- a/synapse/federation/send_queue.py
+++ b/synapse/federation/send_queue.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py
index d821dcbf6a69..155161685d0e 100644
--- a/synapse/federation/sender/__init__.py
+++ b/synapse/federation/sender/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py
index e9c8a9f20a66..3b053ebcfb08 100644
--- a/synapse/federation/sender/per_destination_queue.py
+++ b/synapse/federation/sender/per_destination_queue.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 # Copyright 2019 New Vector Ltd
 #
diff --git a/synapse/federation/sender/transaction_manager.py b/synapse/federation/sender/transaction_manager.py
index 07b740c2f2fc..12fe3a719bc3 100644
--- a/synapse/federation/sender/transaction_manager.py
+++ b/synapse/federation/sender/transaction_manager.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/federation/transport/__init__.py b/synapse/federation/transport/__init__.py
index 5db733af98ca..3c9a0f694482 100644
--- a/synapse/federation/transport/__init__.py
+++ b/synapse/federation/transport/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index 6aee47c43116..ada322a81e14 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 # Copyright 2018 New Vector Ltd
 #
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index a9c1391d27ef..a3759bdda1d9 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 # Copyright 2018 New Vector Ltd
 # Copyright 2019 The Matrix.org Foundation C.I.C.
diff --git a/synapse/federation/units.py b/synapse/federation/units.py
index 0f8bf000ac3d..c83a261918c0 100644
--- a/synapse/federation/units.py
+++ b/synapse/federation/units.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py
index 368c44708dac..d2fc8be5f571 100644
--- a/synapse/groups/attestations.py
+++ b/synapse/groups/attestations.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2017 Vector Creations Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/groups/groups_server.py b/synapse/groups/groups_server.py
index 4b16a4ac29ea..a06d060ebf4a 100644
--- a/synapse/groups/groups_server.py
+++ b/synapse/groups/groups_server.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2017 Vector Creations Ltd
 # Copyright 2018 New Vector Ltd
 # Copyright 2019 Michael Telatynski <7t3chguy@gmail.com>
diff --git a/synapse/handlers/__init__.py b/synapse/handlers/__init__.py
index bfebb0f644f4..5e83dba2ed6f 100644
--- a/synapse/handlers/__init__.py
+++ b/synapse/handlers/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py
index fb899aa90d4d..d800e1691280 100644
--- a/synapse/handlers/_base.py
+++ b/synapse/handlers/_base.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014 - 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py
index 1ce6d697ed30..affb54e0ee4f 100644
--- a/synapse/handlers/account_data.py
+++ b/synapse/handlers/account_data.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 # Copyright 2021 The Matrix.org Foundation C.I.C.
 #
diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py
index bee1447c2ee5..66ce7e8b8318 100644
--- a/synapse/handlers/account_validity.py
+++ b/synapse/handlers/account_validity.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/handlers/acme.py b/synapse/handlers/acme.py
index 2a25af62880c..16ab93f58048 100644
--- a/synapse/handlers/acme.py
+++ b/synapse/handlers/acme.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/handlers/acme_issuing_service.py b/synapse/handlers/acme_issuing_service.py
index ae2a9dd9c219..a972d3fa0af6 100644
--- a/synapse/handlers/acme_issuing_service.py
+++ b/synapse/handlers/acme_issuing_service.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 New Vector Ltd
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 #
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py
index c494de49a35b..f72ded038e83 100644
--- a/synapse/handlers/admin.py
+++ b/synapse/handlers/admin.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py
index 9fb7ee335d38..d7bc4e23edd4 100644
--- a/synapse/handlers/appservice.py
+++ b/synapse/handlers/appservice.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index 08e413bc98e0..b8a37b647772 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014 - 2016 OpenMarket Ltd
 # Copyright 2017 Vector Creations Ltd
 # Copyright 2019 - 2020 The Matrix.org Foundation C.I.C.
diff --git a/synapse/handlers/cas_handler.py b/synapse/handlers/cas_handler.py
index 5060936f943a..7346ccfe9396 100644
--- a/synapse/handlers/cas_handler.py
+++ b/synapse/handlers/cas_handler.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py
index 2bcd8f5435a1..3f6f9f7f3d57 100644
--- a/synapse/handlers/deactivate_account.py
+++ b/synapse/handlers/deactivate_account.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2017, 2018 New Vector Ltd
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 #
diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py
index 7e76db3e2a3f..d75edb184b3a 100644
--- a/synapse/handlers/device.py
+++ b/synapse/handlers/device.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2016 OpenMarket Ltd
 # Copyright 2019 New Vector Ltd
 # Copyright 2019,2020 The Matrix.org Foundation C.I.C.
diff --git a/synapse/handlers/devicemessage.py b/synapse/handlers/devicemessage.py
index c971eeb4d26c..c5d631de07a9 100644
--- a/synapse/handlers/devicemessage.py
+++ b/synapse/handlers/devicemessage.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index abcf86352dad..90932316f3b7 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py
index 92b18378fcfb..974487800da5 100644
--- a/synapse/handlers/e2e_keys.py
+++ b/synapse/handlers/e2e_keys.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2016 OpenMarket Ltd
 # Copyright 2018-2019 New Vector Ltd
 # Copyright 2019 The Matrix.org Foundation C.I.C.
diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py
index a910d246d692..31742236a94d 100644
--- a/synapse/handlers/e2e_room_keys.py
+++ b/synapse/handlers/e2e_room_keys.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2017, 2018 New Vector Ltd
 # Copyright 2019 Matrix.org Foundation C.I.C.
 #
diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py
index f46cab73251c..d82144d7fa8d 100644
--- a/synapse/handlers/events.py
+++ b/synapse/handlers/events.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 67888898ffb5..fe1d83f6b812 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 # Copyright 2017-2018 New Vector Ltd
 # Copyright 2019 The Matrix.org Foundation C.I.C.
diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py
index a41ca5df9c9c..157f2ff2189b 100644
--- a/synapse/handlers/groups_local.py
+++ b/synapse/handlers/groups_local.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2017 Vector Creations Ltd
 # Copyright 2018 New Vector Ltd
 #
diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py
index d89fa5fb305d..87a8b89237e7 100644
--- a/synapse/handlers/identity.py
+++ b/synapse/handlers/identity.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 # Copyright 2017 Vector Creations Ltd
 # Copyright 2018 New Vector Ltd
diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py
index 13f8152283f2..76242865ae28 100644
--- a/synapse/handlers/initial_sync.py
+++ b/synapse/handlers/initial_sync.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py
index 125dae6d2577..ec8eb2167463 100644
--- a/synapse/handlers/message.py
+++ b/synapse/handlers/message.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 # Copyright 2017-2018 New Vector Ltd
 # Copyright 2019 The Matrix.org Foundation C.I.C.
diff --git a/synapse/handlers/oidc_handler.py b/synapse/handlers/oidc_handler.py
index 6624212d6ff2..b156196a70b7 100644
--- a/synapse/handlers/oidc_handler.py
+++ b/synapse/handlers/oidc_handler.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 Quentin Gliech
 # Copyright 2021 The Matrix.org Foundation C.I.C.
 #
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py
index 66dc886c8100..1e1186c29e7d 100644
--- a/synapse/handlers/pagination.py
+++ b/synapse/handlers/pagination.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014 - 2016 OpenMarket Ltd
 # Copyright 2017 - 2018 New Vector Ltd
 #
diff --git a/synapse/handlers/password_policy.py b/synapse/handlers/password_policy.py
index 92cefa11aadc..cd21efdcc642 100644
--- a/synapse/handlers/password_policy.py
+++ b/synapse/handlers/password_policy.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 New Vector Ltd
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 #
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 0047907cd9ca..251b48148d48 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index a755363c3f44..05b4a97b590b 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/handlers/read_marker.py b/synapse/handlers/read_marker.py
index a54fe1968e3f..c679a8303ed4 100644
--- a/synapse/handlers/read_marker.py
+++ b/synapse/handlers/read_marker.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2017 Vector Creations Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py
index dbfe9bfacadc..f782d9db3205 100644
--- a/synapse/handlers/receipts.py
+++ b/synapse/handlers/receipts.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index 3b6660c873f7..007fb1284091 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014 - 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 4b3d0d72e387..5a888b794168 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014 - 2016 OpenMarket Ltd
 # Copyright 2018-2019 New Vector Ltd
 # Copyright 2019 The Matrix.org Foundation C.I.C.
diff --git a/synapse/handlers/room_list.py b/synapse/handlers/room_list.py
index 924b81db7c1d..141c9c044400 100644
--- a/synapse/handlers/room_list.py
+++ b/synapse/handlers/room_list.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014 - 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 894ef859f4d4..2bbfac6471e5 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2016-2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/handlers/room_member_worker.py b/synapse/handlers/room_member_worker.py
index 3a90fc0c16d3..3e89dd2315f8 100644
--- a/synapse/handlers/room_member_worker.py
+++ b/synapse/handlers/room_member_worker.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/synapse/handlers/saml_handler.py b/synapse/handlers/saml_handler.py
index ec2ba11c7584..80ba65b9e019 100644
--- a/synapse/handlers/saml_handler.py
+++ b/synapse/handlers/saml_handler.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 The Matrix.org Foundation C.I.C.
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index d742dfbd5333..4e718d3f633b 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/handlers/set_password.py b/synapse/handlers/set_password.py index f98a338ec5f5..a63fac828342 100644 --- a/synapse/handlers/set_password.py +++ b/synapse/handlers/set_password.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/handlers/space_summary.py b/synapse/handlers/space_summary.py index 5d9418969d84..01e3e050f93c 100644 --- a/synapse/handlers/space_summary.py +++ b/synapse/handlers/space_summary.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py index 415b1c2d17c7..8d00ffdc73bc 100644 --- a/synapse/handlers/sso.py +++ b/synapse/handlers/sso.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/handlers/state_deltas.py b/synapse/handlers/state_deltas.py index ee8f87e59a36..077c7c064965 100644 --- a/synapse/handlers/state_deltas.py +++ b/synapse/handlers/state_deltas.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py index 8730f99d03ba..383e34026e9b 100644 --- a/synapse/handlers/stats.py +++ b/synapse/handlers/stats.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index f8d88ef77be9..dc8ee8cd1749 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2018, 2019 New Vector Ltd # diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index bb35af099d7a..e22393adc48d 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/handlers/ui_auth/__init__.py b/synapse/handlers/ui_auth/__init__.py index a68d5e790e3a..4c3b669faeef 100644 --- a/synapse/handlers/ui_auth/__init__.py +++ b/synapse/handlers/ui_auth/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/handlers/ui_auth/checkers.py b/synapse/handlers/ui_auth/checkers.py index 3d66bf305e7f..0eeb7c03f2d5 100644 --- a/synapse/handlers/ui_auth/checkers.py +++ b/synapse/handlers/ui_auth/checkers.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index b121286d9563..9b1e6d5c18e8 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/http/__init__.py b/synapse/http/__init__.py index 142b007d010e..ed4671b7deee 100644 --- a/synapse/http/__init__.py +++ b/synapse/http/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/http/additional_resource.py b/synapse/http/additional_resource.py index 479746c9c56c..55ea97a07f29 100644 --- a/synapse/http/additional_resource.py +++ b/synapse/http/additional_resource.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/http/client.py b/synapse/http/client.py index f7a07f0466c5..1730187ffae2 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/http/connectproxyclient.py b/synapse/http/connectproxyclient.py index b797e3ce80b3..17e1c5abb13d 100644 --- a/synapse/http/connectproxyclient.py +++ b/synapse/http/connectproxyclient.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/http/federation/__init__.py b/synapse/http/federation/__init__.py index 1453d045718f..743fb9904a8f 100644 --- a/synapse/http/federation/__init__.py +++ b/synapse/http/federation/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py index 5935a125fd60..950770201a79 100644 --- a/synapse/http/federation/matrix_federation_agent.py +++ b/synapse/http/federation/matrix_federation_agent.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/http/federation/srv_resolver.py b/synapse/http/federation/srv_resolver.py index d9620032d2d7..b8ed4ec905d4 100644 --- a/synapse/http/federation/srv_resolver.py +++ b/synapse/http/federation/srv_resolver.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2019 New Vector Ltd # diff --git a/synapse/http/federation/well_known_resolver.py b/synapse/http/federation/well_known_resolver.py index ce4079f15c4c..20d39a4ea64f 100644 --- a/synapse/http/federation/well_known_resolver.py +++ b/synapse/http/federation/well_known_resolver.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index ab47dec8f2f6..d48721a4e2a4 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/http/proxyagent.py b/synapse/http/proxyagent.py index ea5ad14cb07c..7dfae8b786b9 100644 --- a/synapse/http/proxyagent.py +++ b/synapse/http/proxyagent.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/http/request_metrics.py b/synapse/http/request_metrics.py index 0ec5d941b8fe..602f93c49710 100644 --- a/synapse/http/request_metrics.py +++ b/synapse/http/request_metrics.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/http/server.py b/synapse/http/server.py index fa89260850e6..845651e60634 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index 0e637f47016f..31897546a967 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/logging/__init__.py b/synapse/logging/__init__.py index b28b7b2ef761..e00969f8b18c 100644 --- a/synapse/logging/__init__.py +++ b/synapse/logging/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/logging/_remote.py b/synapse/logging/_remote.py index 643492ceaf83..4e8b0f8d10f2 100644 --- a/synapse/logging/_remote.py +++ b/synapse/logging/_remote.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/logging/_structured.py b/synapse/logging/_structured.py index 3e054f615c48..c7a971a9d60b 100644 --- a/synapse/logging/_structured.py +++ b/synapse/logging/_structured.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/logging/_terse_json.py b/synapse/logging/_terse_json.py index 2fbf5549a1fb..8002a250a268 100644 --- a/synapse/logging/_terse_json.py +++ b/synapse/logging/_terse_json.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/logging/filter.py b/synapse/logging/filter.py index 1baf8dd67934..ed51a4726cda 100644 --- a/synapse/logging/filter.py +++ b/synapse/logging/filter.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/logging/formatter.py b/synapse/logging/formatter.py index 11f60a77f795..c0f12ecd15b8 100644 --- a/synapse/logging/formatter.py +++ b/synapse/logging/formatter.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index bfe9136fd8df..fba2fa390434 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/logging/scopecontextmanager.py b/synapse/logging/scopecontextmanager.py index 7b9c65745627..b1e8e08fe96f 100644 --- a/synapse/logging/scopecontextmanager.py +++ b/synapse/logging/scopecontextmanager.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/logging/utils.py b/synapse/logging/utils.py index fd3543ab0428..08895e72eedd 100644 --- a/synapse/logging/utils.py +++ b/synapse/logging/utils.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index 13a5bc4558cb..31b7b3c256a7 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/metrics/_exposition.py b/synapse/metrics/_exposition.py index 71320a140223..8002be56e0e8 100644 --- a/synapse/metrics/_exposition.py +++ b/synapse/metrics/_exposition.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015-2019 Prometheus Python Client Developers # Copyright 2019 Matrix.org Foundation C.I.C. # diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index e8a9096c039e..cbd0894e57a8 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index ca1bd4cdc96a..b7dbbfc27c18 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 New Vector Ltd # Copyright 2020 The Matrix.org Foundation C.I.C. # diff --git a/synapse/module_api/errors.py b/synapse/module_api/errors.py index b15441772c26..d24864c5492a 100644 --- a/synapse/module_api/errors.py +++ b/synapse/module_api/errors.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/notifier.py b/synapse/notifier.py index 7ce34380af3c..d5ab77058dd3 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014 - 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index 9fc3da49a281..2c23afe8e3ab 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/push/action_generator.py b/synapse/push/action_generator.py index 38a47a600f8e..60758df01664 100644 --- a/synapse/push/action_generator.py +++ b/synapse/push/action_generator.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 1897f5915390..50b470c310b1 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015 OpenMarket Ltd # Copyright 2017 New Vector Ltd # diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py index 0cadba761afe..2ee0ccd58aa9 100644 --- a/synapse/push/clientformat.py +++ b/synapse/push/clientformat.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index c0968dc7a141..cd89b54305ab 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index 26af5309c1bb..06bf5f8ada84 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2017 New Vector Ltd # diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 2e5161de2c5b..c4b43b0d3fc8 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/push/presentable_names.py b/synapse/push/presentable_names.py index 04c2c1482ce2..412941393fe3 100644 --- a/synapse/push/presentable_names.py +++ b/synapse/push/presentable_names.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py index ba1877adcd96..49ecb38522ae 100644 --- a/synapse/push/push_rule_evaluator.py +++ b/synapse/push/push_rule_evaluator.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2017 New Vector Ltd # diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py index df34103224fe..9c85200c0fb4 100644 --- a/synapse/push/push_tools.py +++ b/synapse/push/push_tools.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, 
Version 2.0 (the "License"); diff --git a/synapse/push/pusher.py b/synapse/push/pusher.py index cb9412785060..c51938b8cf48 100644 --- a/synapse/push/pusher.py +++ b/synapse/push/pusher.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 4c7f5fecee98..564a5ed0df53 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/__init__.py b/synapse/replication/__init__.py index b7df13c9eebc..f43a360a807c 100644 --- a/synapse/replication/__init__.py +++ b/synapse/replication/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/http/__init__.py b/synapse/replication/http/__init__.py index cb4a52dbe9b4..ba8114ac9e13 100644 --- a/synapse/replication/http/__init__.py +++ b/synapse/replication/http/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index b7aa0c280fe5..ece03467b57d 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/http/account_data.py b/synapse/replication/http/account_data.py index 60899b6ad622..70e951af6376 100644 --- a/synapse/replication/http/account_data.py +++ b/synapse/replication/http/account_data.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2021 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/http/devices.py b/synapse/replication/http/devices.py index 807b85d2e124..5a5818ef61e2 100644 --- a/synapse/replication/http/devices.py +++ b/synapse/replication/http/devices.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py index 82ea3b895f9a..79cadb7b574c 100644 --- a/synapse/replication/http/federation.py +++ b/synapse/replication/http/federation.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/http/login.py b/synapse/replication/http/login.py index 4ec1bfa6eaf9..c2e8c0029312 100644 --- a/synapse/replication/http/login.py +++ b/synapse/replication/http/login.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py index c10992ff51e5..289a397d6885 100644 --- a/synapse/replication/http/membership.py +++ b/synapse/replication/http/membership.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/http/presence.py b/synapse/replication/http/presence.py index bc9aa82cb495..f25307620d55 100644 --- a/synapse/replication/http/presence.py +++ b/synapse/replication/http/presence.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/http/push.py b/synapse/replication/http/push.py index 054ed64d34dd..139427cb1f29 100644 --- a/synapse/replication/http/push.py +++ b/synapse/replication/http/push.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/http/register.py b/synapse/replication/http/register.py index 73d747785420..d6dd7242eb20 100644 --- a/synapse/replication/http/register.py +++ b/synapse/replication/http/register.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/http/send_event.py b/synapse/replication/http/send_event.py index a4c5b4429207..fae5ffa451d3 100644 --- a/synapse/replication/http/send_event.py +++ b/synapse/replication/http/send_event.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/http/streams.py b/synapse/replication/http/streams.py index 309159e3048b..9afa147d00c1 100644 --- a/synapse/replication/http/streams.py +++ b/synapse/replication/http/streams.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/slave/__init__.py b/synapse/replication/slave/__init__.py index b7df13c9eebc..f43a360a807c 100644 --- a/synapse/replication/slave/__init__.py +++ b/synapse/replication/slave/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/slave/storage/__init__.py b/synapse/replication/slave/storage/__init__.py index b7df13c9eebc..f43a360a807c 100644 --- a/synapse/replication/slave/storage/__init__.py +++ b/synapse/replication/slave/storage/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/slave/storage/_base.py b/synapse/replication/slave/storage/_base.py index 693c9ab901cd..faa99387a745 100644 --- a/synapse/replication/slave/storage/_base.py +++ b/synapse/replication/slave/storage/_base.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/slave/storage/_slaved_id_tracker.py b/synapse/replication/slave/storage/_slaved_id_tracker.py index 0d39a93ed229..2cb7489047f7 100644 --- a/synapse/replication/slave/storage/_slaved_id_tracker.py +++ b/synapse/replication/slave/storage/_slaved_id_tracker.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/slave/storage/account_data.py b/synapse/replication/slave/storage/account_data.py index 21afe5f15551..ee74ee7d8547 100644 --- a/synapse/replication/slave/storage/account_data.py +++ b/synapse/replication/slave/storage/account_data.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/replication/slave/storage/appservice.py b/synapse/replication/slave/storage/appservice.py index 0f8d7037bde1..29f50c0add5f 100644 --- a/synapse/replication/slave/storage/appservice.py +++ b/synapse/replication/slave/storage/appservice.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/replication/slave/storage/client_ips.py b/synapse/replication/slave/storage/client_ips.py index 0f5b7adef781..8730966380de 100644 --- a/synapse/replication/slave/storage/client_ips.py +++ b/synapse/replication/slave/storage/client_ips.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/slave/storage/deviceinbox.py b/synapse/replication/slave/storage/deviceinbox.py index 1260f6d1412f..e94075108465 100644 --- a/synapse/replication/slave/storage/deviceinbox.py +++ b/synapse/replication/slave/storage/deviceinbox.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py index e0d86240dd19..70207420a6b0 100644 --- a/synapse/replication/slave/storage/devices.py +++ b/synapse/replication/slave/storage/devices.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 
2.0 (the "License"); diff --git a/synapse/replication/slave/storage/directory.py b/synapse/replication/slave/storage/directory.py index 1945bcf9a8d8..71fde0c96cc6 100644 --- a/synapse/replication/slave/storage/directory.py +++ b/synapse/replication/slave/storage/directory.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py index fbffe6d85c28..d4d3f8c44876 100644 --- a/synapse/replication/slave/storage/events.py +++ b/synapse/replication/slave/storage/events.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/replication/slave/storage/filtering.py b/synapse/replication/slave/storage/filtering.py index 6a232528610b..37875bc9730f 100644 --- a/synapse/replication/slave/storage/filtering.py +++ b/synapse/replication/slave/storage/filtering.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/slave/storage/groups.py b/synapse/replication/slave/storage/groups.py index 30955bcbfe0f..e9bdc3847006 100644 --- a/synapse/replication/slave/storage/groups.py +++ b/synapse/replication/slave/storage/groups.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/slave/storage/keys.py b/synapse/replication/slave/storage/keys.py index 961579751cdf..a00b38c512a0 100644 --- a/synapse/replication/slave/storage/keys.py +++ b/synapse/replication/slave/storage/keys.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/slave/storage/presence.py b/synapse/replication/slave/storage/presence.py index 55620c03d8c3..57327d910d56 100644 --- a/synapse/replication/slave/storage/presence.py +++ b/synapse/replication/slave/storage/presence.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/slave/storage/profile.py b/synapse/replication/slave/storage/profile.py index f85b20a07177..99f4a2264287 100644 --- a/synapse/replication/slave/storage/profile.py +++ b/synapse/replication/slave/storage/profile.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/slave/storage/push_rule.py b/synapse/replication/slave/storage/push_rule.py index de904c943cc0..4d5f86286213 100644 --- a/synapse/replication/slave/storage/push_rule.py +++ b/synapse/replication/slave/storage/push_rule.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/replication/slave/storage/pushers.py b/synapse/replication/slave/storage/pushers.py index 93161c3dfb97..2672a2c94b15 100644 --- a/synapse/replication/slave/storage/pushers.py +++ b/synapse/replication/slave/storage/pushers.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/replication/slave/storage/receipts.py b/synapse/replication/slave/storage/receipts.py 
index 3dfdd9961d26..3826b87decaf 100644 --- a/synapse/replication/slave/storage/receipts.py +++ b/synapse/replication/slave/storage/receipts.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/replication/slave/storage/registration.py b/synapse/replication/slave/storage/registration.py index a40f064e2b63..5dae35a9605f 100644 --- a/synapse/replication/slave/storage/registration.py +++ b/synapse/replication/slave/storage/registration.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/slave/storage/room.py b/synapse/replication/slave/storage/room.py index 109ac6bea141..8cc6de3f4698 100644 --- a/synapse/replication/slave/storage/room.py +++ b/synapse/replication/slave/storage/room.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/slave/storage/transactions.py b/synapse/replication/slave/storage/transactions.py index 2091ac0df67d..a59e54392441 100644 --- a/synapse/replication/slave/storage/transactions.py +++ b/synapse/replication/slave/storage/transactions.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/tcp/__init__.py b/synapse/replication/tcp/__init__.py index 1b8718b11daa..1fa60af8e6bc 100644 --- a/synapse/replication/tcp/__init__.py +++ b/synapse/replication/tcp/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 3455839d672f..ced69ee904c6 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py index 8abed1f52d3e..505d450e1991 100644 --- a/synapse/replication/tcp/commands.py +++ b/synapse/replication/tcp/commands.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/tcp/external_cache.py b/synapse/replication/tcp/external_cache.py index d89a36f25a59..1a3b051e3cb2 100644 --- a/synapse/replication/tcp/external_cache.py +++ b/synapse/replication/tcp/external_cache.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index a8894beadfd1..2ce1b9f2224f 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # Copyright 2020 The Matrix.org Foundation C.I.C. 
# diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py index d10d57424618..6860576e7817 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py index 98bdeb0ec698..6a2c2655e4bd 100644 --- a/synapse/replication/tcp/redis.py +++ b/synapse/replication/tcp/redis.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index 2018f9f29ed5..bd47d8425814 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/replication/tcp/streams/__init__.py b/synapse/replication/tcp/streams/__init__.py index d1a61c331480..fb74ac4e98ad 100644 --- a/synapse/replication/tcp/streams/__init__.py +++ b/synapse/replication/tcp/streams/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # Copyright 2019 New Vector Ltd # diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py index 3dfee7674344..520c45f151d4 100644 --- a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # Copyright 2019 New Vector Ltd # diff --git a/synapse/replication/tcp/streams/events.py b/synapse/replication/tcp/streams/events.py index fa5e37ba7bc5..e7e87bac9241 100644 --- a/synapse/replication/tcp/streams/events.py +++ b/synapse/replication/tcp/streams/events.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # Copyright 2019 New Vector Ltd # diff --git a/synapse/replication/tcp/streams/federation.py b/synapse/replication/tcp/streams/federation.py index 9bb8e9e17731..096a85d36306 100644 --- a/synapse/replication/tcp/streams/federation.py +++ b/synapse/replication/tcp/streams/federation.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # Copyright 2019 New Vector Ltd # diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index 40f5c32db2df..79d52d2dcb43 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 2dec818a5f5d..9cb9a9f6aa93 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018-2019 New Vector Ltd # Copyright 2020, 2021 The Matrix.org Foundation C.I.C. 
diff --git a/synapse/rest/admin/_base.py b/synapse/rest/admin/_base.py index 7681e55b5832..f203f6fdc6e5 100644 --- a/synapse/rest/admin/_base.py +++ b/synapse/rest/admin/_base.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/admin/devices.py b/synapse/rest/admin/devices.py index 5996de11c3ed..5715190a78cc 100644 --- a/synapse/rest/admin/devices.py +++ b/synapse/rest/admin/devices.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 Dirk Klimpel # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/admin/event_reports.py b/synapse/rest/admin/event_reports.py index 381c3fe6853d..bbfcaf723b7b 100644 --- a/synapse/rest/admin/event_reports.py +++ b/synapse/rest/admin/event_reports.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 Dirk Klimpel # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/admin/groups.py b/synapse/rest/admin/groups.py index ebc587aa0603..3b3ffde0b65c 100644 --- a/synapse/rest/admin/groups.py +++ b/synapse/rest/admin/groups.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py index 40646ef241cc..24dd46113a29 100644 --- a/synapse/rest/admin/media.py +++ b/synapse/rest/admin/media.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018-2019 New Vector Ltd # diff --git a/synapse/rest/admin/purge_room_servlet.py b/synapse/rest/admin/purge_room_servlet.py index 49966ee3e044..2365ff7a0f26 100644 --- a/synapse/rest/admin/purge_room_servlet.py +++ b/synapse/rest/admin/purge_room_servlet.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index cfe1bebb91b2..d0cf12174360 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019-2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/admin/server_notice_servlet.py b/synapse/rest/admin/server_notice_servlet.py index f495666f4a71..cc3ab5854b0b 100644 --- a/synapse/rest/admin/server_notice_servlet.py +++ b/synapse/rest/admin/server_notice_servlet.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/admin/statistics.py b/synapse/rest/admin/statistics.py index f2490e382dcf..948de94ccd6a 100644 --- a/synapse/rest/admin/statistics.py +++ b/synapse/rest/admin/statistics.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 Dirk Klimpel # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 04990c71fb57..edda7861fae5 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/__init__.py b/synapse/rest/client/__init__.py index fe0ac3f8e952..629e2df74a4f 100644 --- a/synapse/rest/client/__init__.py +++ b/synapse/rest/client/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/transactions.py b/synapse/rest/client/transactions.py index 7be5c0fb8807..94ff3719ce88 100644 --- a/synapse/rest/client/transactions.py +++ b/synapse/rest/client/transactions.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v1/__init__.py b/synapse/rest/client/v1/__init__.py index bfebb0f644f4..5e83dba2ed6f 100644 --- a/synapse/rest/client/v1/__init__.py +++ b/synapse/rest/client/v1/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py index e5af26b176dc..ae92a3df8e35 100644 --- a/synapse/rest/client/v1/directory.py +++ b/synapse/rest/client/v1/directory.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py index 6de4078290ba..ee7454996e5a 100644 --- a/synapse/rest/client/v1/events.py +++ b/synapse/rest/client/v1/events.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v1/initial_sync.py b/synapse/rest/client/v1/initial_sync.py index 91da0ee57303..bef1edc838ab 100644 --- a/synapse/rest/client/v1/initial_sync.py +++ b/synapse/rest/client/v1/initial_sync.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 3151e72d4f19..42e709ec14bd 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v1/logout.py b/synapse/rest/client/v1/logout.py index ad8cea49c6ed..5aa7908d73a6 100644 --- a/synapse/rest/client/v1/logout.py +++ b/synapse/rest/client/v1/logout.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v1/presence.py b/synapse/rest/client/v1/presence.py index 23a529f8e3d3..c232484f29ec 100644 --- a/synapse/rest/client/v1/presence.py +++ b/synapse/rest/client/v1/presence.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py index 717c5f2b108a..f42f4b35674f 100644 --- a/synapse/rest/client/v1/profile.py +++ b/synapse/rest/client/v1/profile.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 
2.0 (the "License"); diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index 241e535917a7..be29a0b39ec6 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py index 0c148a213db4..18102eca6c1b 100644 --- a/synapse/rest/client/v1/pusher.py +++ b/synapse/rest/client/v1/pusher.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 525efdf22137..5cab4d3c7baf 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/rest/client/v1/voip.py b/synapse/rest/client/v1/voip.py index d07ca2c47cd2..c780ffded5e9 100644 --- a/synapse/rest/client/v1/voip.py +++ b/synapse/rest/client/v1/voip.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/__init__.py b/synapse/rest/client/v2_alpha/__init__.py index bfebb0f644f4..5e83dba2ed6f 100644 --- a/synapse/rest/client/v2_alpha/__init__.py +++ b/synapse/rest/client/v2_alpha/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/_base.py b/synapse/rest/client/v2_alpha/_base.py index f016b4f1bd41..0443f4571c1c 100644 --- a/synapse/rest/client/v2_alpha/_base.py +++ b/synapse/rest/client/v2_alpha/_base.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index 411fb57c473d..3aad15132d66 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2017 Vector Creations Ltd # Copyright 2018 New Vector Ltd diff --git a/synapse/rest/client/v2_alpha/account_data.py b/synapse/rest/client/v2_alpha/account_data.py index 3f28c0bc3eb6..7517e9304e8d 100644 --- a/synapse/rest/client/v2_alpha/account_data.py +++ b/synapse/rest/client/v2_alpha/account_data.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/account_validity.py b/synapse/rest/client/v2_alpha/account_validity.py index bd7f9ae2039b..0ad07fb89526 100644 --- a/synapse/rest/client/v2_alpha/account_validity.py +++ b/synapse/rest/client/v2_alpha/account_validity.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index 75ece1c911d0..6ea1b50a625e 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 
2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/capabilities.py b/synapse/rest/client/v2_alpha/capabilities.py index 44ccf10ed45e..6a240214845d 100644 --- a/synapse/rest/client/v2_alpha/capabilities.py +++ b/synapse/rest/client/v2_alpha/capabilities.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/devices.py b/synapse/rest/client/v2_alpha/devices.py index 3d07aadd39ba..9af05f9b11a4 100644 --- a/synapse/rest/client/v2_alpha/devices.py +++ b/synapse/rest/client/v2_alpha/devices.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2020 The Matrix.org Foundation C.I.C. # diff --git a/synapse/rest/client/v2_alpha/filter.py b/synapse/rest/client/v2_alpha/filter.py index 7cc692643b1d..411667a9c8d9 100644 --- a/synapse/rest/client/v2_alpha/filter.py +++ b/synapse/rest/client/v2_alpha/filter.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index 08fb6b2b06ad..6285680c00c8 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py index f092e5b3a2de..a57ccbb5e5d5 100644 --- a/synapse/rest/client/v2_alpha/keys.py +++ b/synapse/rest/client/v2_alpha/keys.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2019 New Vector Ltd # Copyright 2020 The Matrix.org Foundation C.I.C. diff --git a/synapse/rest/client/v2_alpha/notifications.py b/synapse/rest/client/v2_alpha/notifications.py index 87063ec8b1e1..0ede643c2d91 100644 --- a/synapse/rest/client/v2_alpha/notifications.py +++ b/synapse/rest/client/v2_alpha/notifications.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/openid.py b/synapse/rest/client/v2_alpha/openid.py index 5b996e2d6318..d3322acc384b 100644 --- a/synapse/rest/client/v2_alpha/openid.py +++ b/synapse/rest/client/v2_alpha/openid.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/password_policy.py b/synapse/rest/client/v2_alpha/password_policy.py index 68b27ff23a46..a83927aee641 100644 --- a/synapse/rest/client/v2_alpha/password_policy.py +++ b/synapse/rest/client/v2_alpha/password_policy.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/read_marker.py b/synapse/rest/client/v2_alpha/read_marker.py index 55c6688f529f..5988fa47e5e9 100644 --- a/synapse/rest/client/v2_alpha/read_marker.py +++ b/synapse/rest/client/v2_alpha/read_marker.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/receipts.py b/synapse/rest/client/v2_alpha/receipts.py index 6f7246a39429..8cf4aebdbec3 100644 --- a/synapse/rest/client/v2_alpha/receipts.py +++ b/synapse/rest/client/v2_alpha/receipts.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 4a064849c1b8..b26aad7b3419 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015 - 2016 OpenMarket Ltd # Copyright 2017 Vector Creations Ltd # diff --git a/synapse/rest/client/v2_alpha/relations.py b/synapse/rest/client/v2_alpha/relations.py index fe765da23c5b..c7da6759dbf8 100644 --- a/synapse/rest/client/v2_alpha/relations.py +++ b/synapse/rest/client/v2_alpha/relations.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/report_event.py b/synapse/rest/client/v2_alpha/report_event.py index 215d619ca102..2c169abbf313 100644 --- a/synapse/rest/client/v2_alpha/report_event.py +++ b/synapse/rest/client/v2_alpha/report_event.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 53de97923fa1..263596be8658 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017, 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py index 147920767fd2..6d1b083acb47 100644 --- a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py +++ b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/sendtodevice.py b/synapse/rest/client/v2_alpha/sendtodevice.py index 79c1b526eedd..f8dcee603ce8 100644 --- a/synapse/rest/client/v2_alpha/sendtodevice.py +++ b/synapse/rest/client/v2_alpha/sendtodevice.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/shared_rooms.py b/synapse/rest/client/v2_alpha/shared_rooms.py index c866d5151c99..d2e7f04b406c 100644 --- a/synapse/rest/client/v2_alpha/shared_rooms.py +++ b/synapse/rest/client/v2_alpha/shared_rooms.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 Half-Shot # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git 
a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index 3481770c83e2..95ee3f1b84f3 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/tags.py b/synapse/rest/client/v2_alpha/tags.py index a97cd66c5234..c14f83be1878 100644 --- a/synapse/rest/client/v2_alpha/tags.py +++ b/synapse/rest/client/v2_alpha/tags.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/thirdparty.py b/synapse/rest/client/v2_alpha/thirdparty.py index 0c127a1b5fd8..b5c67c9bb67e 100644 --- a/synapse/rest/client/v2_alpha/thirdparty.py +++ b/synapse/rest/client/v2_alpha/thirdparty.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/tokenrefresh.py b/synapse/rest/client/v2_alpha/tokenrefresh.py index 79317c74bae1..b2f858545cbe 100644 --- a/synapse/rest/client/v2_alpha/tokenrefresh.py +++ b/synapse/rest/client/v2_alpha/tokenrefresh.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/v2_alpha/user_directory.py b/synapse/rest/client/v2_alpha/user_directory.py index ad598cefe00e..7e8912f0b919 100644 --- a/synapse/rest/client/v2_alpha/user_directory.py +++ b/synapse/rest/client/v2_alpha/user_directory.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 3e3d8839f494..4582c274c7ad 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2017 Vector Creations Ltd # Copyright 2018-2019 New Vector Ltd diff --git a/synapse/rest/consent/consent_resource.py b/synapse/rest/consent/consent_resource.py index 8b9ef26cf2c0..c4550d3cf06b 100644 --- a/synapse/rest/consent/consent_resource.py +++ b/synapse/rest/consent/consent_resource.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/health.py b/synapse/rest/health.py index 0170950bf382..4487b54abf39 100644 --- a/synapse/rest/health.py +++ b/synapse/rest/health.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/key/__init__.py b/synapse/rest/key/__init__.py index fe0ac3f8e952..629e2df74a4f 100644 --- a/synapse/rest/key/__init__.py +++ b/synapse/rest/key/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/key/v2/__init__.py b/synapse/rest/key/v2/__init__.py index cb5abcf826be..c6c63073eac0 100644 --- a/synapse/rest/key/v2/__init__.py +++ b/synapse/rest/key/v2/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/key/v2/local_key_resource.py b/synapse/rest/key/v2/local_key_resource.py index d8e8e48c1c1e..e8dbe240d81e 100644 --- a/synapse/rest/key/v2/local_key_resource.py +++ b/synapse/rest/key/v2/local_key_resource.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/media/v1/__init__.py b/synapse/rest/media/v1/__init__.py index 3b8c96e267fe..d20186bbd095 100644 --- a/synapse/rest/media/v1/__init__.py +++ b/synapse/rest/media/v1/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py index 636694707117..0fb4cd81f1a2 100644 --- a/synapse/rest/media/v1/_base.py +++ b/synapse/rest/media/v1/_base.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2019-2021 The Matrix.org Foundation C.I.C. # diff --git a/synapse/rest/media/v1/config_resource.py b/synapse/rest/media/v1/config_resource.py index c41a7ab412c9..b20c29f0071e 100644 --- a/synapse/rest/media/v1/config_resource.py +++ b/synapse/rest/media/v1/config_resource.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 Will Hunt # Copyright 2020-2021 The Matrix.org Foundation C.I.C. # diff --git a/synapse/rest/media/v1/download_resource.py b/synapse/rest/media/v1/download_resource.py index 5dadaeaf5701..cd2468f9c59a 100644 --- a/synapse/rest/media/v1/download_resource.py +++ b/synapse/rest/media/v1/download_resource.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2020-2021 The Matrix.org Foundation C.I.C. # diff --git a/synapse/rest/media/v1/filepath.py b/synapse/rest/media/v1/filepath.py index 7792f26e7846..4088e7a059ad 100644 --- a/synapse/rest/media/v1/filepath.py +++ b/synapse/rest/media/v1/filepath.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2020-2021 The Matrix.org Foundation C.I.C. # diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 0c041b542d4d..87e3645ddcd8 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018-2021 The Matrix.org Foundation C.I.C. # diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py index b1b1c9e6ecc9..c7fd97c46c9d 100644 --- a/synapse/rest/media/v1/media_storage.py +++ b/synapse/rest/media/v1/media_storage.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018-2021 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 814145a04a15..0adfb1a70f73 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2020-2021 The Matrix.org Foundation C.I.C. # diff --git a/synapse/rest/media/v1/storage_provider.py b/synapse/rest/media/v1/storage_provider.py index 031947557d8b..0ff6ad3c0c22 100644 --- a/synapse/rest/media/v1/storage_provider.py +++ b/synapse/rest/media/v1/storage_provider.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018-2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/v1/thumbnail_resource.py index af802bc0b125..a029d426f0b6 100644 --- a/synapse/rest/media/v1/thumbnail_resource.py +++ b/synapse/rest/media/v1/thumbnail_resource.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2020-2021 The Matrix.org Foundation C.I.C. # diff --git a/synapse/rest/media/v1/thumbnailer.py b/synapse/rest/media/v1/thumbnailer.py index 988f52c78f73..37fe582390e3 100644 --- a/synapse/rest/media/v1/thumbnailer.py +++ b/synapse/rest/media/v1/thumbnailer.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2020-2021 The Matrix.org Foundation C.I.C. # diff --git a/synapse/rest/media/v1/upload_resource.py b/synapse/rest/media/v1/upload_resource.py index 0138b2e2d127..80f017a4ddfb 100644 --- a/synapse/rest/media/v1/upload_resource.py +++ b/synapse/rest/media/v1/upload_resource.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2020-2021 The Matrix.org Foundation C.I.C. # diff --git a/synapse/rest/synapse/__init__.py b/synapse/rest/synapse/__init__.py index c0b733488b5f..6ef4fbe8f77b 100644 --- a/synapse/rest/synapse/__init__.py +++ b/synapse/rest/synapse/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/synapse/client/__init__.py b/synapse/rest/synapse/client/__init__.py index 9eeb970580ed..47a2f72b3273 100644 --- a/synapse/rest/synapse/client/__init__.py +++ b/synapse/rest/synapse/client/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/synapse/client/new_user_consent.py b/synapse/rest/synapse/client/new_user_consent.py index 78ee0b5e88eb..e5634f967966 100644 --- a/synapse/rest/synapse/client/new_user_consent.py +++ b/synapse/rest/synapse/client/new_user_consent.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2021 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/synapse/client/oidc/__init__.py b/synapse/rest/synapse/client/oidc/__init__.py index 64c0deb75dde..36ba40165621 100644 --- a/synapse/rest/synapse/client/oidc/__init__.py +++ b/synapse/rest/synapse/client/oidc/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 Quentin Gliech # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/synapse/client/oidc/callback_resource.py b/synapse/rest/synapse/client/oidc/callback_resource.py index 1af33f0a45ba..7785f17e9094 100644 --- a/synapse/rest/synapse/client/oidc/callback_resource.py +++ b/synapse/rest/synapse/client/oidc/callback_resource.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 Quentin Gliech # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/synapse/client/password_reset.py b/synapse/rest/synapse/client/password_reset.py index d26ce46efcfe..f2800bf2db1d 100644 --- a/synapse/rest/synapse/client/password_reset.py +++ b/synapse/rest/synapse/client/password_reset.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/synapse/client/pick_idp.py b/synapse/rest/synapse/client/pick_idp.py index 9550b829980d..d3a94a9349be 100644 --- a/synapse/rest/synapse/client/pick_idp.py +++ b/synapse/rest/synapse/client/pick_idp.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/synapse/client/pick_username.py b/synapse/rest/synapse/client/pick_username.py index d9ffe84489af..9b002cc15ebe 100644 --- a/synapse/rest/synapse/client/pick_username.py +++ b/synapse/rest/synapse/client/pick_username.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/synapse/client/saml2/__init__.py b/synapse/rest/synapse/client/saml2/__init__.py index 3e8235ee1e37..781ccb237c89 100644 --- a/synapse/rest/synapse/client/saml2/__init__.py +++ b/synapse/rest/synapse/client/saml2/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/synapse/client/saml2/metadata_resource.py b/synapse/rest/synapse/client/saml2/metadata_resource.py index 1e8526e22e71..b37c7083dc7f 100644 --- a/synapse/rest/synapse/client/saml2/metadata_resource.py +++ b/synapse/rest/synapse/client/saml2/metadata_resource.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/synapse/client/saml2/response_resource.py b/synapse/rest/synapse/client/saml2/response_resource.py index 4dfadf1bfb07..774ccd870fa3 100644 --- a/synapse/rest/synapse/client/saml2/response_resource.py +++ b/synapse/rest/synapse/client/saml2/response_resource.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # # Copyright 2018 New Vector Ltd # diff --git a/synapse/rest/synapse/client/sso_register.py b/synapse/rest/synapse/client/sso_register.py index f2acce2437ea..70cd148a760e 100644 --- a/synapse/rest/synapse/client/sso_register.py +++ b/synapse/rest/synapse/client/sso_register.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/rest/well_known.py b/synapse/rest/well_known.py index f591cc6c5c7b..19ac3af33753 100644 --- a/synapse/rest/well_known.py +++ b/synapse/rest/well_known.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/secrets.py b/synapse/secrets.py index 7939db75e781..bf829251fd0a 100644 --- a/synapse/secrets.py +++ b/synapse/secrets.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/server.py b/synapse/server.py index cfb55c230ded..6c35ae6e5032 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2017-2018 New Vector Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. 
diff --git a/synapse/server_notices/consent_server_notices.py b/synapse/server_notices/consent_server_notices.py index a9349bf9a17a..e65f6f88fe74 100644 --- a/synapse/server_notices/consent_server_notices.py +++ b/synapse/server_notices/consent_server_notices.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/server_notices/resource_limits_server_notices.py b/synapse/server_notices/resource_limits_server_notices.py index a18a2e76c999..e4b0bc5c7235 100644 --- a/synapse/server_notices/resource_limits_server_notices.py +++ b/synapse/server_notices/resource_limits_server_notices.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py index 144e1da78e2e..f19075b76050 100644 --- a/synapse/server_notices/server_notices_manager.py +++ b/synapse/server_notices/server_notices_manager.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/server_notices/server_notices_sender.py b/synapse/server_notices/server_notices_sender.py index 965c6458893d..c875b15b326d 100644 --- a/synapse/server_notices/server_notices_sender.py +++ b/synapse/server_notices/server_notices_sender.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/server_notices/worker_server_notices_sender.py b/synapse/server_notices/worker_server_notices_sender.py index c76bd5746044..cc5331849102 100644 --- a/synapse/server_notices/worker_server_notices_sender.py +++ b/synapse/server_notices/worker_server_notices_sender.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/spam_checker_api/__init__.py b/synapse/spam_checker_api/__init__.py index 3ce25bb012a0..73018f2d002e 100644 --- a/synapse/spam_checker_api/__init__.py +++ b/synapse/spam_checker_api/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index c0f79ffdc8d1..c7ee73115436 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/state/v1.py b/synapse/state/v1.py index ce255da6fd07..318e9988130f 100644 --- a/synapse/state/v1.py +++ b/synapse/state/v1.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/state/v2.py b/synapse/state/v2.py index e73a548ee412..32671ddbde4f 100644 --- a/synapse/state/v2.py +++ b/synapse/state/v2.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 0b9007e51fd3..105e4e1fec1b 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018,2019 New Vector Ltd # diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 240905329fb7..56dd3a4861d9 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2017-2018 New Vector Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index ccb06aab391d..142787fdfd1c 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 77ef29ec71eb..9a6d2b21f9a7 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2017-2018 New Vector Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. diff --git a/synapse/storage/databases/__init__.py b/synapse/storage/databases/__init__.py index 379c78bb83bb..20b755056b7f 100644 --- a/synapse/storage/databases/__init__.py +++ b/synapse/storage/databases/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index b3d16ca7ac01..5c50f5f950a4 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # Copyright 2019-2021 The Matrix.org Foundation C.I.C. 
diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index a277a1ef1349..1d02795f4346 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/storage/databases/main/appservice.py b/synapse/storage/databases/main/appservice.py index 85bb853d33cf..9f182c2a890f 100644 --- a/synapse/storage/databases/main/appservice.py +++ b/synapse/storage/databases/main/appservice.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index 1e7637a6f5c4..ecc1f935e228 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/censor_events.py b/synapse/storage/databases/main/censor_events.py index 3e26d5ba8797..f22c1f241b65 100644 --- a/synapse/storage/databases/main/censor_events.py +++ b/synapse/storage/databases/main/censor_events.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py index ea3c15fd0ee5..d60010e942ab 100644 --- a/synapse/storage/databases/main/client_ips.py +++ b/synapse/storage/databases/main/client_ips.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index 691080ce742b..7c9d1f744eaf 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 9bf8ba888f76..b20487558060 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2019 New Vector Ltd # Copyright 2019,2020 The Matrix.org Foundation C.I.C. diff --git a/synapse/storage/databases/main/directory.py b/synapse/storage/databases/main/directory.py index 267b948397be..86075bc55b23 100644 --- a/synapse/storage/databases/main/directory.py +++ b/synapse/storage/databases/main/directory.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/e2e_room_keys.py b/synapse/storage/databases/main/e2e_room_keys.py index 12cecceec2a4..b15fb71e6258 100644 --- a/synapse/storage/databases/main/e2e_room_keys.py +++ b/synapse/storage/databases/main/e2e_room_keys.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 New Vector Ltd # Copyright 2019 Matrix.org Foundation C.I.C. 
# diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index f1e7859d26e7..88afe97c41ac 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2019 New Vector Ltd # Copyright 2019,2020 The Matrix.org Foundation C.I.C. diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index a956be491a6e..32ce70a3962e 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index 78245ad5bd30..584532211837 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index ad17123915b4..bed4326d1137 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018-2019 New Vector Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index 79e7df6ca9c1..cbe4be1437a0 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/events_forward_extremities.py b/synapse/storage/databases/main/events_forward_extremities.py index b3703ae161bd..6d2688d71189 100644 --- a/synapse/storage/databases/main/events_forward_extremities.py +++ b/synapse/storage/databases/main/events_forward_extremities.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2021 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index c00780969f6e..64d70785b806 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/filtering.py b/synapse/storage/databases/main/filtering.py index d2f5b9a50220..bb244a03c0a8 100644 --- a/synapse/storage/databases/main/filtering.py +++ b/synapse/storage/databases/main/filtering.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/group_server.py b/synapse/storage/databases/main/group_server.py index bd7826f4e9b7..66ad363bfb29 100644 --- a/synapse/storage/databases/main/group_server.py +++ b/synapse/storage/databases/main/group_server.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/storage/databases/main/keys.py b/synapse/storage/databases/main/keys.py index d504323b0330..0e8680783404 100644 --- a/synapse/storage/databases/main/keys.py +++ b/synapse/storage/databases/main/keys.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2019 New Vector Ltd. # diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py index b7820ac7ff30..c584868188e9 100644 --- a/synapse/storage/databases/main/media_repository.py +++ b/synapse/storage/databases/main/media_repository.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2020-2021 The Matrix.org Foundation C.I.C. # diff --git a/synapse/storage/databases/main/metrics.py b/synapse/storage/databases/main/metrics.py index 614a418a158a..c3f551d37756 100644 --- a/synapse/storage/databases/main/metrics.py +++ b/synapse/storage/databases/main/metrics.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py index 757da3d55dd1..fe25638289cd 100644 --- a/synapse/storage/databases/main/monthly_active_users.py +++ b/synapse/storage/databases/main/monthly_active_users.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/presence.py b/synapse/storage/databases/main/presence.py index 0ff693a3109f..c207d917b109 100644 --- a/synapse/storage/databases/main/presence.py +++ b/synapse/storage/databases/main/presence.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/profile.py b/synapse/storage/databases/main/profile.py index ba01d3108a96..9b4e95e134e7 100644 --- a/synapse/storage/databases/main/profile.py +++ b/synapse/storage/databases/main/profile.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py index 41f4fe7f95cd..8f83748b5edb 100644 --- a/synapse/storage/databases/main/purge_events.py +++ b/synapse/storage/databases/main/purge_events.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index 9e58dc0e6ae4..db5217633796 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py index c65558c2800d..b48fe086d4cc 100644 --- a/synapse/storage/databases/main/pusher.py +++ b/synapse/storage/databases/main/pusher.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 43c852c96c00..3647276acb95 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 90a8f664ef95..833214b7e07b 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2017-2018 New Vector Ltd # Copyright 2019,2020 The Matrix.org Foundation C.I.C. 
diff --git a/synapse/storage/databases/main/rejections.py b/synapse/storage/databases/main/rejections.py index 1e361aaa9a73..167318b314ae 100644 --- a/synapse/storage/databases/main/rejections.py +++ b/synapse/storage/databases/main/rejections.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index 5cd61547f733..2bbf6d6a95ed 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 47fb12f3f64f..5f38634f48da 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. # diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index a9216ca9ae52..ef5587f87a9f 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/storage/databases/main/schema/delta/50/make_event_content_nullable.py b/synapse/storage/databases/main/schema/delta/50/make_event_content_nullable.py index b1684a8441dc..acd6ad1e1fca 100644 --- a/synapse/storage/databases/main/schema/delta/50/make_event_content_nullable.py +++ b/synapse/storage/databases/main/schema/delta/50/make_event_content_nullable.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/schema/delta/57/local_current_membership.py b/synapse/storage/databases/main/schema/delta/57/local_current_membership.py index 44917f0a2ef3..66989222e6d2 100644 --- a/synapse/storage/databases/main/schema/delta/57/local_current_membership.py +++ b/synapse/storage/databases/main/schema/delta/57/local_current_membership.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py index f5e7d9ef98fc..0276f30656cf 100644 --- a/synapse/storage/databases/main/search.py +++ b/synapse/storage/databases/main/search.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/signatures.py b/synapse/storage/databases/main/signatures.py index c8c67953e47b..ab2159c2d378 100644 --- a/synapse/storage/databases/main/signatures.py +++ b/synapse/storage/databases/main/signatures.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py index 93431efe0023..1757064a686f 100644 --- a/synapse/storage/databases/main/state.py +++ b/synapse/storage/databases/main/state.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 
-*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2020 The Matrix.org Foundation C.I.C. # diff --git a/synapse/storage/databases/main/state_deltas.py b/synapse/storage/databases/main/state_deltas.py index 0dbb501f16dc..bff7d0404f1f 100644 --- a/synapse/storage/databases/main/state_deltas.py +++ b/synapse/storage/databases/main/state_deltas.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 Vector Creations Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py index bce8946c21ed..ae9f88096593 100644 --- a/synapse/storage/databases/main/stats.py +++ b/synapse/storage/databases/main/stats.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018, 2019 New Vector Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. # diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 91f8abb67d59..db5ce4ea0104 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2017 Vector Creations Ltd # Copyright 2018-2019 New Vector Ltd diff --git a/synapse/storage/databases/main/tags.py b/synapse/storage/databases/main/tags.py index 50067eabfc5c..1d62c6140f00 100644 --- a/synapse/storage/databases/main/tags.py +++ b/synapse/storage/databases/main/tags.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py index b7072f1f5ef2..82335e7a9dad 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/ui_auth.py b/synapse/storage/databases/main/ui_auth.py index 5473ec1485d0..22c05cdde7df 100644 --- a/synapse/storage/databases/main/ui_auth.py +++ b/synapse/storage/databases/main/ui_auth.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index 1026f321e51e..7a082fdd217f 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/main/user_erasure_store.py b/synapse/storage/databases/main/user_erasure_store.py index f9575b1f1fd8..acf6b2fb643d 100644 --- a/synapse/storage/databases/main/user_erasure_store.py +++ b/synapse/storage/databases/main/user_erasure_store.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/state/__init__.py b/synapse/storage/databases/state/__init__.py index c90d0228993c..e5100d610818 100644 --- a/synapse/storage/databases/state/__init__.py +++ b/synapse/storage/databases/state/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py index 75c09b3687fd..c2891cb07f11 100644 --- a/synapse/storage/databases/state/bg_updates.py +++ b/synapse/storage/databases/state/bg_updates.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index dfcf89d91c9b..e38461adbc72 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/engines/__init__.py b/synapse/storage/engines/__init__.py index d15ccfacdeb7..9abc02046ee9 100644 --- a/synapse/storage/engines/__init__.py +++ b/synapse/storage/engines/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/engines/_base.py b/synapse/storage/engines/_base.py index 21db1645d3c5..1882bfd9cf8d 100644 --- a/synapse/storage/engines/_base.py +++ b/synapse/storage/engines/_base.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py index dba8cc51d357..21411c5fea5c 100644 --- a/synapse/storage/engines/postgres.py +++ b/synapse/storage/engines/postgres.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py index f4f16456f2ab..5fe1b205e140 100644 --- a/synapse/storage/engines/sqlite.py +++ b/synapse/storage/engines/sqlite.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/keys.py b/synapse/storage/keys.py index c03871f3933e..540adb878174 100644 --- a/synapse/storage/keys.py +++ b/synapse/storage/keys.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2019 New Vector Ltd. # diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py index 3a0d6fb32e84..87e040b014a0 100644 --- a/synapse/storage/persist_events.py +++ b/synapse/storage/persist_events.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018-2019 New Vector Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index c7f0b8ccb530..05a93559748f 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014 - 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/storage/purge_events.py b/synapse/storage/purge_events.py index ad954990a752..30669beb7c6a 100644 --- a/synapse/storage/purge_events.py +++ b/synapse/storage/purge_events.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/push_rule.py b/synapse/storage/push_rule.py index f47cec0d8647..2d5c21ef72c0 100644 --- a/synapse/storage/push_rule.py +++ b/synapse/storage/push_rule.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/storage/relations.py b/synapse/storage/relations.py index 2564f34b4713..c552dbf04c8e 100644 --- a/synapse/storage/relations.py +++ b/synapse/storage/relations.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index d2ff4da6b9fe..c34fbf21bc42 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/storage/state.py b/synapse/storage/state.py index c1c147c62ac2..cfafba22c5e0 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/types.py b/synapse/storage/types.py index 17291c9d5e22..57f4883bf47a 100644 --- a/synapse/storage/types.py +++ b/synapse/storage/types.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/util/__init__.py b/synapse/storage/util/__init__.py index bfebb0f644f4..5e83dba2ed6f 100644 --- a/synapse/storage/util/__init__.py +++ b/synapse/storage/util/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 32d6cc16b9a6..b1bd3a52d98f 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/storage/util/sequence.py b/synapse/storage/util/sequence.py index 36a67e701994..30b6b8e0caea 100644 --- a/synapse/storage/util/sequence.py +++ b/synapse/storage/util/sequence.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/streams/__init__.py b/synapse/streams/__init__.py index bfebb0f644f4..5e83dba2ed6f 100644 --- a/synapse/streams/__init__.py +++ b/synapse/streams/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/streams/config.py b/synapse/streams/config.py index fdda21d16584..13d300588bbc 100644 --- a/synapse/streams/config.py +++ b/synapse/streams/config.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/streams/events.py b/synapse/streams/events.py index 92fd5d489f0a..20fceaa93528 100644 --- a/synapse/streams/events.py +++ b/synapse/streams/events.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/types.py b/synapse/types.py index b08ce9014028..21654ae68661 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2019 The Matrix.org Foundation C.I.C. # diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 517686f0a67d..0f84fa3f4e3e 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index c3b2d981eadb..5c55bb0125d9 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index 48f64eeb387b..46af7fa47312 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2019, 2020 The Matrix.org Foundation C.I.C. # diff --git a/synapse/util/caches/cached_call.py b/synapse/util/caches/cached_call.py index 3ee0f2317a5e..a301c9e89bf9 100644 --- a/synapse/util/caches/cached_call.py +++ b/synapse/util/caches/cached_call.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py index dd392cf69437..484097a48a45 100644 --- a/synapse/util/caches/deferred_cache.py +++ b/synapse/util/caches/deferred_cache.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # Copyright 2020 The Matrix.org Foundation C.I.C. 
diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 4e843799147d..ac4a078b260b 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synapse/util/caches/dictionary_cache.py b/synapse/util/caches/dictionary_cache.py index b3b413b02cd2..56d94d96ce0b 100644 --- a/synapse/util/caches/dictionary_cache.py +++ b/synapse/util/caches/dictionary_cache.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index 4dc3477e8944..ac47a31cd7ef 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index 20c8e2d9f5ec..a21d34fcb4d7 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py index 46ea8e09644c..2529845c9ef0 100644 --- a/synapse/util/caches/response_cache.py +++ b/synapse/util/caches/response_cache.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index 644e9e778a29..0469e7d1204b 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/caches/ttlcache.py b/synapse/util/caches/ttlcache.py index 96a82749408e..c276107d5690 100644 --- a/synapse/util/caches/ttlcache.py +++ b/synapse/util/caches/ttlcache.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/daemonize.py b/synapse/util/daemonize.py index 23393cf49bb7..31b24dd18882 100644 --- a/synapse/util/daemonize.py +++ b/synapse/util/daemonize.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright (c) 2012, 2013, 2014 Ilya Otyutskiy # Copyright 2020 The Matrix.org Foundation C.I.C. 
# diff --git a/synapse/util/distributor.py b/synapse/util/distributor.py index 3c47285d05ca..1f803aef6d1b 100644 --- a/synapse/util/distributor.py +++ b/synapse/util/distributor.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/file_consumer.py b/synapse/util/file_consumer.py index 68dc632491dc..e946189f9a72 100644 --- a/synapse/util/file_consumer.py +++ b/synapse/util/file_consumer.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/frozenutils.py b/synapse/util/frozenutils.py index 5ca2e71e604e..2ac7c2913cdc 100644 --- a/synapse/util/frozenutils.py +++ b/synapse/util/frozenutils.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/hash.py b/synapse/util/hash.py index 359168704e1f..ba676e176240 100644 --- a/synapse/util/hash.py +++ b/synapse/util/hash.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/iterutils.py b/synapse/util/iterutils.py index 98707c119deb..6f73b1d56de4 100644 --- a/synapse/util/iterutils.py +++ b/synapse/util/iterutils.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2020 The Matrix.org Foundation C.I.C. # diff --git a/synapse/util/jsonobject.py b/synapse/util/jsonobject.py index e3a8ed5b2f27..abc12f08374d 100644 --- a/synapse/util/jsonobject.py +++ b/synapse/util/jsonobject.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/macaroons.py b/synapse/util/macaroons.py index 12cdd53327cd..f6ebfd7e7d41 100644 --- a/synapse/util/macaroons.py +++ b/synapse/util/macaroons.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 Quentin Gliech # Copyright 2021 The Matrix.org Foundation C.I.C. 
# diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py index 1023c856d143..6ed7179e8cd7 100644 --- a/synapse/util/metrics.py +++ b/synapse/util/metrics.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/module_loader.py b/synapse/util/module_loader.py index d184e2a90cb6..8acbe276e4bc 100644 --- a/synapse/util/module_loader.py +++ b/synapse/util/module_loader.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/msisdn.py b/synapse/util/msisdn.py index c8bcbe297ab4..bbbdebf2648e 100644 --- a/synapse/util/msisdn.py +++ b/synapse/util/msisdn.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 Vector Creations Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/patch_inline_callbacks.py b/synapse/util/patch_inline_callbacks.py index d9f9ae99d639..eed0291cae88 100644 --- a/synapse/util/patch_inline_callbacks.py +++ b/synapse/util/patch_inline_callbacks.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index 70d11e1ec378..a654c6968492 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py index 4ab379e42998..f9c370a814cc 100644 --- a/synapse/util/retryutils.py +++ b/synapse/util/retryutils.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/rlimit.py b/synapse/util/rlimit.py index 207cd17c2a50..bf812ab5166a 100644 --- a/synapse/util/rlimit.py +++ b/synapse/util/rlimit.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/stringutils.py b/synapse/util/stringutils.py index 9ce7873ab573..c0e6fb9a60a2 100644 --- a/synapse/util/stringutils.py +++ b/synapse/util/stringutils.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2020 The Matrix.org Foundation C.I.C. # diff --git a/synapse/util/templates.py b/synapse/util/templates.py index 392dae4a40f5..38543dd1ea19 100644 --- a/synapse/util/templates.py +++ b/synapse/util/templates.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2021 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/threepids.py b/synapse/util/threepids.py index 43c2e0ac230c..281c5be4fb84 100644 --- a/synapse/util/threepids.py +++ b/synapse/util/threepids.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/versionstring.py b/synapse/util/versionstring.py index ab7d03af3a87..dfa30a62296a 100644 --- a/synapse/util/versionstring.py +++ b/synapse/util/versionstring.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/util/wheel_timer.py b/synapse/util/wheel_timer.py index be3b22469db1..61814aff241f 100644 --- a/synapse/util/wheel_timer.py +++ b/synapse/util/wheel_timer.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synapse/visibility.py b/synapse/visibility.py index ff53a49b3a70..490fb26e8114 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014 - 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synctl b/synctl index 56c0e3940fc8..ccf404accb45 100755 --- a/synctl +++ b/synctl @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/synmark/__init__.py b/synmark/__init__.py index 3d4ec3e1846b..2cc00b0f03d3 100644 --- a/synmark/__init__.py +++ b/synmark/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synmark/__main__.py b/synmark/__main__.py index f55968a5a420..35a59e347a46 100644 --- a/synmark/__main__.py +++ b/synmark/__main__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synmark/suites/logging.py b/synmark/suites/logging.py index b3abc6b254b8..9419892e957c 100644 --- a/synmark/suites/logging.py +++ b/synmark/suites/logging.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synmark/suites/lrucache.py b/synmark/suites/lrucache.py index 69ab042ccc48..9b4a4241493f 100644 --- a/synmark/suites/lrucache.py +++ b/synmark/suites/lrucache.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/synmark/suites/lrucache_evict.py b/synmark/suites/lrucache_evict.py index 532b1cc70220..0ee202ed3626 100644 --- a/synmark/suites/lrucache_evict.py +++ b/synmark/suites/lrucache_evict.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/__init__.py b/tests/__init__.py index ed805db1c2d0..5fced5cc4c3e 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index 28d77f0ca238..c0ed64f78430 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015 - 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py index ab7d2907247e..f44c91a373f9 100644 --- a/tests/api/test_filtering.py +++ b/tests/api/test_filtering.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2017 Vector Creations Ltd # Copyright 2018-2019 New Vector Ltd diff --git a/tests/app/test_frontend_proxy.py b/tests/app/test_frontend_proxy.py index e0ca28882981..3d45da38ab25 100644 --- a/tests/app/test_frontend_proxy.py +++ b/tests/app/test_frontend_proxy.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/app/test_openid_listener.py b/tests/app/test_openid_listener.py index 33a37fe35eac..276f09015ed0 100644 --- a/tests/app/test_openid_listener.py +++ b/tests/app/test_openid_listener.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/appservice/__init__.py b/tests/appservice/__init__.py index fe0ac3f8e952..629e2df74a4f 100644 --- a/tests/appservice/__init__.py +++ b/tests/appservice/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/appservice/test_appservice.py b/tests/appservice/test_appservice.py index 03a7440eec5f..f386b5e128bf 100644 --- a/tests/appservice/test_appservice.py +++ b/tests/appservice/test_appservice.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py index 3c27d797fb37..a2b5ed2030d0 100644 --- a/tests/appservice/test_scheduler.py +++ b/tests/appservice/test_scheduler.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/config/__init__.py b/tests/config/__init__.py index b7df13c9eebc..f43a360a807c 100644 --- a/tests/config/__init__.py +++ b/tests/config/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/config/test_base.py b/tests/config/test_base.py index 42ee5f56d93e..84ae3b88ae9b 100644 --- a/tests/config/test_base.py +++ b/tests/config/test_base.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/config/test_cache.py b/tests/config/test_cache.py index 2b7f09c14b27..857d9cd0969b 100644 --- a/tests/config/test_cache.py +++ b/tests/config/test_cache.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/config/test_database.py b/tests/config/test_database.py index f675bde68e9e..9eca10bbe9b6 100644 --- a/tests/config/test_database.py +++ b/tests/config/test_database.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/config/test_generate.py b/tests/config/test_generate.py index 463855ecc8db..fdfbb0e38e9c 100644 --- a/tests/config/test_generate.py +++ b/tests/config/test_generate.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/config/test_load.py b/tests/config/test_load.py index c109425671ef..ebe2c0516570 100644 --- a/tests/config/test_load.py +++ b/tests/config/test_load.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/config/test_ratelimiting.py b/tests/config/test_ratelimiting.py index 13ab282384e3..3c7bb32e079d 100644 --- a/tests/config/test_ratelimiting.py +++ b/tests/config/test_ratelimiting.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/config/test_room_directory.py b/tests/config/test_room_directory.py index 0ec10019b3aa..db745815eff2 100644 --- a/tests/config/test_room_directory.py +++ b/tests/config/test_room_directory.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/config/test_server.py b/tests/config/test_server.py index 98af7aa67577..6f2b9e997d19 100644 --- a/tests/config/test_server.py +++ b/tests/config/test_server.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/config/test_tls.py b/tests/config/test_tls.py index ec32d4b1ca50..183034f7d4aa 100644 --- a/tests/config/test_tls.py +++ b/tests/config/test_tls.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # Copyright 2019 Matrix.org Foundation C.I.C. # diff --git a/tests/config/test_util.py b/tests/config/test_util.py index 10363e3765d9..3d4929daacf0 100644 --- a/tests/config/test_util.py +++ b/tests/config/test_util.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2021 The Matrix.org Foundation C.I.C. 
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/crypto/__init__.py b/tests/crypto/__init__.py
index bfebb0f644f4..5e83dba2ed6f 100644
--- a/tests/crypto/__init__.py
+++ b/tests/crypto/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/crypto/test_event_signing.py b/tests/crypto/test_event_signing.py
index 62f639a18d0b..1c920157f506 100644
--- a/tests/crypto/test_event_signing.py
+++ b/tests/crypto/test_event_signing.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py
index a56063315be0..2775dfd8807d 100644
--- a/tests/crypto/test_keyring.py
+++ b/tests/crypto/test_keyring.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2017 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/events/test_presence_router.py b/tests/events/test_presence_router.py
index c996ecc221a1..01d257307c6b 100644
--- a/tests/events/test_presence_router.py
+++ b/tests/events/test_presence_router.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2021 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the 'License');
diff --git a/tests/events/test_snapshot.py b/tests/events/test_snapshot.py
index ec85324c0c62..48e98aac797d 100644
--- a/tests/events/test_snapshot.py
+++ b/tests/events/test_snapshot.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py
index 8ba36c60748b..9274ce4c396d 100644
--- a/tests/events/test_utils.py
+++ b/tests/events/test_utils.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the 'License');
diff --git a/tests/federation/test_complexity.py b/tests/federation/test_complexity.py
index 701fa8379fff..1a809b2a6ae0 100644
--- a/tests/federation/test_complexity.py
+++ b/tests/federation/test_complexity.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 Matrix.org Foundation
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py
index deb12433cf4f..b00dd143d677 100644
--- a/tests/federation/test_federation_sender.py
+++ b/tests/federation/test_federation_sender.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py
index cfeccc05779e..8508b6bd3b53 100644
--- a/tests/federation/test_federation_server.py
+++ b/tests/federation/test_federation_server.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 # Copyright 2019 Matrix.org Federation C.I.C
 #
diff --git a/tests/federation/transport/test_server.py b/tests/federation/transport/test_server.py
index 85500e169c88..84fa72b9ff14 100644
--- a/tests/federation/transport/test_server.py
+++ b/tests/federation/transport/test_server.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/handlers/test_admin.py b/tests/handlers/test_admin.py
index 32669ae9ced6..18a734daf461 100644
--- a/tests/handlers/test_admin.py
+++ b/tests/handlers/test_admin.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py
index 6e325b24cee1..b037b12a0f3d 100644
--- a/tests/handlers/test_appservice.py
+++ b/tests/handlers/test_appservice.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/handlers/test_auth.py b/tests/handlers/test_auth.py
index 321c5ba045fb..fe7e9484fd5b 100644
--- a/tests/handlers/test_auth.py
+++ b/tests/handlers/test_auth.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py
index 821629bc38a3..84c38b295db1 100644
--- a/tests/handlers/test_device.py
+++ b/tests/handlers/test_device.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2016 OpenMarket Ltd
 # Copyright 2018 New Vector Ltd
 # Copyright 2020 The Matrix.org Foundation C.I.C.
diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py
index 6ae9d4f8656f..1908d3c2c6fb 100644
--- a/tests/handlers/test_directory.py
+++ b/tests/handlers/test_directory.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py
index 6915ac0205f0..61a00130b814 100644
--- a/tests/handlers/test_e2e_keys.py
+++ b/tests/handlers/test_e2e_keys.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2016 OpenMarket Ltd
 # Copyright 2019 New Vector Ltd
 # Copyright 2019 The Matrix.org Foundation C.I.C.
diff --git a/tests/handlers/test_e2e_room_keys.py b/tests/handlers/test_e2e_room_keys.py
index 07893302ecbe..9b7e7a8e9aff 100644
--- a/tests/handlers/test_e2e_room_keys.py
+++ b/tests/handlers/test_e2e_room_keys.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2016 OpenMarket Ltd
 # Copyright 2017 New Vector Ltd
 # Copyright 2019 Matrix.org Foundation C.I.C.
diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py
index 3af361195b57..c7b0975a19e5 100644
--- a/tests/handlers/test_federation.py
+++ b/tests/handlers/test_federation.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/handlers/test_message.py b/tests/handlers/test_message.py
index a0d1ebdbe3c1..a8a9fc5b628e 100644
--- a/tests/handlers/test_message.py
+++ b/tests/handlers/test_message.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py
index 8702ee70e0de..34d2fc1dfb11 100644
--- a/tests/handlers/test_oidc.py
+++ b/tests/handlers/test_oidc.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 Quentin Gliech
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/handlers/test_password_providers.py b/tests/handlers/test_password_providers.py
index e28e4159eba5..32651db09669 100644
--- a/tests/handlers/test_password_providers.py
+++ b/tests/handlers/test_password_providers.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py
index 9f16cc65fc2e..2d12e8289748 100644
--- a/tests/handlers/test_presence.py
+++ b/tests/handlers/test_presence.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/handlers/test_profile.py b/tests/handlers/test_profile.py
index d8b1bcac8b49..5330a9b34e22 100644
--- a/tests/handlers/test_profile.py
+++ b/tests/handlers/test_profile.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py
index 69279a5ce9fd..608f8f3d3323 100644
--- a/tests/handlers/test_register.py
+++ b/tests/handlers/test_register.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/handlers/test_stats.py b/tests/handlers/test_stats.py
index 312c0a0d4172..c9d4fd93368f 100644
--- a/tests/handlers/test_stats.py
+++ b/tests/handlers/test_stats.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py
index 8e950f25c53c..c8b43305f433 100644
--- a/tests/handlers/test_sync.py
+++ b/tests/handlers/test_sync.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py
index 9fa231a37a95..0c89487eaf34 100644
--- a/tests/handlers/test_typing.py
+++ b/tests/handlers/test_typing.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py
index c68cb830afcd..daac37abd876 100644
--- a/tests/handlers/test_user_directory.py
+++ b/tests/handlers/test_user_directory.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/http/__init__.py b/tests/http/__init__.py
index 3e5a856584bd..e74f7f5b48f1 100644
--- a/tests/http/__init__.py
+++ b/tests/http/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/http/federation/__init__.py b/tests/http/federation/__init__.py
index 1453d045718f..743fb9904a8f 100644
--- a/tests/http/federation/__init__.py
+++ b/tests/http/federation/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py
index ae9d4504a8c0..e45980316b6d 100644
--- a/tests/http/federation/test_matrix_federation_agent.py
+++ b/tests/http/federation/test_matrix_federation_agent.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/http/federation/test_srv_resolver.py b/tests/http/federation/test_srv_resolver.py
index 466ce722d9ce..c49be33b9f7c 100644
--- a/tests/http/federation/test_srv_resolver.py
+++ b/tests/http/federation/test_srv_resolver.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 # Copyright 2019 New Vector Ltd
 #
diff --git a/tests/http/test_additional_resource.py b/tests/http/test_additional_resource.py
index 453391a5a5f0..768c2ba4ead3 100644
--- a/tests/http/test_additional_resource.py
+++ b/tests/http/test_additional_resource.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/http/test_endpoint.py b/tests/http/test_endpoint.py
index d06ea518cee6..1f9a2f9b1d32 100644
--- a/tests/http/test_endpoint.py
+++ b/tests/http/test_endpoint.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/http/test_fedclient.py b/tests/http/test_fedclient.py
index 21c1297171af..9e9718550711 100644
--- a/tests/http/test_fedclient.py
+++ b/tests/http/test_fedclient.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/http/test_proxyagent.py b/tests/http/test_proxyagent.py
index 3ea8b5bec7ec..fefc8099c9d2 100644
--- a/tests/http/test_proxyagent.py
+++ b/tests/http/test_proxyagent.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/http/test_servlet.py b/tests/http/test_servlet.py
index f979c96f7cdc..a80bfb9f4eb5 100644
--- a/tests/http/test_servlet.py
+++ b/tests/http/test_servlet.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/http/test_simple_client.py b/tests/http/test_simple_client.py
index cc4cae320d36..c85a3665c127 100644
--- a/tests/http/test_simple_client.py
+++ b/tests/http/test_simple_client.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/logging/__init__.py b/tests/logging/__init__.py
index a58d51441c6e..1acf5666a856 100644
--- a/tests/logging/__init__.py
+++ b/tests/logging/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/logging/test_remote_handler.py b/tests/logging/test_remote_handler.py
index 4bc27a1d7d1c..b0d046fe0079 100644
--- a/tests/logging/test_remote_handler.py
+++ b/tests/logging/test_remote_handler.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/logging/test_terse_json.py b/tests/logging/test_terse_json.py
index 215fd8b0f9e3..6cddb95c30af 100644
--- a/tests/logging/test_terse_json.py
+++ b/tests/logging/test_terse_json.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py
index 349f93560e9d..742ad14b8c3a 100644
--- a/tests/module_api/test_api.py
+++ b/tests/module_api/test_api.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/push/test_email.py b/tests/push/test_email.py
index 941cf4242954..e04bc5c9a661 100644
--- a/tests/push/test_email.py
+++ b/tests/push/test_email.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/push/test_http.py b/tests/push/test_http.py
index 4074ade87a14..ffd75b14914f 100644
--- a/tests/push/test_http.py
+++ b/tests/push/test_http.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py
index 4a841f5bb844..45906ce72083 100644
--- a/tests/push/test_push_rule_evaluator.py
+++ b/tests/push/test_push_rule_evaluator.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/replication/__init__.py b/tests/replication/__init__.py
index b7df13c9eebc..f43a360a807c 100644
--- a/tests/replication/__init__.py
+++ b/tests/replication/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/replication/_base.py b/tests/replication/_base.py
index aff19d9fb3c5..36138d69aa74 100644
--- a/tests/replication/_base.py
+++ b/tests/replication/_base.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/replication/slave/__init__.py b/tests/replication/slave/__init__.py
index b7df13c9eebc..f43a360a807c 100644
--- a/tests/replication/slave/__init__.py
+++ b/tests/replication/slave/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/replication/slave/storage/__init__.py b/tests/replication/slave/storage/__init__.py
index b7df13c9eebc..f43a360a807c 100644
--- a/tests/replication/slave/storage/__init__.py
+++ b/tests/replication/slave/storage/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/replication/tcp/__init__.py b/tests/replication/tcp/__init__.py
index 1453d045718f..743fb9904a8f 100644
--- a/tests/replication/tcp/__init__.py
+++ b/tests/replication/tcp/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/replication/tcp/streams/__init__.py b/tests/replication/tcp/streams/__init__.py
index 1453d045718f..743fb9904a8f 100644
--- a/tests/replication/tcp/streams/__init__.py
+++ b/tests/replication/tcp/streams/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/replication/tcp/streams/test_account_data.py b/tests/replication/tcp/streams/test_account_data.py
index 153634d4eeaf..cdd052001b6d 100644
--- a/tests/replication/tcp/streams/test_account_data.py
+++ b/tests/replication/tcp/streams/test_account_data.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/replication/tcp/streams/test_events.py b/tests/replication/tcp/streams/test_events.py
index 77856fc30445..323237c1bb88 100644
--- a/tests/replication/tcp/streams/test_events.py
+++ b/tests/replication/tcp/streams/test_events.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/replication/tcp/streams/test_federation.py b/tests/replication/tcp/streams/test_federation.py
index aa4bf1c7e3b0..ffec06a0d653 100644
--- a/tests/replication/tcp/streams/test_federation.py
+++ b/tests/replication/tcp/streams/test_federation.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/replication/tcp/streams/test_receipts.py b/tests/replication/tcp/streams/test_receipts.py
index 7d848e41ffed..7f5d932f0bb2 100644
--- a/tests/replication/tcp/streams/test_receipts.py
+++ b/tests/replication/tcp/streams/test_receipts.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/replication/tcp/streams/test_typing.py b/tests/replication/tcp/streams/test_typing.py
index 4a0b34226443..ecd360c2d068 100644
--- a/tests/replication/tcp/streams/test_typing.py
+++ b/tests/replication/tcp/streams/test_typing.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/replication/tcp/test_commands.py b/tests/replication/tcp/test_commands.py
index 60c10a441a87..cca7ebb7195d 100644
--- a/tests/replication/tcp/test_commands.py
+++ b/tests/replication/tcp/test_commands.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/replication/tcp/test_remote_server_up.py b/tests/replication/tcp/test_remote_server_up.py
index 1fe9d5b4d076..262c35cef3c6 100644
--- a/tests/replication/tcp/test_remote_server_up.py
+++ b/tests/replication/tcp/test_remote_server_up.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/replication/test_auth.py b/tests/replication/test_auth.py
index f8fd8a843c82..1346e0e160a4 100644
--- a/tests/replication/test_auth.py
+++ b/tests/replication/test_auth.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/replication/test_client_reader_shard.py b/tests/replication/test_client_reader_shard.py
index 5da1d5dc4d8b..b9751efdc53b 100644
--- a/tests/replication/test_client_reader_shard.py
+++ b/tests/replication/test_client_reader_shard.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/replication/test_federation_ack.py b/tests/replication/test_federation_ack.py
index 44ad5eec57ef..04a869e29549 100644
--- a/tests/replication/test_federation_ack.py
+++ b/tests/replication/test_federation_ack.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/replication/test_federation_sender_shard.py b/tests/replication/test_federation_sender_shard.py
index 8ca595c3eeab..48ab3aa4e36b 100644
--- a/tests/replication/test_federation_sender_shard.py
+++ b/tests/replication/test_federation_sender_shard.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/replication/test_multi_media_repo.py b/tests/replication/test_multi_media_repo.py
index b0800f98408f..76e6644353d0 100644
--- a/tests/replication/test_multi_media_repo.py
+++ b/tests/replication/test_multi_media_repo.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/replication/test_pusher_shard.py b/tests/replication/test_pusher_shard.py
index 1f12bde1aa47..1e4e3821b9df 100644
--- a/tests/replication/test_pusher_shard.py
+++ b/tests/replication/test_pusher_shard.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/replication/test_sharded_event_persister.py b/tests/replication/test_sharded_event_persister.py
index 6c2e1674cb21..d739eb6b17f0 100644
--- a/tests/replication/test_sharded_event_persister.py
+++ b/tests/replication/test_sharded_event_persister.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/__init__.py b/tests/rest/__init__.py
index fe0ac3f8e952..629e2df74a4f 100644
--- a/tests/rest/__init__.py
+++ b/tests/rest/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/admin/__init__.py b/tests/rest/admin/__init__.py
index 1453d045718f..743fb9904a8f 100644
--- a/tests/rest/admin/__init__.py
+++ b/tests/rest/admin/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py
index 4abcbe3f553a..2f7090e5543b 100644
--- a/tests/rest/admin/test_admin.py
+++ b/tests/rest/admin/test_admin.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/admin/test_device.py b/tests/rest/admin/test_device.py
index 2a1bcf1760ed..ecbee30bb52f 100644
--- a/tests/rest/admin/test_device.py
+++ b/tests/rest/admin/test_device.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 Dirk Klimpel
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/admin/test_event_reports.py b/tests/rest/admin/test_event_reports.py
index e30ffe4fa0c1..8c66da3af406 100644
--- a/tests/rest/admin/test_event_reports.py
+++ b/tests/rest/admin/test_event_reports.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 Dirk Klimpel
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py
index 31db472cd32b..ac7b21970033 100644
--- a/tests/rest/admin/test_media.py
+++ b/tests/rest/admin/test_media.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 Dirk Klimpel
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py
index 85f77c0a659e..6bcd997085dd 100644
--- a/tests/rest/admin/test_room.py
+++ b/tests/rest/admin/test_room.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 Dirk Klimpel
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/admin/test_statistics.py b/tests/rest/admin/test_statistics.py
index 1f1d11f527d5..363bdeeb2dae 100644
--- a/tests/rest/admin/test_statistics.py
+++ b/tests/rest/admin/test_statistics.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 Dirk Klimpel
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index 5070c9698469..2844c493fc0a 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/client/__init__.py b/tests/rest/client/__init__.py
index fe0ac3f8e952..629e2df74a4f 100644
--- a/tests/rest/client/__init__.py
+++ b/tests/rest/client/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/client/test_consent.py b/tests/rest/client/test_consent.py
index c74693e9b288..5cc62a910a43 100644
--- a/tests/rest/client/test_consent.py
+++ b/tests/rest/client/test_consent.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/client/test_ephemeral_message.py b/tests/rest/client/test_ephemeral_message.py
index 56937dcd2ee4..eec0fc01f938 100644
--- a/tests/rest/client/test_ephemeral_message.py
+++ b/tests/rest/client/test_ephemeral_message.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/client/test_identity.py b/tests/rest/client/test_identity.py
index c0a9fc6925d9..478296ba0efa 100644
--- a/tests/rest/client/test_identity.py
+++ b/tests/rest/client/test_identity.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/client/test_power_levels.py b/tests/rest/client/test_power_levels.py
index 5256c11fe672..ba5ad47df5a0 100644
--- a/tests/rest/client/test_power_levels.py
+++ b/tests/rest/client/test_power_levels.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/client/test_redactions.py b/tests/rest/client/test_redactions.py
index e0c74591b643..dfd85221d01c 100644
--- a/tests/rest/client/test_redactions.py
+++ b/tests/rest/client/test_redactions.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py
index f892a7122894..e1a6e73e17be 100644
--- a/tests/rest/client/test_retention.py
+++ b/tests/rest/client/test_retention.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py
index a7ebe0c3e921..e1fe72fc5d4c 100644
--- a/tests/rest/client/test_third_party_rules.py
+++ b/tests/rest/client/test_third_party_rules.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the 'License');
diff --git a/tests/rest/client/v1/__init__.py b/tests/rest/client/v1/__init__.py
index bfebb0f644f4..5e83dba2ed6f 100644
--- a/tests/rest/client/v1/__init__.py
+++ b/tests/rest/client/v1/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/client/v1/test_directory.py b/tests/rest/client/v1/test_directory.py
index edd1d184f871..8ed470490b4a 100644
--- a/tests/rest/client/v1/test_directory.py
+++ b/tests/rest/client/v1/test_directory.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/client/v1/test_events.py b/tests/rest/client/v1/test_events.py
index 87a18d2cb94e..852bda408c7f 100644
--- a/tests/rest/client/v1/test_events.py
+++ b/tests/rest/client/v1/test_events.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/client/v1/test_login.py b/tests/rest/client/v1/test_login.py
index c7b79ab8a706..605b9523162e 100644
--- a/tests/rest/client/v1/test_login.py
+++ b/tests/rest/client/v1/test_login.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019-2021 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/client/v1/test_presence.py b/tests/rest/client/v1/test_presence.py
index c136827f7993..3a050659caa6 100644
--- a/tests/rest/client/v1/test_presence.py
+++ b/tests/rest/client/v1/test_presence.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/client/v1/test_profile.py b/tests/rest/client/v1/test_profile.py
index f3448c94dd9d..165ad33fb740 100644
--- a/tests/rest/client/v1/test_profile.py
+++ b/tests/rest/client/v1/test_profile.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/client/v1/test_push_rule_attrs.py b/tests/rest/client/v1/test_push_rule_attrs.py
index 2bc512d75e7b..d0776160824c 100644
--- a/tests/rest/client/v1/test_push_rule_attrs.py
+++ b/tests/rest/client/v1/test_push_rule_attrs.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py
index 4df20c90fda8..92babf65e0f3 100644
--- a/tests/rest/client/v1/test_rooms.py
+++ b/tests/rest/client/v1/test_rooms.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 # Copyright 2017 Vector Creations Ltd
 # Copyright 2018-2019 New Vector Ltd
diff --git a/tests/rest/client/v1/test_typing.py b/tests/rest/client/v1/test_typing.py
index 0b8f5651211c..0aad48a162b4 100644
--- a/tests/rest/client/v1/test_typing.py
+++ b/tests/rest/client/v1/test_typing.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 # Copyright 2018 New Vector
 #
diff --git a/tests/rest/client/v1/utils.py b/tests/rest/client/v1/utils.py
index a6a292b20cfa..ed55a640afd2 100644
--- a/tests/rest/client/v1/utils.py
+++ b/tests/rest/client/v1/utils.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 # Copyright 2017 Vector Creations Ltd
 # Copyright 2018-2019 New Vector Ltd
diff --git a/tests/rest/client/v2_alpha/test_account.py b/tests/rest/client/v2_alpha/test_account.py
index e72b61963d11..4ef19145d1b9 100644
--- a/tests/rest/client/v2_alpha/test_account.py
+++ b/tests/rest/client/v2_alpha/test_account.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2015-2016 OpenMarket Ltd
 # Copyright 2017-2018 New Vector Ltd
 # Copyright 2019 The Matrix.org Foundation C.I.C.
diff --git a/tests/rest/client/v2_alpha/test_auth.py b/tests/rest/client/v2_alpha/test_auth.py
index ed433d93334c..485e3650c3f2 100644
--- a/tests/rest/client/v2_alpha/test_auth.py
+++ b/tests/rest/client/v2_alpha/test_auth.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector
 # Copyright 2020-2021 The Matrix.org Foundation C.I.C
 #
diff --git a/tests/rest/client/v2_alpha/test_capabilities.py b/tests/rest/client/v2_alpha/test_capabilities.py
index 287a1a485c93..874052c61ca2 100644
--- a/tests/rest/client/v2_alpha/test_capabilities.py
+++ b/tests/rest/client/v2_alpha/test_capabilities.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/client/v2_alpha/test_filter.py b/tests/rest/client/v2_alpha/test_filter.py
index f761c4493616..c7e47725b789 100644
--- a/tests/rest/client/v2_alpha/test_filter.py
+++ b/tests/rest/client/v2_alpha/test_filter.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/client/v2_alpha/test_password_policy.py b/tests/rest/client/v2_alpha/test_password_policy.py
index 5ebc5707a5f1..6f07ff6cbbca 100644
--- a/tests/rest/client/v2_alpha/test_password_policy.py
+++ b/tests/rest/client/v2_alpha/test_password_policy.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py
index cd60ea70811e..054d4e41402d 100644
--- a/tests/rest/client/v2_alpha/test_register.py
+++ b/tests/rest/client/v2_alpha/test_register.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 # Copyright 2017-2018 New Vector Ltd
 # Copyright 2019 The Matrix.org Foundation C.I.C.
diff --git a/tests/rest/client/v2_alpha/test_relations.py b/tests/rest/client/v2_alpha/test_relations.py
index 21ee436b919b..856aa8682f7e 100644
--- a/tests/rest/client/v2_alpha/test_relations.py
+++ b/tests/rest/client/v2_alpha/test_relations.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/client/v2_alpha/test_shared_rooms.py b/tests/rest/client/v2_alpha/test_shared_rooms.py
index dd83a1f8ff77..cedb9614a8ae 100644
--- a/tests/rest/client/v2_alpha/test_shared_rooms.py
+++ b/tests/rest/client/v2_alpha/test_shared_rooms.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 Half-Shot
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/client/v2_alpha/test_sync.py b/tests/rest/client/v2_alpha/test_sync.py
index 2dbf42397a6b..dbcbdf159a1a 100644
--- a/tests/rest/client/v2_alpha/test_sync.py
+++ b/tests/rest/client/v2_alpha/test_sync.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018-2019 New Vector Ltd
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 #
diff --git a/tests/rest/client/v2_alpha/test_upgrade_room.py b/tests/rest/client/v2_alpha/test_upgrade_room.py
index d890d11863a5..5f3f15fc57cd 100644
--- a/tests/rest/client/v2_alpha/test_upgrade_room.py
+++ b/tests/rest/client/v2_alpha/test_upgrade_room.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2021 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/key/v2/test_remote_key_resource.py b/tests/rest/key/v2/test_remote_key_resource.py
index eb8687ce68c2..3b275bc23b47 100644
--- a/tests/rest/key/v2/test_remote_key_resource.py
+++ b/tests/rest/key/v2/test_remote_key_resource.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/media/__init__.py b/tests/rest/media/__init__.py
index a354d38ca859..b1ee10cfcc44 100644
--- a/tests/rest/media/__init__.py
+++ b/tests/rest/media/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/media/v1/__init__.py b/tests/rest/media/v1/__init__.py
index a354d38ca859..b1ee10cfcc44 100644
--- a/tests/rest/media/v1/__init__.py
+++ b/tests/rest/media/v1/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/media/v1/test_base.py b/tests/rest/media/v1/test_base.py
index ebd78692082f..f761e23f1bf0 100644
--- a/tests/rest/media/v1/test_base.py
+++ b/tests/rest/media/v1/test_base.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/rest/media/v1/test_media_storage.py
index 375f0b797704..4a213d13ddf9 100644
--- a/tests/rest/media/v1/test_media_storage.py
+++ b/tests/rest/media/v1/test_media_storage.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/v1/test_url_preview.py
index 9067463e541f..d3ef7bb4c637 100644
--- a/tests/rest/media/v1/test_url_preview.py
+++ b/tests/rest/media/v1/test_url_preview.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/test_health.py b/tests/rest/test_health.py
index 32acd93dc11a..01d48c3860d4 100644
--- a/tests/rest/test_health.py
+++ b/tests/rest/test_health.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/rest/test_well_known.py b/tests/rest/test_well_known.py
index 14de0921be7b..ac0e42775239 100644
--- a/tests/rest/test_well_known.py
+++ b/tests/rest/test_well_known.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/scripts/test_new_matrix_user.py b/tests/scripts/test_new_matrix_user.py
index 885b95a51f5e..6f3c365c9aad 100644
--- a/tests/scripts/test_new_matrix_user.py
+++ b/tests/scripts/test_new_matrix_user.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/server_notices/test_consent.py b/tests/server_notices/test_consent.py
index 4dd5a361784e..ac98259b7ee6 100644
--- a/tests/server_notices/test_consent.py
+++ b/tests/server_notices/test_consent.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py
index 450b4ec710a2..d46521ccdc0f 100644
--- a/tests/server_notices/test_resource_limits_server_notices.py
+++ b/tests/server_notices/test_resource_limits_server_notices.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018, 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py
index 66e3cafe8e9f..43fc79ca746d 100644
--- a/tests/state/test_v2.py
+++ b/tests/state/test_v2.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/storage/test__base.py b/tests/storage/test__base.py
index 1ac4ebc61d4c..6339a43f0cb3 100644
--- a/tests/storage/test__base.py
+++ b/tests/storage/test__base.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 # Copyright 2019 New Vector Ltd
 #
diff --git a/tests/storage/test_account_data.py b/tests/storage/test_account_data.py
index 38444e48e295..01af49a16b0f 100644
--- a/tests/storage/test_account_data.py
+++ b/tests/storage/test_account_data.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2021 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py
index e755a4db625d..666bffe2574f 100644
--- a/tests/storage/test_appservice.py
+++ b/tests/storage/test_appservice.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2015, 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py
index 54e9e7f6fe98..3b45a7efd899 100644
--- a/tests/storage/test_base.py
+++ b/tests/storage/test_base.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/storage/test_cleanup_extrems.py b/tests/storage/test_cleanup_extrems.py
index b02fb32ced73..aa20588bbe5a 100644
--- a/tests/storage/test_cleanup_extrems.py
+++ b/tests/storage/test_cleanup_extrems.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py
index f7f75320ba98..e57fce9694bf 100644
--- a/tests/storage/test_client_ips.py
+++ b/tests/storage/test_client_ips.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2016 OpenMarket Ltd
 # Copyright 2018 New Vector Ltd
 #
diff --git a/tests/storage/test_database.py b/tests/storage/test_database.py
index a906d30e73b3..6fbac0ab1466 100644
--- a/tests/storage/test_database.py
+++ b/tests/storage/test_database.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/storage/test_devices.py b/tests/storage/test_devices.py
index ef4cf8d0f18e..6790aa524291 100644
--- a/tests/storage/test_devices.py
+++ b/tests/storage/test_devices.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2016-2021 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/storage/test_directory.py b/tests/storage/test_directory.py
index 0db233fd68ce..41bef62ca8fc 100644
--- a/tests/storage/test_directory.py
+++ b/tests/storage/test_directory.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2021 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/storage/test_e2e_room_keys.py b/tests/storage/test_e2e_room_keys.py
index 3d7760d5d9cf..9b6b42542532 100644
--- a/tests/storage/test_e2e_room_keys.py
+++ b/tests/storage/test_e2e_room_keys.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/storage/test_end_to_end_keys.py b/tests/storage/test_end_to_end_keys.py
index 1e54b940fd74..3bf6e337f4e1 100644
--- a/tests/storage/test_end_to_end_keys.py
+++ b/tests/storage/test_end_to_end_keys.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2016-2021 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/storage/test_event_chain.py b/tests/storage/test_event_chain.py
index 16daa66cc919..d87f124c2638 100644
--- a/tests/storage/test_event_chain.py
+++ b/tests/storage/test_event_chain.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the 'License');
diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py
index d597d712d675..a0e22594785f 100644
--- a/tests/storage/test_event_federation.py
+++ b/tests/storage/test_event_federation.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the 'License');
diff --git a/tests/storage/test_event_metrics.py b/tests/storage/test_event_metrics.py
index 7691f2d790fb..397e68fe0ad3 100644
--- a/tests/storage/test_event_metrics.py
+++ b/tests/storage/test_event_metrics.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the 'License');
diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py
index 0289942f88d2..1930b37eda1e 100644
--- a/tests/storage/test_event_push_actions.py
+++ b/tests/storage/test_event_push_actions.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2016-2021 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/storage/test_events.py b/tests/storage/test_events.py
index ed898b8dbb70..617bc8091fa8 100644
--- a/tests/storage/test_events.py
+++ b/tests/storage/test_events.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py
index 6c389fe9acd8..792b1c44c198 100644
--- a/tests/storage/test_id_generators.py
+++ b/tests/storage/test_id_generators.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/storage/test_keys.py b/tests/storage/test_keys.py
index 95f309fbbc41..a94b5fd721f2 100644
--- a/tests/storage/test_keys.py
+++ b/tests/storage/test_keys.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2017 Vector Creations Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/storage/test_main.py b/tests/storage/test_main.py
index e9e3bca3bf59..d2b7b8995200 100644
--- a/tests/storage/test_main.py
+++ b/tests/storage/test_main.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 Awesome Technologies Innovationslabor GmbH
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py
index 47556791f4f1..944dbc34a26b 100644
--- a/tests/storage/test_monthly_active_users.py
+++ b/tests/storage/test_monthly_active_users.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/storage/test_profile.py b/tests/storage/test_profile.py
index d18ceb41a9da..8a446da848d7 100644
--- a/tests/storage/test_profile.py
+++ b/tests/storage/test_profile.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2021 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/storage/test_purge.py b/tests/storage/test_purge.py
index 41af8c484784..54c5b470c789 100644
--- a/tests/storage/test_purge.py
+++ b/tests/storage/test_purge.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py
index 2d2f58903c64..bb31ab756d0d 100644
--- a/tests/storage/test_redaction.py
+++ b/tests/storage/test_redaction.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2021 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py
index c82cf15bc238..97480652824d 100644
--- a/tests/storage/test_registration.py
+++ b/tests/storage/test_registration.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2021 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/storage/test_room.py b/tests/storage/test_room.py
index 0089d33c93a4..70257bf21027 100644
--- a/tests/storage/test_room.py
+++ b/tests/storage/test_room.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2021 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py
index d2aed66f6d40..9fa968f6bb30 100644
--- a/tests/storage/test_roommember.py
+++ b/tests/storage/test_roommember.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 # Copyright 2019 The Matrix.org Foundation C.I.C.
 #
diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py
index f06b452fa985..86952645955b 100644
--- a/tests/storage/test_state.py
+++ b/tests/storage/test_state.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018-2021 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/storage/test_transactions.py b/tests/storage/test_transactions.py
index 8e817e2c7f8c..b7f7eae8d096 100644
--- a/tests/storage/test_transactions.py
+++ b/tests/storage/test_transactions.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py
index 019c5b7b1497..222e5d129d73 100644
--- a/tests/storage/test_user_directory.py
+++ b/tests/storage/test_user_directory.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018-2021 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/test_distributor.py b/tests/test_distributor.py
index 6a6cf709f615..f8341041ee6e 100644
--- a/tests/test_distributor.py
+++ b/tests/test_distributor.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 # Copyright 2018 New Vector Ltd
 #
diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py
index b5f18344dcf1..88888319ccb7 100644
--- a/tests/test_event_auth.py
+++ b/tests/test_event_auth.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/test_federation.py b/tests/test_federation.py
index 8928597d17db..86a44a13da94 100644
--- a/tests/test_federation.py
+++ b/tests/test_federation.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/test_mau.py b/tests/test_mau.py
index 7d92a16a8d5a..fa6ef92b3bd8 100644
--- a/tests/test_mau.py
+++ b/tests/test_mau.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/test_metrics.py b/tests/test_metrics.py
index f696fcf89ef9..b4574b2ffed2 100644
--- a/tests/test_metrics.py
+++ b/tests/test_metrics.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 # Copyright 2019 Matrix.org Foundation C.I.C.
 #
diff --git a/tests/test_phone_home.py b/tests/test_phone_home.py
index 0f800a075bd4..09707a74d731 100644
--- a/tests/test_phone_home.py
+++ b/tests/test_phone_home.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/test_preview.py b/tests/test_preview.py
index ea8329991816..cac3d81ac14d 100644
--- a/tests/test_preview.py
+++ b/tests/test_preview.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/test_state.py b/tests/test_state.py
index 0d626f49f6f1..62f70958732c 100644
--- a/tests/test_state.py
+++ b/tests/test_state.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/test_test_utils.py b/tests/test_test_utils.py
index b921ac52c07a..f2ef1c6051eb 100644
--- a/tests/test_test_utils.py
+++ b/tests/test_test_utils.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/test_types.py b/tests/test_types.py
index acdeea7a099b..d7881021d3f5 100644
--- a/tests/test_types.py
+++ b/tests/test_types.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/test_utils/__init__.py b/tests/test_utils/__init__.py
index b557ffd69251..be6302d170e8 100644
--- a/tests/test_utils/__init__.py
+++ b/tests/test_utils/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 New Vector Ltd
 # Copyright 2020 The Matrix.org Foundation C.I.C
 #
diff --git a/tests/test_utils/event_injection.py b/tests/test_utils/event_injection.py
index 3dfbf8f8a9e4..e9ec9e085b53 100644
--- a/tests/test_utils/event_injection.py
+++ b/tests/test_utils/event_injection.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 # Copyright 2020 The Matrix.org Foundation C.I.C
 #
diff --git a/tests/test_utils/html_parsers.py b/tests/test_utils/html_parsers.py
index ad563eb3f0b8..1fbb38f4be03 100644
--- a/tests/test_utils/html_parsers.py
+++ b/tests/test_utils/html_parsers.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2021 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/test_utils/logging_setup.py b/tests/test_utils/logging_setup.py
index 74568b34f8c8..51a197a8c621 100644
--- a/tests/test_utils/logging_setup.py
+++ b/tests/test_utils/logging_setup.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2019 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/test_visibility.py b/tests/test_visibility.py
index e502ac197e84..94b19788d737 100644
--- a/tests/test_visibility.py
+++ b/tests/test_visibility.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/unittest.py b/tests/unittest.py
index 92764434bdf8..d890ad981f26 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 # Copyright 2018 New Vector
 # Copyright 2019 Matrix.org Federation C.I.C
diff --git a/tests/util/__init__.py b/tests/util/__init__.py
index bfebb0f644f4..5e83dba2ed6f 100644
--- a/tests/util/__init__.py
+++ b/tests/util/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/util/caches/__init__.py b/tests/util/caches/__init__.py
index 451dae3b6c8e..830e2dfe916c 100644
--- a/tests/util/caches/__init__.py
+++ b/tests/util/caches/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2017 Vector Creations Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/util/caches/test_cached_call.py b/tests/util/caches/test_cached_call.py
index f349b5ced071..80b97167bac0 100644
--- a/tests/util/caches/test_cached_call.py
+++ b/tests/util/caches/test_cached_call.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2021 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/tests/util/caches/test_deferred_cache.py b/tests/util/caches/test_deferred_cache.py
index c24c33ee9132..54a88a83255b 100644
--- a/tests/util/caches/test_deferred_cache.py
+++ b/tests/util/caches/test_deferred_cache.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2020 The Matrix.org Foundation C.I.C.
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 2d1f9360e0b0..40cd98e2d81a 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/tests/util/caches/test_ttlcache.py b/tests/util/caches/test_ttlcache.py index 23018081e5a6..fe8314057da1 100644 --- a/tests/util/caches/test_ttlcache.py +++ b/tests/util/caches/test_ttlcache.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/util/test_async_utils.py b/tests/util/test_async_utils.py index 17fd86d02de7..069f875962f5 100644 --- a/tests/util/test_async_utils.py +++ b/tests/util/test_async_utils.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/util/test_dict_cache.py b/tests/util/test_dict_cache.py index 2f41333f4c11..bee66dee4328 100644 --- a/tests/util/test_dict_cache.py +++ b/tests/util/test_dict_cache.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/util/test_expiring_cache.py b/tests/util/test_expiring_cache.py index 49ffeebd0ed5..e6e13ba06cc8 100644 --- a/tests/util/test_expiring_cache.py +++ b/tests/util/test_expiring_cache.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2017 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/util/test_file_consumer.py b/tests/util/test_file_consumer.py index d1372f6bc251..3bb469540527 100644 --- a/tests/util/test_file_consumer.py +++ b/tests/util/test_file_consumer.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/util/test_itertools.py b/tests/util/test_itertools.py index e931a7ec1852..1bd0b45d940a 100644 --- a/tests/util/test_itertools.py +++ b/tests/util/test_itertools.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/util/test_linearizer.py b/tests/util/test_linearizer.py index 0e52811948b2..c4a3917b2301 100644 --- a/tests/util/test_linearizer.py +++ b/tests/util/test_linearizer.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd # diff --git a/tests/util/test_logformatter.py b/tests/util/test_logformatter.py index 0fb60caacb1a..a2e08281e6c5 100644 --- a/tests/util/test_logformatter.py +++ b/tests/util/test_logformatter.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/util/test_lrucache.py b/tests/util/test_lrucache.py index ce4f1cc30a57..df3e27779fd2 100644 --- a/tests/util/test_lrucache.py +++ b/tests/util/test_lrucache.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/util/test_ratelimitutils.py b/tests/util/test_ratelimitutils.py index 3fed55090a7c..34aaffe85954 100644 --- a/tests/util/test_ratelimitutils.py +++ b/tests/util/test_ratelimitutils.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/util/test_retryutils.py b/tests/util/test_retryutils.py index 5f46ed0cefd9..9b2be83a43a1 100644 --- a/tests/util/test_retryutils.py +++ b/tests/util/test_retryutils.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/util/test_rwlock.py b/tests/util/test_rwlock.py index d3dea3b52a8a..a10071c70fc9 100644 --- a/tests/util/test_rwlock.py +++ b/tests/util/test_rwlock.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/util/test_stringutils.py b/tests/util/test_stringutils.py index 8491f7cc8346..f7fecd9cf301 100644 --- a/tests/util/test_stringutils.py +++ b/tests/util/test_stringutils.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/util/test_threepids.py b/tests/util/test_threepids.py index 5513724d87cf..d957b953bb09 100644 --- a/tests/util/test_threepids.py +++ b/tests/util/test_threepids.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2020 Dirk Klimpel # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/util/test_treecache.py b/tests/util/test_treecache.py index a5f226120835..3b077af27e90 100644 --- a/tests/util/test_treecache.py +++ b/tests/util/test_treecache.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/util/test_wheel_timer.py b/tests/util/test_wheel_timer.py index 03201a4d9b90..0d5039de0406 100644 --- a/tests/util/test_wheel_timer.py +++ b/tests/util/test_wheel_timer.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/utils.py b/tests/utils.py index c78d3e5ba706..af6b32fc66c2 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018-2019 New Vector Ltd # From c9a2b5d4022feab97fa1bce1d67360d09a6e3dcc Mon Sep 17 00:00:00 2001 From: rkfg Date: Wed, 14 Apr 2021 18:30:59 +0300 Subject: [PATCH 033/222] More robust handling of the Content-Type header for thumbnail generation (#9788) Signed-off-by: Sergey Shpikin --- changelog.d/9788.bugfix | 1 + synapse/config/repository.py | 1 + synapse/rest/media/v1/media_repository.py | 3 +++ 3 files changed, 5 insertions(+) create mode 100644 changelog.d/9788.bugfix diff --git a/changelog.d/9788.bugfix b/changelog.d/9788.bugfix new file mode 100644 index 000000000000..edb58fbd5b37 --- /dev/null +++ b/changelog.d/9788.bugfix @@ -0,0 +1 @@ +Fix thumbnail generation for some sites with non-standard content types. Contributed by @rkfg. 
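The fix itself is small, as the diffs below show: `image/jpg` is registered as an alias for the standard `image/jpeg` requirements, and any parameters a site appends to its Content-Type header (for example `; charset=UTF-8`) are stripped before the thumbnail requirements are looked up. A standalone sketch of the stripping step (the helper name is hypothetical; the real change inlines this in `_get_thumbnail_requirements`):

def strip_media_type_parameters(media_type: str) -> str:
    # "image/jpeg; charset=UTF-8" -> "image/jpeg"; a bare media type
    # passes through unchanged.
    scpos = media_type.find(";")
    if scpos > 0:
        media_type = media_type[:scpos]
    return media_type

assert strip_media_type_parameters("image/jpeg; charset=UTF-8") == "image/jpeg"
assert strip_media_type_parameters("image/png") == "image/png"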
diff --git a/synapse/config/repository.py b/synapse/config/repository.py index 146bc55d6f77..c78a83abe16e 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -70,6 +70,7 @@ def parse_thumbnail_requirements(thumbnail_sizes): jpeg_thumbnail = ThumbnailRequirement(width, height, method, "image/jpeg") png_thumbnail = ThumbnailRequirement(width, height, method, "image/png") requirements.setdefault("image/jpeg", []).append(jpeg_thumbnail) + requirements.setdefault("image/jpg", []).append(jpeg_thumbnail) requirements.setdefault("image/webp", []).append(jpeg_thumbnail) requirements.setdefault("image/gif", []).append(png_thumbnail) requirements.setdefault("image/png", []).append(png_thumbnail) diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 87e3645ddcd8..e8a875b90093 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -467,6 +467,9 @@ async def _download_remote_file( return media_info def _get_thumbnail_requirements(self, media_type): + scpos = media_type.find(";") + if scpos > 0: + media_type = media_type[:scpos] return self.thumbnail_requirements.get(media_type, ()) def _generate_thumbnail( From 00a6db967655daf1d6db290b7e0d2bb53827ade9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 14 Apr 2021 17:06:06 +0100 Subject: [PATCH 034/222] Move some replication processing out of generic_worker (#9796) Co-authored-by: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- changelog.d/9796.misc | 1 + synapse/app/generic_worker.py | 470 +----------------------------- synapse/handlers/presence.py | 246 ++++++++++++++++ synapse/replication/tcp/client.py | 231 ++++++++++++++- synapse/server.py | 13 +- tests/replication/_base.py | 8 +- 6 files changed, 486 insertions(+), 483 deletions(-) create mode 100644 changelog.d/9796.misc diff --git a/changelog.d/9796.misc b/changelog.d/9796.misc new file mode 100644 index 000000000000..59bb1813c32f --- /dev/null +++ b/changelog.d/9796.misc @@ -0,0 +1 @@ +Move some replication processing out of `generic_worker`. diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index e35e17492c63..28e3b1aa3cb4 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -13,12 +13,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import contextlib import logging import sys -from typing import Dict, Iterable, Optional, Set - -from typing_extensions import ContextManager +from typing import Dict, Iterable, Optional from twisted.internet import address from twisted.web.resource import IResource @@ -40,24 +37,13 @@ from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging from synapse.config.server import ListenerConfig -from synapse.federation import send_queue from synapse.federation.transport.server import TransportLayerServer -from synapse.handlers.presence import ( - BasePresenceHandler, - PresenceState, - get_interested_parties, -) from synapse.http.server import JsonResource, OptionsResource from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.http.site import SynapseSite from synapse.logging.context import LoggingContext from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy -from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource -from synapse.replication.http.presence import ( - ReplicationBumpPresenceActiveTime, - ReplicationPresenceSetState, -) from synapse.replication.slave.storage._base import BaseSlavedStore from synapse.replication.slave.storage.account_data import SlavedAccountDataStore from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore @@ -77,19 +63,6 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationStore from synapse.replication.slave.storage.room import RoomStore from synapse.replication.slave.storage.transactions import SlavedTransactionStore -from synapse.replication.tcp.client import ReplicationDataHandler -from synapse.replication.tcp.commands import ClearUserSyncsCommand -from synapse.replication.tcp.streams import ( - AccountDataStream, - DeviceListsStream, - GroupServerStream, - PresenceStream, - PushersStream, - PushRulesStream, - ReceiptsStream, - TagAccountDataStream, - ToDeviceStream, -) from synapse.rest.admin import register_servlets_for_media_repo from synapse.rest.client.v1 import events, login, room from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet @@ -128,7 +101,7 @@ from synapse.rest.health import HealthResource from synapse.rest.key.v2 import KeyApiV2Resource from synapse.rest.synapse.client import build_synapse_client_resource_tree -from synapse.server import HomeServer, cache_in_self +from synapse.server import HomeServer from synapse.storage.databases.main.censor_events import CensorEventsStore from synapse.storage.databases.main.client_ips import ClientIpWorkerStore from synapse.storage.databases.main.e2e_room_keys import EndToEndRoomKeyStore @@ -137,14 +110,11 @@ from synapse.storage.databases.main.monthly_active_users import ( MonthlyActiveUsersWorkerStore, ) -from synapse.storage.databases.main.presence import UserPresenceState from synapse.storage.databases.main.search import SearchWorkerStore from synapse.storage.databases.main.stats import StatsStore from synapse.storage.databases.main.transactions import TransactionWorkerStore from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore from synapse.storage.databases.main.user_directory import UserDirectoryStore -from synapse.types import ReadReceipt -from synapse.util.async_helpers import Linearizer from synapse.util.httpresourcetree import create_resource_tree from synapse.util.versionstring import get_version_string 
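The deleted imports above preview the shape of this refactor: presence handling, the replication data handler and the federation-sender glue all leave `generic_worker.py` and move behind the ordinary `HomeServer` getters, selected by configuration rather than by subclass overrides. In miniature (stub classes only, mirroring the `server.py` hunk near the end of this commit):

from typing import Optional

class PresenceHandlerStub:
    """Stands in for the full PresenceHandler."""

class WorkerPresenceHandlerStub(PresenceHandlerStub):
    """Stands in for the worker-side handler added by this commit."""

def get_presence_handler(worker_app: Optional[str]) -> PresenceHandlerStub:
    # On a worker (worker_app is set) return the proxying implementation;
    # on the main process return the real one.
    if worker_app:
        return WorkerPresenceHandlerStub()
    return PresenceHandlerStub()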
@@ -264,214 +234,6 @@ async def on_POST(self, request: Request, device_id: Optional[str]): return 200, {"one_time_key_counts": result} -class _NullContextManager(ContextManager[None]): - """A context manager which does nothing.""" - - def __exit__(self, exc_type, exc_val, exc_tb): - pass - - -UPDATE_SYNCING_USERS_MS = 10 * 1000 - - -class GenericWorkerPresence(BasePresenceHandler): - def __init__(self, hs): - super().__init__(hs) - self.hs = hs - self.is_mine_id = hs.is_mine_id - - self.presence_router = hs.get_presence_router() - self._presence_enabled = hs.config.use_presence - - # The number of ongoing syncs on this process, by user id. - # Empty if _presence_enabled is false. - self._user_to_num_current_syncs = {} # type: Dict[str, int] - - self.notifier = hs.get_notifier() - self.instance_id = hs.get_instance_id() - - # user_id -> last_sync_ms. Lists the users that have stopped syncing - # but we haven't notified the master of that yet - self.users_going_offline = {} - - self._bump_active_client = ReplicationBumpPresenceActiveTime.make_client(hs) - self._set_state_client = ReplicationPresenceSetState.make_client(hs) - - self._send_stop_syncing_loop = self.clock.looping_call( - self.send_stop_syncing, UPDATE_SYNCING_USERS_MS - ) - - self._busy_presence_enabled = hs.config.experimental.msc3026_enabled - - hs.get_reactor().addSystemEventTrigger( - "before", - "shutdown", - run_as_background_process, - "generic_presence.on_shutdown", - self._on_shutdown, - ) - - def _on_shutdown(self): - if self._presence_enabled: - self.hs.get_tcp_replication().send_command( - ClearUserSyncsCommand(self.instance_id) - ) - - def send_user_sync(self, user_id, is_syncing, last_sync_ms): - if self._presence_enabled: - self.hs.get_tcp_replication().send_user_sync( - self.instance_id, user_id, is_syncing, last_sync_ms - ) - - def mark_as_coming_online(self, user_id): - """A user has started syncing. Send a UserSync to the master, unless they - had recently stopped syncing. - - Args: - user_id (str) - """ - going_offline = self.users_going_offline.pop(user_id, None) - if not going_offline: - # Safe to skip because we haven't yet told the master they were offline - self.send_user_sync(user_id, True, self.clock.time_msec()) - - def mark_as_going_offline(self, user_id): - """A user has stopped syncing. We wait before notifying the master as - its likely they'll come back soon. This allows us to avoid sending - a stopped syncing immediately followed by a started syncing notification - to the master - - Args: - user_id (str) - """ - self.users_going_offline[user_id] = self.clock.time_msec() - - def send_stop_syncing(self): - """Check if there are any users who have stopped syncing a while ago - and haven't come back yet. If there are poke the master about them. - """ - now = self.clock.time_msec() - for user_id, last_sync_ms in list(self.users_going_offline.items()): - if now - last_sync_ms > UPDATE_SYNCING_USERS_MS: - self.users_going_offline.pop(user_id, None) - self.send_user_sync(user_id, False, last_sync_ms) - - async def user_syncing( - self, user_id: str, affect_presence: bool - ) -> ContextManager[None]: - """Record that a user is syncing. - - Called by the sync and events servlets to record that a user has connected to - this worker and is waiting for some events. 
- """ - if not affect_presence or not self._presence_enabled: - return _NullContextManager() - - curr_sync = self._user_to_num_current_syncs.get(user_id, 0) - self._user_to_num_current_syncs[user_id] = curr_sync + 1 - - # If we went from no in flight sync to some, notify replication - if self._user_to_num_current_syncs[user_id] == 1: - self.mark_as_coming_online(user_id) - - def _end(): - # We check that the user_id is in user_to_num_current_syncs because - # user_to_num_current_syncs may have been cleared if we are - # shutting down. - if user_id in self._user_to_num_current_syncs: - self._user_to_num_current_syncs[user_id] -= 1 - - # If we went from one in flight sync to non, notify replication - if self._user_to_num_current_syncs[user_id] == 0: - self.mark_as_going_offline(user_id) - - @contextlib.contextmanager - def _user_syncing(): - try: - yield - finally: - _end() - - return _user_syncing() - - async def notify_from_replication(self, states, stream_id): - parties = await get_interested_parties(self.store, self.presence_router, states) - room_ids_to_states, users_to_states = parties - - self.notifier.on_new_event( - "presence_key", - stream_id, - rooms=room_ids_to_states.keys(), - users=users_to_states.keys(), - ) - - async def process_replication_rows(self, token, rows): - states = [ - UserPresenceState( - row.user_id, - row.state, - row.last_active_ts, - row.last_federation_update_ts, - row.last_user_sync_ts, - row.status_msg, - row.currently_active, - ) - for row in rows - ] - - for state in states: - self.user_to_current_state[state.user_id] = state - - stream_id = token - await self.notify_from_replication(states, stream_id) - - def get_currently_syncing_users_for_replication(self) -> Iterable[str]: - return [ - user_id - for user_id, count in self._user_to_num_current_syncs.items() - if count > 0 - ] - - async def set_state(self, target_user, state, ignore_status_msg=False): - """Set the presence state of the user.""" - presence = state["presence"] - - valid_presence = ( - PresenceState.ONLINE, - PresenceState.UNAVAILABLE, - PresenceState.OFFLINE, - PresenceState.BUSY, - ) - - if presence not in valid_presence or ( - presence == PresenceState.BUSY and not self._busy_presence_enabled - ): - raise SynapseError(400, "Invalid presence state") - - user_id = target_user.to_string() - - # If presence is disabled, no-op - if not self.hs.config.use_presence: - return - - # Proxy request to master - await self._set_state_client( - user_id=user_id, state=state, ignore_status_msg=ignore_status_msg - ) - - async def bump_presence_active_time(self, user): - """We've seen the user do something that indicates they're interacting - with the app. - """ - # If presence is disabled, no-op - if not self.hs.config.use_presence: - return - - # Proxy request to master - user_id = user.to_string() - await self._bump_active_client(user_id=user_id) - - class GenericWorkerSlavedStore( # FIXME(#3714): We need to add UserDirectoryStore as we write directly # rather than going via the correct worker. 
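None of the presence code removed above is lost: it reappears essentially verbatim later in this patch as `WorkerPresenceHandler` in `synapse/handlers/presence.py`. Its going-offline debounce is worth restating: a worker does not tell the master immediately when a user stops syncing, it waits `UPDATE_SYNCING_USERS_MS` in case the user comes straight back. A simplified, standalone sketch of that idea:

from typing import Dict, List

GRACE_MS = 10 * 1000  # stands in for UPDATE_SYNCING_USERS_MS

users_going_offline: Dict[str, int] = {}  # user_id -> last_sync_ms

def on_stopped_syncing(user_id: str, now_ms: int) -> None:
    # Start the grace period rather than reporting offline right away.
    users_going_offline[user_id] = now_ms

def on_started_syncing(user_id: str) -> bool:
    # True means the master must be told the user is syncing again; False
    # means we never reported them as offline, so there is nothing to send.
    return users_going_offline.pop(user_id, None) is None

def flush_offline(now_ms: int) -> List[str]:
    # Called periodically: users whose grace period expired go offline.
    expired = [u for u, t in users_going_offline.items() if now_ms - t > GRACE_MS]
    for user_id in expired:
        del users_going_offline[user_id]
    return expired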
@@ -657,234 +419,6 @@ def start_listening(self, listeners: Iterable[ListenerConfig]): self.get_tcp_replication().start_replication(self) - @cache_in_self - def get_replication_data_handler(self): - return GenericWorkerReplicationHandler(self) - - @cache_in_self - def get_presence_handler(self): - return GenericWorkerPresence(self) - - -class GenericWorkerReplicationHandler(ReplicationDataHandler): - def __init__(self, hs): - super().__init__(hs) - - self.store = hs.get_datastore() - self.presence_handler = hs.get_presence_handler() # type: GenericWorkerPresence - self.notifier = hs.get_notifier() - - self.notify_pushers = hs.config.start_pushers - self.pusher_pool = hs.get_pusherpool() - - self.send_handler = None # type: Optional[FederationSenderHandler] - if hs.config.send_federation: - self.send_handler = FederationSenderHandler(hs) - - async def on_rdata(self, stream_name, instance_name, token, rows): - await super().on_rdata(stream_name, instance_name, token, rows) - await self._process_and_notify(stream_name, instance_name, token, rows) - - async def _process_and_notify(self, stream_name, instance_name, token, rows): - try: - if self.send_handler: - await self.send_handler.process_replication_rows( - stream_name, token, rows - ) - - if stream_name == PushRulesStream.NAME: - self.notifier.on_new_event( - "push_rules_key", token, users=[row.user_id for row in rows] - ) - elif stream_name in (AccountDataStream.NAME, TagAccountDataStream.NAME): - self.notifier.on_new_event( - "account_data_key", token, users=[row.user_id for row in rows] - ) - elif stream_name == ReceiptsStream.NAME: - self.notifier.on_new_event( - "receipt_key", token, rooms=[row.room_id for row in rows] - ) - await self.pusher_pool.on_new_receipts( - token, token, {row.room_id for row in rows} - ) - elif stream_name == ToDeviceStream.NAME: - entities = [row.entity for row in rows if row.entity.startswith("@")] - if entities: - self.notifier.on_new_event("to_device_key", token, users=entities) - elif stream_name == DeviceListsStream.NAME: - all_room_ids = set() # type: Set[str] - for row in rows: - if row.entity.startswith("@"): - room_ids = await self.store.get_rooms_for_user(row.entity) - all_room_ids.update(room_ids) - self.notifier.on_new_event("device_list_key", token, rooms=all_room_ids) - elif stream_name == PresenceStream.NAME: - await self.presence_handler.process_replication_rows(token, rows) - elif stream_name == GroupServerStream.NAME: - self.notifier.on_new_event( - "groups_key", token, users=[row.user_id for row in rows] - ) - elif stream_name == PushersStream.NAME: - for row in rows: - if row.deleted: - self.stop_pusher(row.user_id, row.app_id, row.pushkey) - else: - await self.start_pusher(row.user_id, row.app_id, row.pushkey) - except Exception: - logger.exception("Error processing replication") - - async def on_position(self, stream_name: str, instance_name: str, token: int): - await super().on_position(stream_name, instance_name, token) - # Also call on_rdata to ensure that stream positions are properly reset. 
- await self.on_rdata(stream_name, instance_name, token, []) - - def stop_pusher(self, user_id, app_id, pushkey): - if not self.notify_pushers: - return - - key = "%s:%s" % (app_id, pushkey) - pushers_for_user = self.pusher_pool.pushers.get(user_id, {}) - pusher = pushers_for_user.pop(key, None) - if pusher is None: - return - logger.info("Stopping pusher %r / %r", user_id, key) - pusher.on_stop() - - async def start_pusher(self, user_id, app_id, pushkey): - if not self.notify_pushers: - return - - key = "%s:%s" % (app_id, pushkey) - logger.info("Starting pusher %r / %r", user_id, key) - return await self.pusher_pool.start_pusher_by_id(app_id, pushkey, user_id) - - def on_remote_server_up(self, server: str): - """Called when get a new REMOTE_SERVER_UP command.""" - - # Let's wake up the transaction queue for the server in case we have - # pending stuff to send to it. - if self.send_handler: - self.send_handler.wake_destination(server) - - -class FederationSenderHandler: - """Processes the fedration replication stream - - This class is only instantiate on the worker responsible for sending outbound - federation transactions. It receives rows from the replication stream and forwards - the appropriate entries to the FederationSender class. - """ - - def __init__(self, hs: GenericWorkerServer): - self.store = hs.get_datastore() - self._is_mine_id = hs.is_mine_id - self.federation_sender = hs.get_federation_sender() - self._hs = hs - - # Stores the latest position in the federation stream we've gotten up - # to. This is always set before we use it. - self.federation_position = None - - self._fed_position_linearizer = Linearizer(name="_fed_position_linearizer") - - def wake_destination(self, server: str): - self.federation_sender.wake_destination(server) - - async def process_replication_rows(self, stream_name, token, rows): - # The federation stream contains things that we want to send out, e.g. - # presence, typing, etc. - if stream_name == "federation": - send_queue.process_rows_for_federation(self.federation_sender, rows) - await self.update_token(token) - - # ... and when new receipts happen - elif stream_name == ReceiptsStream.NAME: - await self._on_new_receipts(rows) - - # ... as well as device updates and messages - elif stream_name == DeviceListsStream.NAME: - # The entities are either user IDs (starting with '@') whose devices - # have changed, or remote servers that we need to tell about - # changes. - hosts = {row.entity for row in rows if not row.entity.startswith("@")} - for host in hosts: - self.federation_sender.send_device_messages(host) - - elif stream_name == ToDeviceStream.NAME: - # The to_device stream includes stuff to be pushed to both local - # clients and remote servers, so we ignore entities that start with - # '@' (since they'll be local users rather than destinations). 
- hosts = {row.entity for row in rows if not row.entity.startswith("@")} - for host in hosts: - self.federation_sender.send_device_messages(host) - - async def _on_new_receipts(self, rows): - """ - Args: - rows (Iterable[synapse.replication.tcp.streams.ReceiptsStream.ReceiptsStreamRow]): - new receipts to be processed - """ - for receipt in rows: - # we only want to send on receipts for our own users - if not self._is_mine_id(receipt.user_id): - continue - receipt_info = ReadReceipt( - receipt.room_id, - receipt.receipt_type, - receipt.user_id, - [receipt.event_id], - receipt.data, - ) - await self.federation_sender.send_read_receipt(receipt_info) - - async def update_token(self, token): - """Update the record of where we have processed to in the federation stream. - - Called after we have processed a an update received over replication. Sends - a FEDERATION_ACK back to the master, and stores the token that we have processed - in `federation_stream_position` so that we can restart where we left off. - """ - self.federation_position = token - - # We save and send the ACK to master asynchronously, so we don't block - # processing on persistence. We don't need to do this operation for - # every single RDATA we receive, we just need to do it periodically. - - if self._fed_position_linearizer.is_queued(None): - # There is already a task queued up to save and send the token, so - # no need to queue up another task. - return - - run_as_background_process("_save_and_send_ack", self._save_and_send_ack) - - async def _save_and_send_ack(self): - """Save the current federation position in the database and send an ACK - to master with where we're up to. - """ - try: - # We linearize here to ensure we don't have races updating the token - # - # XXX this appears to be redundant, since the ReplicationCommandHandler - # has a linearizer which ensures that we only process one line of - # replication data at a time. Should we remove it, or is it doing useful - # service for robustness? Or could we replace it with an assertion that - # we're not being re-entered? - - with (await self._fed_position_linearizer.queue(None)): - # We persist and ack the same position, so we take a copy of it - # here as otherwise it can get modified from underneath us. 
- current_position = self.federation_position - - await self.store.update_federation_out_pos( - "federation", current_position - ) - - # We ACK this token over replication so that the master can drop - # its in memory queues - self._hs.get_tcp_replication().send_federation_ack(current_position) - except Exception: - logger.exception("Error updating federation stream position") - def start(config_options): try: diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 251b48148d48..e120dd1f4860 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -22,6 +22,7 @@ - should_notify """ import abc +import contextlib import logging from contextlib import contextmanager from typing import ( @@ -48,6 +49,11 @@ from synapse.logging.utils import log_function from synapse.metrics import LaterGauge from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.replication.http.presence import ( + ReplicationBumpPresenceActiveTime, + ReplicationPresenceSetState, +) +from synapse.replication.tcp.commands import ClearUserSyncsCommand from synapse.state import StateHandler from synapse.storage.databases.main import DataStore from synapse.types import Collection, JsonDict, UserID, get_domain_from_id @@ -104,6 +110,10 @@ # are dead. EXTERNAL_PROCESS_EXPIRY = 5 * 60 * 1000 +# Delay before a worker tells the presence handler that a user has stopped +# syncing. +UPDATE_SYNCING_USERS_MS = 10 * 1000 + assert LAST_ACTIVE_GRANULARITY < IDLE_TIMER @@ -208,6 +218,242 @@ async def bump_presence_active_time(self, user: UserID): with the app. """ + async def update_external_syncs_row( + self, process_id, user_id, is_syncing, sync_time_msec + ): + """Update the syncing users for an external process as a delta. + + This is a no-op when presence is handled by a different worker. + + Args: + process_id (str): An identifier for the process the users are + syncing against. This allows synapse to process updates + as user start and stop syncing against a given process. + user_id (str): The user who has started or stopped syncing + is_syncing (bool): Whether or not the user is now syncing + sync_time_msec(int): Time in ms when the user was last syncing + """ + pass + + async def update_external_syncs_clear(self, process_id): + """Marks all users that had been marked as syncing by a given process + as offline. + + Used when the process has stopped/disappeared. + + This is a no-op when presence is handled by a different worker. + """ + pass + + async def process_replication_rows(self, token, rows): + """Process presence stream rows received over replication.""" + pass + + +class _NullContextManager(ContextManager[None]): + """A context manager which does nothing.""" + + def __exit__(self, exc_type, exc_val, exc_tb): + pass + + +class WorkerPresenceHandler(BasePresenceHandler): + def __init__(self, hs): + super().__init__(hs) + self.hs = hs + self.is_mine_id = hs.is_mine_id + + self.presence_router = hs.get_presence_router() + self._presence_enabled = hs.config.use_presence + + # The number of ongoing syncs on this process, by user id. + # Empty if _presence_enabled is false. + self._user_to_num_current_syncs = {} # type: Dict[str, int] + + self.notifier = hs.get_notifier() + self.instance_id = hs.get_instance_id() + + # user_id -> last_sync_ms. 
Lists the users that have stopped syncing + # but we haven't notified the master of that yet + self.users_going_offline = {} + + self._bump_active_client = ReplicationBumpPresenceActiveTime.make_client(hs) + self._set_state_client = ReplicationPresenceSetState.make_client(hs) + + self._send_stop_syncing_loop = self.clock.looping_call( + self.send_stop_syncing, UPDATE_SYNCING_USERS_MS + ) + + self._busy_presence_enabled = hs.config.experimental.msc3026_enabled + + hs.get_reactor().addSystemEventTrigger( + "before", + "shutdown", + run_as_background_process, + "generic_presence.on_shutdown", + self._on_shutdown, + ) + + def _on_shutdown(self): + if self._presence_enabled: + self.hs.get_tcp_replication().send_command( + ClearUserSyncsCommand(self.instance_id) + ) + + def send_user_sync(self, user_id, is_syncing, last_sync_ms): + if self._presence_enabled: + self.hs.get_tcp_replication().send_user_sync( + self.instance_id, user_id, is_syncing, last_sync_ms + ) + + def mark_as_coming_online(self, user_id): + """A user has started syncing. Send a UserSync to the master, unless they + had recently stopped syncing. + + Args: + user_id (str) + """ + going_offline = self.users_going_offline.pop(user_id, None) + if not going_offline: + # Safe to skip because we haven't yet told the master they were offline + self.send_user_sync(user_id, True, self.clock.time_msec()) + + def mark_as_going_offline(self, user_id): + """A user has stopped syncing. We wait before notifying the master as + its likely they'll come back soon. This allows us to avoid sending + a stopped syncing immediately followed by a started syncing notification + to the master + + Args: + user_id (str) + """ + self.users_going_offline[user_id] = self.clock.time_msec() + + def send_stop_syncing(self): + """Check if there are any users who have stopped syncing a while ago + and haven't come back yet. If there are poke the master about them. + """ + now = self.clock.time_msec() + for user_id, last_sync_ms in list(self.users_going_offline.items()): + if now - last_sync_ms > UPDATE_SYNCING_USERS_MS: + self.users_going_offline.pop(user_id, None) + self.send_user_sync(user_id, False, last_sync_ms) + + async def user_syncing( + self, user_id: str, affect_presence: bool + ) -> ContextManager[None]: + """Record that a user is syncing. + + Called by the sync and events servlets to record that a user has connected to + this worker and is waiting for some events. + """ + if not affect_presence or not self._presence_enabled: + return _NullContextManager() + + curr_sync = self._user_to_num_current_syncs.get(user_id, 0) + self._user_to_num_current_syncs[user_id] = curr_sync + 1 + + # If we went from no in flight sync to some, notify replication + if self._user_to_num_current_syncs[user_id] == 1: + self.mark_as_coming_online(user_id) + + def _end(): + # We check that the user_id is in user_to_num_current_syncs because + # user_to_num_current_syncs may have been cleared if we are + # shutting down. 
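The counting scheme used by `user_syncing` is easy to lose in the diff: each in-flight `/sync` holds a context manager that bumps a per-user counter on entry and decrements it on exit, and the master is only poked on the 0 -> 1 and 1 -> 0 transitions. The same pattern, self-contained:

from contextlib import contextmanager
from typing import Dict, Iterator

current_syncs: Dict[str, int] = {}

@contextmanager
def user_syncing(user_id: str) -> Iterator[None]:
    current_syncs[user_id] = current_syncs.get(user_id, 0) + 1
    if current_syncs[user_id] == 1:
        print("0 -> 1:", user_id, "came online")  # mark_as_coming_online
    try:
        yield
    finally:
        current_syncs[user_id] -= 1
        if current_syncs[user_id] == 0:
            print("1 -> 0:", user_id, "going offline")  # mark_as_going_offline

with user_syncing("@alice:example.org"):
    pass  # the sync request would be serviced here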
+ if user_id in self._user_to_num_current_syncs: + self._user_to_num_current_syncs[user_id] -= 1 + + # If we went from one in flight sync to non, notify replication + if self._user_to_num_current_syncs[user_id] == 0: + self.mark_as_going_offline(user_id) + + @contextlib.contextmanager + def _user_syncing(): + try: + yield + finally: + _end() + + return _user_syncing() + + async def notify_from_replication(self, states, stream_id): + parties = await get_interested_parties(self.store, self.presence_router, states) + room_ids_to_states, users_to_states = parties + + self.notifier.on_new_event( + "presence_key", + stream_id, + rooms=room_ids_to_states.keys(), + users=users_to_states.keys(), + ) + + async def process_replication_rows(self, token, rows): + states = [ + UserPresenceState( + row.user_id, + row.state, + row.last_active_ts, + row.last_federation_update_ts, + row.last_user_sync_ts, + row.status_msg, + row.currently_active, + ) + for row in rows + ] + + for state in states: + self.user_to_current_state[state.user_id] = state + + stream_id = token + await self.notify_from_replication(states, stream_id) + + def get_currently_syncing_users_for_replication(self) -> Iterable[str]: + return [ + user_id + for user_id, count in self._user_to_num_current_syncs.items() + if count > 0 + ] + + async def set_state(self, target_user, state, ignore_status_msg=False): + """Set the presence state of the user.""" + presence = state["presence"] + + valid_presence = ( + PresenceState.ONLINE, + PresenceState.UNAVAILABLE, + PresenceState.OFFLINE, + PresenceState.BUSY, + ) + + if presence not in valid_presence or ( + presence == PresenceState.BUSY and not self._busy_presence_enabled + ): + raise SynapseError(400, "Invalid presence state") + + user_id = target_user.to_string() + + # If presence is disabled, no-op + if not self.hs.config.use_presence: + return + + # Proxy request to master + await self._set_state_client( + user_id=user_id, state=state, ignore_status_msg=ignore_status_msg + ) + + async def bump_presence_active_time(self, user): + """We've seen the user do something that indicates they're interacting + with the app. + """ + # If presence is disabled, no-op + if not self.hs.config.use_presence: + return + + # Proxy request to master + user_id = user.to_string() + await self._bump_active_client(user_id=user_id) + class PresenceHandler(BasePresenceHandler): def __init__(self, hs: "HomeServer"): diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index ced69ee904c6..ce5d651cb8c5 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -14,22 +14,36 @@ """A replication client for use by synapse workers. 
""" import logging -from typing import TYPE_CHECKING, Dict, List, Tuple +from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple from twisted.internet.defer import Deferred from twisted.internet.protocol import ReconnectingClientFactory from synapse.api.constants import EventTypes +from synapse.federation import send_queue +from synapse.federation.sender import FederationSender from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable +from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.tcp.protocol import ClientReplicationStreamProtocol -from synapse.replication.tcp.streams import TypingStream +from synapse.replication.tcp.streams import ( + AccountDataStream, + DeviceListsStream, + GroupServerStream, + PresenceStream, + PushersStream, + PushRulesStream, + ReceiptsStream, + TagAccountDataStream, + ToDeviceStream, + TypingStream, +) from synapse.replication.tcp.streams.events import ( EventsStream, EventsStreamEventRow, EventsStreamRow, ) -from synapse.types import PersistedEventPosition, UserID -from synapse.util.async_helpers import timeout_deferred +from synapse.types import PersistedEventPosition, ReadReceipt, UserID +from synapse.util.async_helpers import Linearizer, timeout_deferred from synapse.util.metrics import Measure if TYPE_CHECKING: @@ -105,6 +119,14 @@ def __init__(self, hs: "HomeServer"): self._instance_name = hs.get_instance_name() self._typing_handler = hs.get_typing_handler() + self._notify_pushers = hs.config.start_pushers + self._pusher_pool = hs.get_pusherpool() + self._presence_handler = hs.get_presence_handler() + + self.send_handler = None # type: Optional[FederationSenderHandler] + if hs.should_send_federation(): + self.send_handler = FederationSenderHandler(hs) + # Map from stream to list of deferreds waiting for the stream to # arrive at a particular position. The lists are sorted by stream position. 
self._streams_to_waiters = {} # type: Dict[str, List[Tuple[int, Deferred]]] @@ -125,13 +147,53 @@ async def on_rdata( """ self.store.process_replication_rows(stream_name, instance_name, token, rows) + if self.send_handler: + await self.send_handler.process_replication_rows(stream_name, token, rows) + if stream_name == TypingStream.NAME: self._typing_handler.process_replication_rows(token, rows) self.notifier.on_new_event( "typing_key", token, rooms=[row.room_id for row in rows] ) - - if stream_name == EventsStream.NAME: + elif stream_name == PushRulesStream.NAME: + self.notifier.on_new_event( + "push_rules_key", token, users=[row.user_id for row in rows] + ) + elif stream_name in (AccountDataStream.NAME, TagAccountDataStream.NAME): + self.notifier.on_new_event( + "account_data_key", token, users=[row.user_id for row in rows] + ) + elif stream_name == ReceiptsStream.NAME: + self.notifier.on_new_event( + "receipt_key", token, rooms=[row.room_id for row in rows] + ) + await self._pusher_pool.on_new_receipts( + token, token, {row.room_id for row in rows} + ) + elif stream_name == ToDeviceStream.NAME: + entities = [row.entity for row in rows if row.entity.startswith("@")] + if entities: + self.notifier.on_new_event("to_device_key", token, users=entities) + elif stream_name == DeviceListsStream.NAME: + all_room_ids = set() # type: Set[str] + for row in rows: + if row.entity.startswith("@"): + room_ids = await self.store.get_rooms_for_user(row.entity) + all_room_ids.update(room_ids) + self.notifier.on_new_event("device_list_key", token, rooms=all_room_ids) + elif stream_name == GroupServerStream.NAME: + self.notifier.on_new_event( + "groups_key", token, users=[row.user_id for row in rows] + ) + elif stream_name == PushersStream.NAME: + for row in rows: + if row.deleted: + self.stop_pusher(row.user_id, row.app_id, row.pushkey) + else: + await self.start_pusher(row.user_id, row.app_id, row.pushkey) + elif stream_name == PresenceStream.NAME: + await self._presence_handler.process_replication_rows(token, rows) + elif stream_name == EventsStream.NAME: # We shouldn't get multiple rows per token for events stream, so # we don't need to optimise this for multiple rows. for row in rows: @@ -190,7 +252,7 @@ async def on_rdata( waiting_list[:] = waiting_list[index_of_first_deferred_not_called:] async def on_position(self, stream_name: str, instance_name: str, token: int): - self.store.process_replication_rows(stream_name, instance_name, token, []) + await self.on_rdata(stream_name, instance_name, token, []) # We poke the generic "replication" notifier to wake anything up that # may be streaming. @@ -199,6 +261,11 @@ async def on_position(self, stream_name: str, instance_name: str, token: int): def on_remote_server_up(self, server: str): """Called when get a new REMOTE_SERVER_UP command.""" + # Let's wake up the transaction queue for the server in case we have + # pending stuff to send to it. 
+        if self.send_handler:
+            self.send_handler.wake_destination(server)
+
     async def wait_for_stream_position(
         self, instance_name: str, stream_name: str, position: int
     ):
@@ -235,3 +302,153 @@ async def wait_for_stream_position(
         logger.info(
             "Finished waiting for repl stream %r to reach %s", stream_name, position
         )
+
+    def stop_pusher(self, user_id, app_id, pushkey):
+        if not self._notify_pushers:
+            return
+
+        key = "%s:%s" % (app_id, pushkey)
+        pushers_for_user = self._pusher_pool.pushers.get(user_id, {})
+        pusher = pushers_for_user.pop(key, None)
+        if pusher is None:
+            return
+        logger.info("Stopping pusher %r / %r", user_id, key)
+        pusher.on_stop()
+
+    async def start_pusher(self, user_id, app_id, pushkey):
+        if not self._notify_pushers:
+            return
+
+        key = "%s:%s" % (app_id, pushkey)
+        logger.info("Starting pusher %r / %r", user_id, key)
+        return await self._pusher_pool.start_pusher_by_id(app_id, pushkey, user_id)
+
+
+class FederationSenderHandler:
+    """Processes the federation replication stream
+
+    This class is only instantiated on the worker responsible for sending outbound
+    federation transactions. It receives rows from the replication stream and forwards
+    the appropriate entries to the FederationSender class.
+    """
+
+    def __init__(self, hs: "HomeServer"):
+        assert hs.should_send_federation()
+
+        self.store = hs.get_datastore()
+        self._is_mine_id = hs.is_mine_id
+        self._hs = hs
+
+        # We need to make a temporary value to ensure that mypy picks up the
+        # right type. We know we should have a federation sender instance since
+        # `should_send_federation` is True.
+        sender = hs.get_federation_sender()
+        assert isinstance(sender, FederationSender)
+        self.federation_sender = sender
+
+        # Stores the latest position in the federation stream we've gotten up
+        # to. This is always set before we use it.
+        self.federation_position = None  # type: Optional[int]
+
+        self._fed_position_linearizer = Linearizer(name="_fed_position_linearizer")
+
+    def wake_destination(self, server: str):
+        self.federation_sender.wake_destination(server)
+
+    async def process_replication_rows(self, stream_name, token, rows):
+        # The federation stream contains things that we want to send out, e.g.
+        # presence, typing, etc.
+        if stream_name == "federation":
+            send_queue.process_rows_for_federation(self.federation_sender, rows)
+            await self.update_token(token)
+
+        # ... and when new receipts happen
+        elif stream_name == ReceiptsStream.NAME:
+            await self._on_new_receipts(rows)
+
+        # ... as well as device updates and messages
+        elif stream_name == DeviceListsStream.NAME:
+            # The entities are either user IDs (starting with '@') whose devices
+            # have changed, or remote servers that we need to tell about
+            # changes.
+            hosts = {row.entity for row in rows if not row.entity.startswith("@")}
+            for host in hosts:
+                self.federation_sender.send_device_messages(host)
+
+        elif stream_name == ToDeviceStream.NAME:
+            # The to_device stream includes stuff to be pushed to both local
+            # clients and remote servers, so we ignore entities that start with
+            # '@' (since they'll be local users rather than destinations).
+            hosts = {row.entity for row in rows if not row.entity.startswith("@")}
+            for host in hosts:
+                self.federation_sender.send_device_messages(host)
+
+    async def _on_new_receipts(self, rows):
+        """
+        Args:
+            rows (Iterable[synapse.replication.tcp.streams.ReceiptsStream.ReceiptsStreamRow]):
+                new receipts to be processed
+        """
+        for receipt in rows:
+            # we only want to send on receipts for our own users
+            if not self._is_mine_id(receipt.user_id):
+                continue
+            receipt_info = ReadReceipt(
+                receipt.room_id,
+                receipt.receipt_type,
+                receipt.user_id,
+                [receipt.event_id],
+                receipt.data,
+            )
+            await self.federation_sender.send_read_receipt(receipt_info)
+
+    async def update_token(self, token):
+        """Update the record of where we have processed to in the federation stream.
+
+        Called after we have processed an update received over replication. Sends
+        a FEDERATION_ACK back to the master, and stores the token that we have processed
+        in `federation_stream_position` so that we can restart where we left off.
+        """
+        self.federation_position = token
+
+        # We save and send the ACK to master asynchronously, so we don't block
+        # processing on persistence. We don't need to do this operation for
+        # every single RDATA we receive, we just need to do it periodically.
+
+        if self._fed_position_linearizer.is_queued(None):
+            # There is already a task queued up to save and send the token, so
+            # no need to queue up another task.
+            return
+
+        run_as_background_process("_save_and_send_ack", self._save_and_send_ack)
+
+    async def _save_and_send_ack(self):
+        """Save the current federation position in the database and send an ACK
+        to master with where we're up to.
+        """
+        # We should only be calling this once we've got a token.
+        assert self.federation_position is not None
+
+        try:
+            # We linearize here to ensure we don't have races updating the token
+            #
+            # XXX this appears to be redundant, since the ReplicationCommandHandler
+            # has a linearizer which ensures that we only process one line of
+            # replication data at a time. Should we remove it, or is it doing useful
+            # service for robustness? Or could we replace it with an assertion that
+            # we're not being re-entered?
+
+            with (await self._fed_position_linearizer.queue(None)):
+                # We persist and ack the same position, so we take a copy of it
+                # here as otherwise it can get modified from underneath us.
+ current_position = self.federation_position + + await self.store.update_federation_out_pos( + "federation", current_position + ) + + # We ACK this token over replication so that the master can drop + # its in memory queues + self._hs.get_tcp_replication().send_federation_ack(current_position) + except Exception: + logger.exception("Error updating federation stream position") diff --git a/synapse/server.py b/synapse/server.py index 6c35ae6e5032..95a2cd2e5d08 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -85,7 +85,11 @@ from synapse.handlers.message import EventCreationHandler, MessageHandler from synapse.handlers.pagination import PaginationHandler from synapse.handlers.password_policy import PasswordPolicyHandler -from synapse.handlers.presence import PresenceHandler +from synapse.handlers.presence import ( + BasePresenceHandler, + PresenceHandler, + WorkerPresenceHandler, +) from synapse.handlers.profile import ProfileHandler from synapse.handlers.read_marker import ReadMarkerHandler from synapse.handlers.receipts import ReceiptsHandler @@ -415,8 +419,11 @@ def get_state_resolution_handler(self) -> StateResolutionHandler: return StateResolutionHandler(self) @cache_in_self - def get_presence_handler(self) -> PresenceHandler: - return PresenceHandler(self) + def get_presence_handler(self) -> BasePresenceHandler: + if self.config.worker_app: + return WorkerPresenceHandler(self) + else: + return PresenceHandler(self) @cache_in_self def get_typing_writer_handler(self) -> TypingWriterHandler: diff --git a/tests/replication/_base.py b/tests/replication/_base.py index 36138d69aa74..c9d04aef2924 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -21,13 +21,11 @@ from twisted.web.resource import Resource from twisted.web.server import Request, Site -from synapse.app.generic_worker import ( - GenericWorkerReplicationHandler, - GenericWorkerServer, -) +from synapse.app.generic_worker import GenericWorkerServer from synapse.http.server import JsonResource from synapse.http.site import SynapseRequest, SynapseSite from synapse.replication.http import ReplicationRestResource +from synapse.replication.tcp.client import ReplicationDataHandler from synapse.replication.tcp.handler import ReplicationCommandHandler from synapse.replication.tcp.protocol import ClientReplicationStreamProtocol from synapse.replication.tcp.resource import ( @@ -431,7 +429,7 @@ def connect_any_redis_attempts(self): server_protocol.makeConnection(server_to_client_transport) -class TestReplicationDataHandler(GenericWorkerReplicationHandler): +class TestReplicationDataHandler(ReplicationDataHandler): """Drop-in for ReplicationDataHandler which just collects RDATA rows""" def __init__(self, hs: HomeServer): From 05e8c70c059f8ebb066e029bc3aa3e0cefef1019 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Wed, 14 Apr 2021 18:19:02 +0200 Subject: [PATCH 035/222] Experimental Federation Speedup (#9702) This basically speeds up federation by "squeezing" each individual dual database call (to destinations and destination_rooms), which previously happened per every event, into one call for an entire batch (100 max). 
Signed-off-by: Jonathan de Jong --- changelog.d/9702.misc | 1 + contrib/experiments/test_messaging.py | 42 +++--- synapse/federation/sender/__init__.py | 140 +++++++++++------- .../sender/per_destination_queue.py | 15 +- .../storage/databases/main/transactions.py | 28 ++-- 5 files changed, 129 insertions(+), 97 deletions(-) create mode 100644 changelog.d/9702.misc diff --git a/changelog.d/9702.misc b/changelog.d/9702.misc new file mode 100644 index 000000000000..c6e63450a971 --- /dev/null +++ b/changelog.d/9702.misc @@ -0,0 +1 @@ +Speed up federation transmission by using fewer database calls. Contributed by @ShadowJonathan. diff --git a/contrib/experiments/test_messaging.py b/contrib/experiments/test_messaging.py index 31b8a6822504..5dd172052b9e 100644 --- a/contrib/experiments/test_messaging.py +++ b/contrib/experiments/test_messaging.py @@ -224,14 +224,16 @@ def send_message(self, room_name, sender, body): destinations = yield self.get_servers_for_context(room_name) try: - yield self.replication_layer.send_pdu( - Pdu.create_new( - context=room_name, - pdu_type="sy.room.message", - content={"sender": sender, "body": body}, - origin=self.server_name, - destinations=destinations, - ) + yield self.replication_layer.send_pdus( + [ + Pdu.create_new( + context=room_name, + pdu_type="sy.room.message", + content={"sender": sender, "body": body}, + origin=self.server_name, + destinations=destinations, + ) + ] ) except Exception as e: logger.exception(e) @@ -253,7 +255,7 @@ def join_room(self, room_name, sender, joinee): origin=self.server_name, destinations=destinations, ) - yield self.replication_layer.send_pdu(pdu) + yield self.replication_layer.send_pdus([pdu]) except Exception as e: logger.exception(e) @@ -265,16 +267,18 @@ def invite_to_room(self, room_name, sender, invitee): destinations = yield self.get_servers_for_context(room_name) try: - yield self.replication_layer.send_pdu( - Pdu.create_new( - context=room_name, - is_state=True, - pdu_type="sy.room.member", - state_key=invitee, - content={"membership": "invite"}, - origin=self.server_name, - destinations=destinations, - ) + yield self.replication_layer.send_pdus( + [ + Pdu.create_new( + context=room_name, + is_state=True, + pdu_type="sy.room.member", + state_key=invitee, + content={"membership": "invite"}, + origin=self.server_name, + destinations=destinations, + ) + ] ) except Exception as e: logger.exception(e) diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 155161685d0e..952ad39f8c2e 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -18,8 +18,6 @@ from prometheus_client import Counter -from twisted.internet import defer - import synapse.metrics from synapse.api.presence import UserPresenceState from synapse.events import EventBase @@ -27,11 +25,7 @@ from synapse.federation.sender.transaction_manager import TransactionManager from synapse.federation.units import Edu from synapse.handlers.presence import get_interested_remotes -from synapse.logging.context import ( - make_deferred_yieldable, - preserve_fn, - run_in_background, -) +from synapse.logging.context import preserve_fn from synapse.metrics import ( LaterGauge, event_processing_loop_counter, @@ -39,7 +33,7 @@ events_processed_counter, ) from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.types import JsonDict, ReadReceipt, RoomStreamToken +from synapse.types import Collection, JsonDict, ReadReceipt, RoomStreamToken from 
synapse.util.metrics import Measure, measure_func if TYPE_CHECKING: @@ -276,15 +270,27 @@ async def _process_event_queue_loop(self) -> None: if not events and next_token >= self._last_poked_id: break - async def handle_event(event: EventBase) -> None: + async def get_destinations_for_event( + event: EventBase, + ) -> Collection[str]: + """Computes the destinations to which this event must be sent. + + This returns an empty tuple when there are no destinations to send to, + or if this event is not from this homeserver and it is not sending + it on behalf of another server. + + Will also filter out destinations which this sender is not responsible for, + if multiple federation senders exist. + """ + # Only send events for this server. send_on_behalf_of = event.internal_metadata.get_send_on_behalf_of() is_mine = self.is_mine_id(event.sender) if not is_mine and send_on_behalf_of is None: - return + return () if not event.internal_metadata.should_proactively_send(): - return + return () destinations = None # type: Optional[Set[str]] if not event.prev_event_ids(): @@ -319,7 +325,7 @@ async def handle_event(event: EventBase) -> None: "Failed to calculate hosts in room for event: %s", event.event_id, ) - return + return () destinations = { d @@ -329,17 +335,15 @@ async def handle_event(event: EventBase) -> None: ) } + destinations.discard(self.server_name) + if send_on_behalf_of is not None: # If we are sending the event on behalf of another server # then it already has the event and there is no reason to # send the event to it. destinations.discard(send_on_behalf_of) - logger.debug("Sending %s to %r", event, destinations) - if destinations: - await self._send_pdu(event, destinations) - now = self.clock.time_msec() ts = await self.store.get_received_ts(event.event_id) @@ -347,24 +351,29 @@ async def handle_event(event: EventBase) -> None: "federation_sender" ).observe((now - ts) / 1000) - async def handle_room_events(events: Iterable[EventBase]) -> None: - with Measure(self.clock, "handle_room_events"): - for event in events: - await handle_event(event) - - events_by_room = {} # type: Dict[str, List[EventBase]] - for event in events: - events_by_room.setdefault(event.room_id, []).append(event) - - await make_deferred_yieldable( - defer.gatherResults( - [ - run_in_background(handle_room_events, evs) - for evs in events_by_room.values() - ], - consumeErrors=True, - ) - ) + return destinations + return () + + async def get_federatable_events_and_destinations( + events: Iterable[EventBase], + ) -> List[Tuple[EventBase, Collection[str]]]: + with Measure(self.clock, "get_destinations_for_events"): + # Fetch federation destinations per event, + # skip if get_destinations_for_event returns an empty collection, + # return list of event->destinations pairs. 
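The nested comprehension that follows is compact but dense; it is equivalent to this more explicit loop (illustrative only, with `get_dests` standing in for `get_destinations_for_event`):

async def pair_events_with_destinations(events, get_dests):
    out = []
    for event in events:
        dests = await get_dests(event)
        if dests:  # events with nowhere to go are dropped here
            out.append((event, dests))
    return out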
+ return [ + (event, dests) + for (event, dests) in [ + (event, await get_destinations_for_event(event)) + for event in events + ] + if dests + ] + + events_and_dests = await get_federatable_events_and_destinations(events) + + # Send corresponding events to each destination queue + await self._distribute_events(events_and_dests) await self.store.update_federation_out_pos("events", next_token) @@ -382,7 +391,7 @@ async def handle_room_events(events: Iterable[EventBase]) -> None: events_processed_counter.inc(len(events)) event_processing_loop_room_count.labels("federation_sender").inc( - len(events_by_room) + len({event.room_id for event in events}) ) event_processing_loop_counter.labels("federation_sender").inc() @@ -394,34 +403,53 @@ async def handle_room_events(events: Iterable[EventBase]) -> None: finally: self._is_processing = False - async def _send_pdu(self, pdu: EventBase, destinations: Iterable[str]) -> None: - # We loop through all destinations to see whether we already have - # a transaction in progress. If we do, stick it in the pending_pdus - # table and we'll get back to it later. + async def _distribute_events( + self, + events_and_dests: Iterable[Tuple[EventBase, Collection[str]]], + ) -> None: + """Distribute events to the respective per_destination queues. - destinations = set(destinations) - destinations.discard(self.server_name) - logger.debug("Sending to: %s", str(destinations)) + Also persists last-seen per-room stream_ordering to 'destination_rooms'. - if not destinations: - return + Args: + events_and_dests: A list of tuples, which are (event: EventBase, destinations: Collection[str]). + Every event is paired with its intended destinations (in federation). + """ + # Tuples of room_id + destination to their max-seen stream_ordering + room_with_dest_stream_ordering = {} # type: Dict[Tuple[str, str], int] - sent_pdus_destination_dist_total.inc(len(destinations)) - sent_pdus_destination_dist_count.inc() + # List of events to send to each destination + events_by_dest = {} # type: Dict[str, List[EventBase]] - assert pdu.internal_metadata.stream_ordering + # For each event-destinations pair... + for event, destinations in events_and_dests: - # track the fact that we have a PDU for these destinations, - # to allow us to perform catch-up later on if the remote is unreachable - # for a while. - await self.store.store_destination_rooms_entries( - destinations, - pdu.room_id, - pdu.internal_metadata.stream_ordering, + # (we got this from the database, it's filled) + assert event.internal_metadata.stream_ordering + + sent_pdus_destination_dist_total.inc(len(destinations)) + sent_pdus_destination_dist_count.inc() + + # ...iterate over those destinations.. + for destination in destinations: + # ...update their stream-ordering... + room_with_dest_stream_ordering[(event.room_id, destination)] = max( + event.internal_metadata.stream_ordering, + room_with_dest_stream_ordering.get((event.room_id, destination), 0), + ) + + # ...and add the event to each destination queue. 
+ events_by_dest.setdefault(destination, []).append(event) + + # Bulk-store destination_rooms stream_ids + await self.store.bulk_store_destination_rooms_entries( + room_with_dest_stream_ordering ) - for destination in destinations: - self._get_per_destination_queue(destination).send_pdu(pdu) + for destination, pdus in events_by_dest.items(): + logger.debug("Sending %d pdus to %s", len(pdus), destination) + + self._get_per_destination_queue(destination).send_pdus(pdus) async def send_read_receipt(self, receipt: ReadReceipt) -> None: """Send a RR to any other servers in the room diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index 3b053ebcfb08..3bb66bce324e 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -154,19 +154,22 @@ def pending_edu_count(self) -> int: + len(self._pending_edus_keyed) ) - def send_pdu(self, pdu: EventBase) -> None: - """Add a PDU to the queue, and start the transmission loop if necessary + def send_pdus(self, pdus: Iterable[EventBase]) -> None: + """Add PDUs to the queue, and start the transmission loop if necessary Args: - pdu: pdu to send + pdus: pdus to send """ if not self._catching_up or self._last_successful_stream_ordering is None: # only enqueue the PDU if we are not catching up (False) or do not # yet know if we have anything to catch up (None) - self._pending_pdus.append(pdu) + self._pending_pdus.extend(pdus) else: - assert pdu.internal_metadata.stream_ordering - self._catchup_last_skipped = pdu.internal_metadata.stream_ordering + self._catchup_last_skipped = max( + pdu.internal_metadata.stream_ordering + for pdu in pdus + if pdu.internal_metadata.stream_ordering is not None + ) self.attempt_new_transaction() diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py index 82335e7a9dad..b28ca61f8064 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py @@ -14,7 +14,7 @@ import logging from collections import namedtuple -from typing import Iterable, List, Optional, Tuple +from typing import Dict, List, Optional, Tuple from canonicaljson import encode_canonical_json @@ -295,37 +295,33 @@ def _set_destination_retry_timings_emulated( }, ) - async def store_destination_rooms_entries( - self, - destinations: Iterable[str], - room_id: str, - stream_ordering: int, - ) -> None: + async def bulk_store_destination_rooms_entries( + self, room_and_destination_to_ordering: Dict[Tuple[str, str], int] + ): """ - Updates or creates `destination_rooms` entries in batch for a single event. + Updates or creates `destination_rooms` entries for a number of events. 
Args: - destinations: list of destinations - room_id: the room_id of the event - stream_ordering: the stream_ordering of the event + room_and_destination_to_ordering: A mapping of (room, destination) -> stream_id """ await self.db_pool.simple_upsert_many( table="destinations", key_names=("destination",), - key_values=[(d,) for d in destinations], + key_values={(d,) for _, d in room_and_destination_to_ordering.keys()}, value_names=[], value_values=[], desc="store_destination_rooms_entries_dests", ) - rows = [(destination, room_id) for destination in destinations] await self.db_pool.simple_upsert_many( table="destination_rooms", - key_names=("destination", "room_id"), - key_values=rows, + key_names=("room_id", "destination"), + key_values=list(room_and_destination_to_ordering.keys()), value_names=["stream_ordering"], - value_values=[(stream_ordering,)] * len(rows), + value_values=[ + (stream_id,) for stream_id in room_and_destination_to_ordering.values() + ], desc="store_destination_rooms_entries_rooms", ) From cc51aaaa7adb0ec2235e027b5184ebda9b660ec4 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 14 Apr 2021 12:32:20 -0400 Subject: [PATCH 036/222] Check for space membership during a remote join of a restricted room. (#9763) When receiving a /send_join request for a room with join rules set to 'restricted', check if the user is a member of the spaces defined in the 'allow' key of the join rules. This only applies to an experimental room version, as defined in MSC3083. --- changelog.d/9763.feature | 1 + changelog.d/9800.feature | 1 + synapse/handlers/event_auth.py | 82 ++++++++++++ synapse/handlers/federation.py | 212 +++++++++++++++++++++----------- synapse/handlers/room_member.py | 62 +--------- synapse/server.py | 5 + tests/test_federation.py | 6 +- 7 files changed, 238 insertions(+), 131 deletions(-) create mode 100644 changelog.d/9763.feature create mode 100644 changelog.d/9800.feature create mode 100644 synapse/handlers/event_auth.py diff --git a/changelog.d/9763.feature b/changelog.d/9763.feature new file mode 100644 index 000000000000..9404ad2fc047 --- /dev/null +++ b/changelog.d/9763.feature @@ -0,0 +1 @@ +Update experimental support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083): restricting room access via group membership. diff --git a/changelog.d/9800.feature b/changelog.d/9800.feature new file mode 100644 index 000000000000..9404ad2fc047 --- /dev/null +++ b/changelog.d/9800.feature @@ -0,0 +1 @@ +Update experimental support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083): restricting room access via group membership. diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py new file mode 100644 index 000000000000..06da1a93d9f1 --- /dev/null +++ b/synapse/handlers/event_auth.py @@ -0,0 +1,82 @@ +# Copyright 2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
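+
+# NB (an illustrative note based on the checks below, not normative MSC3083
+# wording): `can_join_without_invite` expects the join rules event content
+# to look roughly like
+#
+#     {
+#         "join_rule": "restricted",
+#         "allow": [{"space": "!some_space_id:example.com"}],
+#     }
+#
+# Only the "space" field of each `allow` entry is read; malformed entries
+# are skipped.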
+from typing import TYPE_CHECKING
+
+from synapse.api.constants import EventTypes, JoinRules
+from synapse.api.room_versions import RoomVersion
+from synapse.types import StateMap
+
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
+
+class EventAuthHandler:
+    def __init__(self, hs: "HomeServer"):
+        self._store = hs.get_datastore()
+
+    async def can_join_without_invite(
+        self, state_ids: StateMap[str], room_version: RoomVersion, user_id: str
+    ) -> bool:
+        """
+        Check whether a user can join a room without an invite.
+
+        When joining a room with restricted join rules (as defined in MSC3083),
+        the membership of spaces must be checked during join.
+
+        Args:
+            state_ids: The state of the room as it currently is.
+            room_version: The room version of the room being joined.
+            user_id: The user joining the room.
+
+        Returns:
+            True if the user can join the room, False otherwise.
+        """
+        # This only applies to room versions which support the new join rule.
+        if not room_version.msc3083_join_rules:
+            return True
+
+        # If there's no join rule, then it defaults to public (so this doesn't apply).
+        join_rules_event_id = state_ids.get((EventTypes.JoinRules, ""), None)
+        if not join_rules_event_id:
+            return True
+
+        # If the join rule is not restricted, this doesn't apply.
+        join_rules_event = await self._store.get_event(join_rules_event_id)
+        if join_rules_event.content.get("join_rule") != JoinRules.MSC3083_RESTRICTED:
+            return True
+
+        # If allowed is of the wrong form, then only allow invited users.
+        allowed_spaces = join_rules_event.content.get("allow", [])
+        if not isinstance(allowed_spaces, list):
+            return False
+
+        # Get the list of joined rooms and see if there's an overlap.
+        joined_rooms = await self._store.get_rooms_for_user(user_id)
+
+        # Pull out the other room IDs; invalid data gets filtered out.
+        for space in allowed_spaces:
+            if not isinstance(space, dict):
+                continue
+
+            space_id = space.get("space")
+            if not isinstance(space_id, str):
+                continue
+
+            # The user was joined to one of the spaces specified; they can join
+            # this room!
+            if space_id in joined_rooms:
+                return True
+
+        # The user was not in any of the required spaces.
+ return False diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index fe1d83f6b812..0c9bdf51a458 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -103,7 +103,7 @@ @attr.s(slots=True) class _NewEventInfo: - """Holds information about a received event, ready for passing to _handle_new_events + """Holds information about a received event, ready for passing to _auth_and_persist_events Attributes: event: the received event @@ -146,6 +146,7 @@ def __init__(self, hs: "HomeServer"): self.is_mine_id = hs.is_mine_id self.spam_checker = hs.get_spam_checker() self.event_creation_handler = hs.get_event_creation_handler() + self.event_auth_handler = hs.get_event_auth_handler() self._message_handler = hs.get_message_handler() self._server_notices_mxid = hs.config.server_notices_mxid self.config = hs.config @@ -807,7 +808,10 @@ async def _process_received_pdu( logger.debug("Processing event: %s", event) try: - await self._handle_new_event(origin, event, state=state) + context = await self.state_handler.compute_event_context( + event, old_state=state + ) + await self._auth_and_persist_event(origin, event, context, state=state) except AuthError as e: raise FederationError("ERROR", e.code, e.msg, affected=event.event_id) @@ -1010,7 +1014,9 @@ async def backfill( ) if ev_infos: - await self._handle_new_events(dest, room_id, ev_infos, backfilled=True) + await self._auth_and_persist_events( + dest, room_id, ev_infos, backfilled=True + ) # Step 2: Persist the rest of the events in the chunk one by one events.sort(key=lambda e: e.depth) @@ -1023,10 +1029,12 @@ async def backfill( # non-outliers assert not event.internal_metadata.is_outlier() + context = await self.state_handler.compute_event_context(event) + # We store these one at a time since each event depends on the # previous to work out the state. # TODO: We can probably do something more clever here. - await self._handle_new_event(dest, event, backfilled=True) + await self._auth_and_persist_event(dest, event, context, backfilled=True) return events @@ -1360,7 +1368,7 @@ async def get_event(event_id: str): event_infos.append(_NewEventInfo(event, None, auth)) - await self._handle_new_events( + await self._auth_and_persist_events( destination, room_id, event_infos, @@ -1666,16 +1674,47 @@ async def on_send_join_request(self, origin: str, pdu: EventBase) -> JsonDict: # would introduce the danger of backwards-compatibility problems. event.internal_metadata.send_on_behalf_of = origin - context = await self._handle_new_event(origin, event) + # Calculate the event context. + context = await self.state_handler.compute_event_context(event) + + # Get the state before the new event. + prev_state_ids = await context.get_prev_state_ids() + + # Check if the user is already in the room or invited to the room. + user_id = event.state_key + prev_member_event_id = prev_state_ids.get((EventTypes.Member, user_id), None) + newly_joined = True + is_invite = False + if prev_member_event_id: + prev_member_event = await self.store.get_event(prev_member_event_id) + newly_joined = prev_member_event.membership != Membership.JOIN + is_invite = prev_member_event.membership == Membership.INVITE + + # If the member is not already in the room, and not invited, check if + # they should be allowed access via membership in a space. 
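+        # (This mirrors the check made on the local join path in
+        # room_member.py's _local_membership_update.)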
+        if (
+            newly_joined
+            and not is_invite
+            and not await self.event_auth_handler.can_join_without_invite(
+                prev_state_ids,
+                event.room_version,
+                user_id,
+            )
+        ):
+            raise SynapseError(
+                400,
+                "You do not belong to any of the required spaces to join this room.",
+            )
+
+        # Persist the event.
+        await self._auth_and_persist_event(origin, event, context)

         logger.debug(
-            "on_send_join_request: After _handle_new_event: %s, sigs: %s",
+            "on_send_join_request: After _auth_and_persist_event: %s, sigs: %s",
             event.event_id,
             event.signatures,
         )

-        prev_state_ids = await context.get_prev_state_ids()
-
         state_ids = list(prev_state_ids.values())
         auth_chain = await self.store.get_auth_chain(event.room_id, state_ids)

@@ -1878,10 +1917,11 @@ async def on_send_leave_request(self, origin: str, pdu: EventBase) -> None:

         event.internal_metadata.outlier = False

-        await self._handle_new_event(origin, event)
+        context = await self.state_handler.compute_event_context(event)
+        await self._auth_and_persist_event(origin, event, context)

         logger.debug(
-            "on_send_leave_request: After _handle_new_event: %s, sigs: %s",
+            "on_send_leave_request: After _auth_and_persist_event: %s, sigs: %s",
             event.event_id,
             event.signatures,
         )

@@ -1989,16 +2029,44 @@ async def get_persisted_pdu(
     async def get_min_depth_for_context(self, context: str) -> int:
         return await self.store.get_min_depth(context)

-    async def _handle_new_event(
+    async def _auth_and_persist_event(
         self,
         origin: str,
         event: EventBase,
+        context: EventContext,
         state: Optional[Iterable[EventBase]] = None,
         auth_events: Optional[MutableStateMap[EventBase]] = None,
         backfilled: bool = False,
-    ) -> EventContext:
-        context = await self._prep_event(
-            origin, event, state=state, auth_events=auth_events, backfilled=backfilled
+    ) -> None:
+        """
+        Process an event by performing auth checks and then persisting to the database.
+
+        Args:
+            origin: The host the event originates from.
+            event: The event itself.
+            context:
+                The event context.
+
+                NB that this function potentially modifies it.
+            state:
+                The state events used to auth the event. If this is not provided,
+                the current state events will be used.
+            auth_events:
+                Map from (event_type, state_key) to event
+
+                Normally this is our calculated auth_events, based on the state
+                of the room at the event's position in the DAG; occasionally
+                (e.g. if the event is an outlier) it may instead be the auth
+                events claimed by the remote server.
+            backfilled: True if the event was backfilled.
+ """ + context = await self._check_event_auth( + origin, + event, + context, + state=state, + auth_events=auth_events, + backfilled=backfilled, ) try: @@ -2020,9 +2088,7 @@ async def _handle_new_event( ) raise - return context - - async def _handle_new_events( + async def _auth_and_persist_events( self, origin: str, room_id: str, @@ -2040,9 +2106,13 @@ async def _handle_new_events( async def prep(ev_info: _NewEventInfo): event = ev_info.event with nested_logging_context(suffix=event.event_id): - res = await self._prep_event( + res = await self.state_handler.compute_event_context( + event, old_state=ev_info.state + ) + res = await self._check_event_auth( origin, event, + res, state=ev_info.state, auth_events=ev_info.auth_events, backfilled=backfilled, @@ -2177,49 +2247,6 @@ async def _persist_auth_tree( room_id, [(event, new_event_context)] ) - async def _prep_event( - self, - origin: str, - event: EventBase, - state: Optional[Iterable[EventBase]], - auth_events: Optional[MutableStateMap[EventBase]], - backfilled: bool, - ) -> EventContext: - context = await self.state_handler.compute_event_context(event, old_state=state) - - if not auth_events: - prev_state_ids = await context.get_prev_state_ids() - auth_events_ids = self.auth.compute_auth_events( - event, prev_state_ids, for_verification=True - ) - auth_events_x = await self.store.get_events(auth_events_ids) - auth_events = {(e.type, e.state_key): e for e in auth_events_x.values()} - - # This is a hack to fix some old rooms where the initial join event - # didn't reference the create event in its auth events. - if event.type == EventTypes.Member and not event.auth_event_ids(): - if len(event.prev_event_ids()) == 1 and event.depth < 5: - c = await self.store.get_event( - event.prev_event_ids()[0], allow_none=True - ) - if c and c.type == EventTypes.Create: - auth_events[(c.type, c.state_key)] = c - - context = await self.do_auth(origin, event, context, auth_events=auth_events) - - if not context.rejected: - await self._check_for_soft_fail(event, state, backfilled) - - if event.type == EventTypes.GuestAccess and not context.rejected: - await self.maybe_kick_guest_users(event) - - # If we are going to send this event over federation we precaclculate - # the joined hosts. - if event.internal_metadata.get_send_on_behalf_of(): - await self.event_creation_handler.cache_joined_hosts_for_event(event) - - return context - async def _check_for_soft_fail( self, event: EventBase, state: Optional[Iterable[EventBase]], backfilled: bool ) -> None: @@ -2330,19 +2357,28 @@ async def on_get_missing_events( return missing_events - async def do_auth( + async def _check_event_auth( self, origin: str, event: EventBase, context: EventContext, - auth_events: MutableStateMap[EventBase], + state: Optional[Iterable[EventBase]], + auth_events: Optional[MutableStateMap[EventBase]], + backfilled: bool, ) -> EventContext: """ + Checks whether an event should be rejected (for failing auth checks). Args: - origin: - event: + origin: The host the event originates from. + event: The event itself. context: + The event context. + + NB that this function potentially modifies it. + state: + The state events used to auth the event. If this is not provided + the current state events will be used. auth_events: Map from (event_type, state_key) to event @@ -2352,12 +2388,32 @@ async def do_auth( server. Also NB that this function adds entries to it. + backfilled: True if the event was backfilled. + Returns: - updated context object + The updated context object. 
""" room_version = await self.store.get_room_version_id(event.room_id) room_version_obj = KNOWN_ROOM_VERSIONS[room_version] + if not auth_events: + prev_state_ids = await context.get_prev_state_ids() + auth_events_ids = self.auth.compute_auth_events( + event, prev_state_ids, for_verification=True + ) + auth_events_x = await self.store.get_events(auth_events_ids) + auth_events = {(e.type, e.state_key): e for e in auth_events_x.values()} + + # This is a hack to fix some old rooms where the initial join event + # didn't reference the create event in its auth events. + if event.type == EventTypes.Member and not event.auth_event_ids(): + if len(event.prev_event_ids()) == 1 and event.depth < 5: + c = await self.store.get_event( + event.prev_event_ids()[0], allow_none=True + ) + if c and c.type == EventTypes.Create: + auth_events[(c.type, c.state_key)] = c + try: context = await self._update_auth_events_and_context_for_auth( origin, event, context, auth_events @@ -2379,6 +2435,17 @@ async def do_auth( logger.warning("Failed auth resolution for %r because %s", event, e) context.rejected = RejectedReason.AUTH_ERROR + if not context.rejected: + await self._check_for_soft_fail(event, state, backfilled) + + if event.type == EventTypes.GuestAccess and not context.rejected: + await self.maybe_kick_guest_users(event) + + # If we are going to send this event over federation we precaclculate + # the joined hosts. + if event.internal_metadata.get_send_on_behalf_of(): + await self.event_creation_handler.cache_joined_hosts_for_event(event) + return context async def _update_auth_events_and_context_for_auth( @@ -2388,7 +2455,7 @@ async def _update_auth_events_and_context_for_auth( context: EventContext, auth_events: MutableStateMap[EventBase], ) -> EventContext: - """Helper for do_auth. See there for docs. + """Helper for _check_event_auth. See there for docs. Checks whether a given event has the expected auth events. 
If it doesn't then we talk to the remote server to compare state to see if @@ -2468,9 +2535,14 @@ async def _update_auth_events_and_context_for_auth( e.internal_metadata.outlier = True logger.debug( - "do_auth %s missing_auth: %s", event.event_id, e.event_id + "_check_event_auth %s missing_auth: %s", + event.event_id, + e.event_id, + ) + context = await self.state_handler.compute_event_context(e) + await self._auth_and_persist_event( + origin, e, context, auth_events=auth ) - await self._handle_new_event(origin, e, auth_events=auth) if e.event_id in event_auth_events: auth_events[(e.type, e.state_key)] = e diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 2bbfac6471e5..2c5bada1d89b 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -19,7 +19,7 @@ from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple from synapse import types -from synapse.api.constants import AccountDataTypes, EventTypes, JoinRules, Membership +from synapse.api.constants import AccountDataTypes, EventTypes, Membership from synapse.api.errors import ( AuthError, Codes, @@ -28,7 +28,6 @@ SynapseError, ) from synapse.api.ratelimiting import Ratelimiter -from synapse.api.room_versions import RoomVersion from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.types import JsonDict, Requester, RoomAlias, RoomID, StateMap, UserID @@ -64,6 +63,7 @@ def __init__(self, hs: "HomeServer"): self.profile_handler = hs.get_profile_handler() self.event_creation_handler = hs.get_event_creation_handler() self.account_data_handler = hs.get_account_data_handler() + self.event_auth_handler = hs.get_event_auth_handler() self.member_linearizer = Linearizer(name="member") @@ -178,62 +178,6 @@ async def ratelimit_invite( await self._invites_per_user_limiter.ratelimit(requester, invitee_user_id) - async def _can_join_without_invite( - self, state_ids: StateMap[str], room_version: RoomVersion, user_id: str - ) -> bool: - """ - Check whether a user can join a room without an invite. - - When joining a room with restricted joined rules (as defined in MSC3083), - the membership of spaces must be checked during join. - - Args: - state_ids: The state of the room as it currently is. - room_version: The room version of the room being joined. - user_id: The user joining the room. - - Returns: - True if the user can join the room, false otherwise. - """ - # This only applies to room versions which support the new join rule. - if not room_version.msc3083_join_rules: - return True - - # If there's no join rule, then it defaults to public (so this doesn't apply). - join_rules_event_id = state_ids.get((EventTypes.JoinRules, ""), None) - if not join_rules_event_id: - return True - - # If the join rule is not restricted, this doesn't apply. - join_rules_event = await self.store.get_event(join_rules_event_id) - if join_rules_event.content.get("join_rule") != JoinRules.MSC3083_RESTRICTED: - return True - - # If allowed is of the wrong form, then only allow invited users. - allowed_spaces = join_rules_event.content.get("allow", []) - if not isinstance(allowed_spaces, list): - return False - - # Get the list of joined rooms and see if there's an overlap. - joined_rooms = await self.store.get_rooms_for_user(user_id) - - # Pull out the other room IDs, invalid data gets filtered. 
- for space in allowed_spaces: - if not isinstance(space, dict): - continue - - space_id = space.get("space") - if not isinstance(space_id, str): - continue - - # The user was joined to one of the spaces specified, they can join - # this room! - if space_id in joined_rooms: - return True - - # The user was not in any of the required spaces. - return False - async def _local_membership_update( self, requester: Requester, @@ -302,7 +246,7 @@ async def _local_membership_update( if ( newly_joined and not user_is_invited - and not await self._can_join_without_invite( + and not await self.event_auth_handler.can_join_without_invite( prev_state_ids, event.room_version, user_id ) ): diff --git a/synapse/server.py b/synapse/server.py index 95a2cd2e5d08..045b8f3fcac3 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -77,6 +77,7 @@ from synapse.handlers.directory import DirectoryHandler from synapse.handlers.e2e_keys import E2eKeysHandler from synapse.handlers.e2e_room_keys import E2eRoomKeysHandler +from synapse.handlers.event_auth import EventAuthHandler from synapse.handlers.events import EventHandler, EventStreamHandler from synapse.handlers.federation import FederationHandler from synapse.handlers.groups_local import GroupsLocalHandler, GroupsLocalWorkerHandler @@ -749,6 +750,10 @@ def get_account_data_handler(self) -> AccountDataHandler: def get_space_summary_handler(self) -> SpaceSummaryHandler: return SpaceSummaryHandler(self) + @cache_in_self + def get_event_auth_handler(self) -> EventAuthHandler: + return EventAuthHandler(self) + @cache_in_self def get_external_cache(self) -> ExternalCache: return ExternalCache(self) diff --git a/tests/test_federation.py b/tests/test_federation.py index 86a44a13da94..0a3a996ec1e6 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -75,8 +75,10 @@ def setUp(self): ) self.handler = self.homeserver.get_federation_handler() - self.handler.do_auth = lambda origin, event, context, auth_events: succeed( - context + self.handler._check_event_auth = ( + lambda origin, event, context, state, auth_events, backfilled: succeed( + context + ) ) self.client = self.homeserver.get_federation_client() self.client._check_sigs_and_hash_and_fetch = lambda dest, pdus, **k: succeed( From e8816c6aced208cdf1310393a212b5109892ffbf Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 14 Apr 2021 12:33:37 -0400 Subject: [PATCH 037/222] Revert "Check for space membership during a remote join of a restricted room. (#9763)" This reverts commit cc51aaaa7adb0ec2235e027b5184ebda9b660ec4. The PR was prematurely merged and not yet approved. --- changelog.d/9763.feature | 1 - changelog.d/9800.feature | 1 - synapse/handlers/event_auth.py | 82 ------------ synapse/handlers/federation.py | 212 +++++++++++--------------------- synapse/handlers/room_member.py | 62 +++++++++- synapse/server.py | 5 - tests/test_federation.py | 6 +- 7 files changed, 131 insertions(+), 238 deletions(-) delete mode 100644 changelog.d/9763.feature delete mode 100644 changelog.d/9800.feature delete mode 100644 synapse/handlers/event_auth.py diff --git a/changelog.d/9763.feature b/changelog.d/9763.feature deleted file mode 100644 index 9404ad2fc047..000000000000 --- a/changelog.d/9763.feature +++ /dev/null @@ -1 +0,0 @@ -Update experimental support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083): restricting room access via group membership. 
diff --git a/changelog.d/9800.feature b/changelog.d/9800.feature deleted file mode 100644 index 9404ad2fc047..000000000000 --- a/changelog.d/9800.feature +++ /dev/null @@ -1 +0,0 @@ -Update experimental support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083): restricting room access via group membership. diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py deleted file mode 100644 index 06da1a93d9f1..000000000000 --- a/synapse/handlers/event_auth.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright 2021 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from typing import TYPE_CHECKING - -from synapse.api.constants import EventTypes, JoinRules -from synapse.api.room_versions import RoomVersion -from synapse.types import StateMap - -if TYPE_CHECKING: - from synapse.server import HomeServer - - -class EventAuthHandler: - def __init__(self, hs: "HomeServer"): - self._store = hs.get_datastore() - - async def can_join_without_invite( - self, state_ids: StateMap[str], room_version: RoomVersion, user_id: str - ) -> bool: - """ - Check whether a user can join a room without an invite. - - When joining a room with restricted joined rules (as defined in MSC3083), - the membership of spaces must be checked during join. - - Args: - state_ids: The state of the room as it currently is. - room_version: The room version of the room being joined. - user_id: The user joining the room. - - Returns: - True if the user can join the room, false otherwise. - """ - # This only applies to room versions which support the new join rule. - if not room_version.msc3083_join_rules: - return True - - # If there's no join rule, then it defaults to public (so this doesn't apply). - join_rules_event_id = state_ids.get((EventTypes.JoinRules, ""), None) - if not join_rules_event_id: - return True - - # If the join rule is not restricted, this doesn't apply. - join_rules_event = await self._store.get_event(join_rules_event_id) - if join_rules_event.content.get("join_rule") != JoinRules.MSC3083_RESTRICTED: - return True - - # If allowed is of the wrong form, then only allow invited users. - allowed_spaces = join_rules_event.content.get("allow", []) - if not isinstance(allowed_spaces, list): - return False - - # Get the list of joined rooms and see if there's an overlap. - joined_rooms = await self._store.get_rooms_for_user(user_id) - - # Pull out the other room IDs, invalid data gets filtered. - for space in allowed_spaces: - if not isinstance(space, dict): - continue - - space_id = space.get("space") - if not isinstance(space_id, str): - continue - - # The user was joined to one of the spaces specified, they can join - # this room! - if space_id in joined_rooms: - return True - - # The user was not in any of the required spaces. 
- return False diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 0c9bdf51a458..fe1d83f6b812 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -103,7 +103,7 @@ @attr.s(slots=True) class _NewEventInfo: - """Holds information about a received event, ready for passing to _auth_and_persist_events + """Holds information about a received event, ready for passing to _handle_new_events Attributes: event: the received event @@ -146,7 +146,6 @@ def __init__(self, hs: "HomeServer"): self.is_mine_id = hs.is_mine_id self.spam_checker = hs.get_spam_checker() self.event_creation_handler = hs.get_event_creation_handler() - self.event_auth_handler = hs.get_event_auth_handler() self._message_handler = hs.get_message_handler() self._server_notices_mxid = hs.config.server_notices_mxid self.config = hs.config @@ -808,10 +807,7 @@ async def _process_received_pdu( logger.debug("Processing event: %s", event) try: - context = await self.state_handler.compute_event_context( - event, old_state=state - ) - await self._auth_and_persist_event(origin, event, context, state=state) + await self._handle_new_event(origin, event, state=state) except AuthError as e: raise FederationError("ERROR", e.code, e.msg, affected=event.event_id) @@ -1014,9 +1010,7 @@ async def backfill( ) if ev_infos: - await self._auth_and_persist_events( - dest, room_id, ev_infos, backfilled=True - ) + await self._handle_new_events(dest, room_id, ev_infos, backfilled=True) # Step 2: Persist the rest of the events in the chunk one by one events.sort(key=lambda e: e.depth) @@ -1029,12 +1023,10 @@ async def backfill( # non-outliers assert not event.internal_metadata.is_outlier() - context = await self.state_handler.compute_event_context(event) - # We store these one at a time since each event depends on the # previous to work out the state. # TODO: We can probably do something more clever here. - await self._auth_and_persist_event(dest, event, context, backfilled=True) + await self._handle_new_event(dest, event, backfilled=True) return events @@ -1368,7 +1360,7 @@ async def get_event(event_id: str): event_infos.append(_NewEventInfo(event, None, auth)) - await self._auth_and_persist_events( + await self._handle_new_events( destination, room_id, event_infos, @@ -1674,47 +1666,16 @@ async def on_send_join_request(self, origin: str, pdu: EventBase) -> JsonDict: # would introduce the danger of backwards-compatibility problems. event.internal_metadata.send_on_behalf_of = origin - # Calculate the event context. - context = await self.state_handler.compute_event_context(event) - - # Get the state before the new event. - prev_state_ids = await context.get_prev_state_ids() - - # Check if the user is already in the room or invited to the room. - user_id = event.state_key - prev_member_event_id = prev_state_ids.get((EventTypes.Member, user_id), None) - newly_joined = True - is_invite = False - if prev_member_event_id: - prev_member_event = await self.store.get_event(prev_member_event_id) - newly_joined = prev_member_event.membership != Membership.JOIN - is_invite = prev_member_event.membership == Membership.INVITE - - # If the member is not already in the room, and not invited, check if - # they should be allowed access via membership in a space. 
- if ( - newly_joined - and not is_invite - and not await self.event_auth_handler.can_join_without_invite( - prev_state_ids, - event.room_version, - user_id, - ) - ): - raise SynapseError( - 400, - "You do not belong to any of the required spaces to join this room.", - ) - - # Persist the event. - await self._auth_and_persist_event(origin, event, context) + context = await self._handle_new_event(origin, event) logger.debug( - "on_send_join_request: After _auth_and_persist_event: %s, sigs: %s", + "on_send_join_request: After _handle_new_event: %s, sigs: %s", event.event_id, event.signatures, ) + prev_state_ids = await context.get_prev_state_ids() + state_ids = list(prev_state_ids.values()) auth_chain = await self.store.get_auth_chain(event.room_id, state_ids) @@ -1917,11 +1878,10 @@ async def on_send_leave_request(self, origin: str, pdu: EventBase) -> None: event.internal_metadata.outlier = False - context = await self.state_handler.compute_event_context(event) - await self._auth_and_persist_event(origin, event, context) + await self._handle_new_event(origin, event) logger.debug( - "on_send_leave_request: After _auth_and_persist_event: %s, sigs: %s", + "on_send_leave_request: After _handle_new_event: %s, sigs: %s", event.event_id, event.signatures, ) @@ -2029,44 +1989,16 @@ async def get_persisted_pdu( async def get_min_depth_for_context(self, context: str) -> int: return await self.store.get_min_depth(context) - async def _auth_and_persist_event( + async def _handle_new_event( self, origin: str, event: EventBase, - context: EventContext, state: Optional[Iterable[EventBase]] = None, auth_events: Optional[MutableStateMap[EventBase]] = None, backfilled: bool = False, - ) -> None: - """ - Process an event by performing auth checks and then persisting to the database. - - Args: - origin: The host the event originates from. - event: The event itself. - context: - The event context. - - NB that this function potentially modifies it. - state: - The state events used to auth the event. If this is not provided - the current state events will be used. - auth_events: - Map from (event_type, state_key) to event - - Normally, our calculated auth_events based on the state of the room - at the event's position in the DAG, though occasionally (eg if the - event is an outlier), may be the auth events claimed by the remote - server. - backfilled: True if the event was backfilled. 
- """ - context = await self._check_event_auth( - origin, - event, - context, - state=state, - auth_events=auth_events, - backfilled=backfilled, + ) -> EventContext: + context = await self._prep_event( + origin, event, state=state, auth_events=auth_events, backfilled=backfilled ) try: @@ -2088,7 +2020,9 @@ async def _auth_and_persist_event( ) raise - async def _auth_and_persist_events( + return context + + async def _handle_new_events( self, origin: str, room_id: str, @@ -2106,13 +2040,9 @@ async def _auth_and_persist_events( async def prep(ev_info: _NewEventInfo): event = ev_info.event with nested_logging_context(suffix=event.event_id): - res = await self.state_handler.compute_event_context( - event, old_state=ev_info.state - ) - res = await self._check_event_auth( + res = await self._prep_event( origin, event, - res, state=ev_info.state, auth_events=ev_info.auth_events, backfilled=backfilled, @@ -2247,6 +2177,49 @@ async def _persist_auth_tree( room_id, [(event, new_event_context)] ) + async def _prep_event( + self, + origin: str, + event: EventBase, + state: Optional[Iterable[EventBase]], + auth_events: Optional[MutableStateMap[EventBase]], + backfilled: bool, + ) -> EventContext: + context = await self.state_handler.compute_event_context(event, old_state=state) + + if not auth_events: + prev_state_ids = await context.get_prev_state_ids() + auth_events_ids = self.auth.compute_auth_events( + event, prev_state_ids, for_verification=True + ) + auth_events_x = await self.store.get_events(auth_events_ids) + auth_events = {(e.type, e.state_key): e for e in auth_events_x.values()} + + # This is a hack to fix some old rooms where the initial join event + # didn't reference the create event in its auth events. + if event.type == EventTypes.Member and not event.auth_event_ids(): + if len(event.prev_event_ids()) == 1 and event.depth < 5: + c = await self.store.get_event( + event.prev_event_ids()[0], allow_none=True + ) + if c and c.type == EventTypes.Create: + auth_events[(c.type, c.state_key)] = c + + context = await self.do_auth(origin, event, context, auth_events=auth_events) + + if not context.rejected: + await self._check_for_soft_fail(event, state, backfilled) + + if event.type == EventTypes.GuestAccess and not context.rejected: + await self.maybe_kick_guest_users(event) + + # If we are going to send this event over federation we precaclculate + # the joined hosts. + if event.internal_metadata.get_send_on_behalf_of(): + await self.event_creation_handler.cache_joined_hosts_for_event(event) + + return context + async def _check_for_soft_fail( self, event: EventBase, state: Optional[Iterable[EventBase]], backfilled: bool ) -> None: @@ -2357,28 +2330,19 @@ async def on_get_missing_events( return missing_events - async def _check_event_auth( + async def do_auth( self, origin: str, event: EventBase, context: EventContext, - state: Optional[Iterable[EventBase]], - auth_events: Optional[MutableStateMap[EventBase]], - backfilled: bool, + auth_events: MutableStateMap[EventBase], ) -> EventContext: """ - Checks whether an event should be rejected (for failing auth checks). Args: - origin: The host the event originates from. - event: The event itself. + origin: + event: context: - The event context. - - NB that this function potentially modifies it. - state: - The state events used to auth the event. If this is not provided - the current state events will be used. auth_events: Map from (event_type, state_key) to event @@ -2388,32 +2352,12 @@ async def _check_event_auth( server. 
Also NB that this function adds entries to it. - backfilled: True if the event was backfilled. - Returns: - The updated context object. + updated context object """ room_version = await self.store.get_room_version_id(event.room_id) room_version_obj = KNOWN_ROOM_VERSIONS[room_version] - if not auth_events: - prev_state_ids = await context.get_prev_state_ids() - auth_events_ids = self.auth.compute_auth_events( - event, prev_state_ids, for_verification=True - ) - auth_events_x = await self.store.get_events(auth_events_ids) - auth_events = {(e.type, e.state_key): e for e in auth_events_x.values()} - - # This is a hack to fix some old rooms where the initial join event - # didn't reference the create event in its auth events. - if event.type == EventTypes.Member and not event.auth_event_ids(): - if len(event.prev_event_ids()) == 1 and event.depth < 5: - c = await self.store.get_event( - event.prev_event_ids()[0], allow_none=True - ) - if c and c.type == EventTypes.Create: - auth_events[(c.type, c.state_key)] = c - try: context = await self._update_auth_events_and_context_for_auth( origin, event, context, auth_events @@ -2435,17 +2379,6 @@ async def _check_event_auth( logger.warning("Failed auth resolution for %r because %s", event, e) context.rejected = RejectedReason.AUTH_ERROR - if not context.rejected: - await self._check_for_soft_fail(event, state, backfilled) - - if event.type == EventTypes.GuestAccess and not context.rejected: - await self.maybe_kick_guest_users(event) - - # If we are going to send this event over federation we precaclculate - # the joined hosts. - if event.internal_metadata.get_send_on_behalf_of(): - await self.event_creation_handler.cache_joined_hosts_for_event(event) - return context async def _update_auth_events_and_context_for_auth( @@ -2455,7 +2388,7 @@ async def _update_auth_events_and_context_for_auth( context: EventContext, auth_events: MutableStateMap[EventBase], ) -> EventContext: - """Helper for _check_event_auth. See there for docs. + """Helper for do_auth. See there for docs. Checks whether a given event has the expected auth events. 
If it doesn't then we talk to the remote server to compare state to see if @@ -2535,14 +2468,9 @@ async def _update_auth_events_and_context_for_auth( e.internal_metadata.outlier = True logger.debug( - "_check_event_auth %s missing_auth: %s", - event.event_id, - e.event_id, - ) - context = await self.state_handler.compute_event_context(e) - await self._auth_and_persist_event( - origin, e, context, auth_events=auth + "do_auth %s missing_auth: %s", event.event_id, e.event_id ) + await self._handle_new_event(origin, e, auth_events=auth) if e.event_id in event_auth_events: auth_events[(e.type, e.state_key)] = e diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 2c5bada1d89b..2bbfac6471e5 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -19,7 +19,7 @@ from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple from synapse import types -from synapse.api.constants import AccountDataTypes, EventTypes, Membership +from synapse.api.constants import AccountDataTypes, EventTypes, JoinRules, Membership from synapse.api.errors import ( AuthError, Codes, @@ -28,6 +28,7 @@ SynapseError, ) from synapse.api.ratelimiting import Ratelimiter +from synapse.api.room_versions import RoomVersion from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.types import JsonDict, Requester, RoomAlias, RoomID, StateMap, UserID @@ -63,7 +64,6 @@ def __init__(self, hs: "HomeServer"): self.profile_handler = hs.get_profile_handler() self.event_creation_handler = hs.get_event_creation_handler() self.account_data_handler = hs.get_account_data_handler() - self.event_auth_handler = hs.get_event_auth_handler() self.member_linearizer = Linearizer(name="member") @@ -178,6 +178,62 @@ async def ratelimit_invite( await self._invites_per_user_limiter.ratelimit(requester, invitee_user_id) + async def _can_join_without_invite( + self, state_ids: StateMap[str], room_version: RoomVersion, user_id: str + ) -> bool: + """ + Check whether a user can join a room without an invite. + + When joining a room with restricted joined rules (as defined in MSC3083), + the membership of spaces must be checked during join. + + Args: + state_ids: The state of the room as it currently is. + room_version: The room version of the room being joined. + user_id: The user joining the room. + + Returns: + True if the user can join the room, false otherwise. + """ + # This only applies to room versions which support the new join rule. + if not room_version.msc3083_join_rules: + return True + + # If there's no join rule, then it defaults to public (so this doesn't apply). + join_rules_event_id = state_ids.get((EventTypes.JoinRules, ""), None) + if not join_rules_event_id: + return True + + # If the join rule is not restricted, this doesn't apply. + join_rules_event = await self.store.get_event(join_rules_event_id) + if join_rules_event.content.get("join_rule") != JoinRules.MSC3083_RESTRICTED: + return True + + # If allowed is of the wrong form, then only allow invited users. + allowed_spaces = join_rules_event.content.get("allow", []) + if not isinstance(allowed_spaces, list): + return False + + # Get the list of joined rooms and see if there's an overlap. + joined_rooms = await self.store.get_rooms_for_user(user_id) + + # Pull out the other room IDs, invalid data gets filtered. 
+ for space in allowed_spaces: + if not isinstance(space, dict): + continue + + space_id = space.get("space") + if not isinstance(space_id, str): + continue + + # The user was joined to one of the spaces specified, they can join + # this room! + if space_id in joined_rooms: + return True + + # The user was not in any of the required spaces. + return False + async def _local_membership_update( self, requester: Requester, @@ -246,7 +302,7 @@ async def _local_membership_update( if ( newly_joined and not user_is_invited - and not await self.event_auth_handler.can_join_without_invite( + and not await self._can_join_without_invite( prev_state_ids, event.room_version, user_id ) ): diff --git a/synapse/server.py b/synapse/server.py index 045b8f3fcac3..95a2cd2e5d08 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -77,7 +77,6 @@ from synapse.handlers.directory import DirectoryHandler from synapse.handlers.e2e_keys import E2eKeysHandler from synapse.handlers.e2e_room_keys import E2eRoomKeysHandler -from synapse.handlers.event_auth import EventAuthHandler from synapse.handlers.events import EventHandler, EventStreamHandler from synapse.handlers.federation import FederationHandler from synapse.handlers.groups_local import GroupsLocalHandler, GroupsLocalWorkerHandler @@ -750,10 +749,6 @@ def get_account_data_handler(self) -> AccountDataHandler: def get_space_summary_handler(self) -> SpaceSummaryHandler: return SpaceSummaryHandler(self) - @cache_in_self - def get_event_auth_handler(self) -> EventAuthHandler: - return EventAuthHandler(self) - @cache_in_self def get_external_cache(self) -> ExternalCache: return ExternalCache(self) diff --git a/tests/test_federation.py b/tests/test_federation.py index 0a3a996ec1e6..86a44a13da94 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -75,10 +75,8 @@ def setUp(self): ) self.handler = self.homeserver.get_federation_handler() - self.handler._check_event_auth = ( - lambda origin, event, context, state, auth_events, backfilled: succeed( - context - ) + self.handler.do_auth = lambda origin, event, context, auth_events: succeed( + context ) self.client = self.homeserver.get_federation_client() self.client._check_sigs_and_hash_and_fetch = lambda dest, pdus, **k: succeed( From 936e69825ab684ca580edb6038e86fdb2e561776 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 14 Apr 2021 12:35:28 -0400 Subject: [PATCH 038/222] Separate creating an event context from persisting it in the federation handler (#9800) This refactoring allows adding logic that uses the event context before persisting it. --- changelog.d/9800.feature | 1 + synapse/handlers/federation.py | 178 +++++++++++++++++++++------------ tests/test_federation.py | 6 +- 3 files changed, 118 insertions(+), 67 deletions(-) create mode 100644 changelog.d/9800.feature diff --git a/changelog.d/9800.feature b/changelog.d/9800.feature new file mode 100644 index 000000000000..9404ad2fc047 --- /dev/null +++ b/changelog.d/9800.feature @@ -0,0 +1 @@ +Update experimental support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083): restricting room access via group membership. 
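
For illustration only (not part of the patch): the calling pattern this
refactoring enables, using names from the diff below. The event context is
computed first, so callers can use it before anything is persisted:

    context = await self.state_handler.compute_event_context(event)
    # ... logic that needs `context`, e.g. reading its prev_state_ids ...
    await self._auth_and_persist_event(origin, event, context)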
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index fe1d83f6b812..4b3730aa3b7d 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -103,7 +103,7 @@ @attr.s(slots=True) class _NewEventInfo: - """Holds information about a received event, ready for passing to _handle_new_events + """Holds information about a received event, ready for passing to _auth_and_persist_events Attributes: event: the received event @@ -807,7 +807,10 @@ async def _process_received_pdu( logger.debug("Processing event: %s", event) try: - await self._handle_new_event(origin, event, state=state) + context = await self.state_handler.compute_event_context( + event, old_state=state + ) + await self._auth_and_persist_event(origin, event, context, state=state) except AuthError as e: raise FederationError("ERROR", e.code, e.msg, affected=event.event_id) @@ -1010,7 +1013,9 @@ async def backfill( ) if ev_infos: - await self._handle_new_events(dest, room_id, ev_infos, backfilled=True) + await self._auth_and_persist_events( + dest, room_id, ev_infos, backfilled=True + ) # Step 2: Persist the rest of the events in the chunk one by one events.sort(key=lambda e: e.depth) @@ -1023,10 +1028,12 @@ async def backfill( # non-outliers assert not event.internal_metadata.is_outlier() + context = await self.state_handler.compute_event_context(event) + # We store these one at a time since each event depends on the # previous to work out the state. # TODO: We can probably do something more clever here. - await self._handle_new_event(dest, event, backfilled=True) + await self._auth_and_persist_event(dest, event, context, backfilled=True) return events @@ -1360,7 +1367,7 @@ async def get_event(event_id: str): event_infos.append(_NewEventInfo(event, None, auth)) - await self._handle_new_events( + await self._auth_and_persist_events( destination, room_id, event_infos, @@ -1666,10 +1673,11 @@ async def on_send_join_request(self, origin: str, pdu: EventBase) -> JsonDict: # would introduce the danger of backwards-compatibility problems. 
event.internal_metadata.send_on_behalf_of = origin - context = await self._handle_new_event(origin, event) + context = await self.state_handler.compute_event_context(event) + context = await self._auth_and_persist_event(origin, event, context) logger.debug( - "on_send_join_request: After _handle_new_event: %s, sigs: %s", + "on_send_join_request: After _auth_and_persist_event: %s, sigs: %s", event.event_id, event.signatures, ) @@ -1878,10 +1886,11 @@ async def on_send_leave_request(self, origin: str, pdu: EventBase) -> None: event.internal_metadata.outlier = False - await self._handle_new_event(origin, event) + context = await self.state_handler.compute_event_context(event) + await self._auth_and_persist_event(origin, event, context) logger.debug( - "on_send_leave_request: After _handle_new_event: %s, sigs: %s", + "on_send_leave_request: After _auth_and_persist_event: %s, sigs: %s", event.event_id, event.signatures, ) @@ -1989,16 +1998,47 @@ async def get_persisted_pdu( async def get_min_depth_for_context(self, context: str) -> int: return await self.store.get_min_depth(context) - async def _handle_new_event( + async def _auth_and_persist_event( self, origin: str, event: EventBase, + context: EventContext, state: Optional[Iterable[EventBase]] = None, auth_events: Optional[MutableStateMap[EventBase]] = None, backfilled: bool = False, ) -> EventContext: - context = await self._prep_event( - origin, event, state=state, auth_events=auth_events, backfilled=backfilled + """ + Process an event by performing auth checks and then persisting to the database. + + Args: + origin: The host the event originates from. + event: The event itself. + context: + The event context. + + NB that this function potentially modifies it. + state: + The state events used to check the event for soft-fail. If this is + not provided the current state events will be used. + auth_events: + Map from (event_type, state_key) to event + + Normally, our calculated auth_events based on the state of the room + at the event's position in the DAG, though occasionally (eg if the + event is an outlier), may be the auth events claimed by the remote + server. + backfilled: True if the event was backfilled. + + Returns: + The event context. 
+ """ + context = await self._check_event_auth( + origin, + event, + context, + state=state, + auth_events=auth_events, + backfilled=backfilled, ) try: @@ -2022,7 +2062,7 @@ async def _handle_new_event( return context - async def _handle_new_events( + async def _auth_and_persist_events( self, origin: str, room_id: str, @@ -2040,9 +2080,13 @@ async def _handle_new_events( async def prep(ev_info: _NewEventInfo): event = ev_info.event with nested_logging_context(suffix=event.event_id): - res = await self._prep_event( + res = await self.state_handler.compute_event_context( + event, old_state=ev_info.state + ) + res = await self._check_event_auth( origin, event, + res, state=ev_info.state, auth_events=ev_info.auth_events, backfilled=backfilled, @@ -2177,49 +2221,6 @@ async def _persist_auth_tree( room_id, [(event, new_event_context)] ) - async def _prep_event( - self, - origin: str, - event: EventBase, - state: Optional[Iterable[EventBase]], - auth_events: Optional[MutableStateMap[EventBase]], - backfilled: bool, - ) -> EventContext: - context = await self.state_handler.compute_event_context(event, old_state=state) - - if not auth_events: - prev_state_ids = await context.get_prev_state_ids() - auth_events_ids = self.auth.compute_auth_events( - event, prev_state_ids, for_verification=True - ) - auth_events_x = await self.store.get_events(auth_events_ids) - auth_events = {(e.type, e.state_key): e for e in auth_events_x.values()} - - # This is a hack to fix some old rooms where the initial join event - # didn't reference the create event in its auth events. - if event.type == EventTypes.Member and not event.auth_event_ids(): - if len(event.prev_event_ids()) == 1 and event.depth < 5: - c = await self.store.get_event( - event.prev_event_ids()[0], allow_none=True - ) - if c and c.type == EventTypes.Create: - auth_events[(c.type, c.state_key)] = c - - context = await self.do_auth(origin, event, context, auth_events=auth_events) - - if not context.rejected: - await self._check_for_soft_fail(event, state, backfilled) - - if event.type == EventTypes.GuestAccess and not context.rejected: - await self.maybe_kick_guest_users(event) - - # If we are going to send this event over federation we precaclculate - # the joined hosts. - if event.internal_metadata.get_send_on_behalf_of(): - await self.event_creation_handler.cache_joined_hosts_for_event(event) - - return context - async def _check_for_soft_fail( self, event: EventBase, state: Optional[Iterable[EventBase]], backfilled: bool ) -> None: @@ -2330,19 +2331,28 @@ async def on_get_missing_events( return missing_events - async def do_auth( + async def _check_event_auth( self, origin: str, event: EventBase, context: EventContext, - auth_events: MutableStateMap[EventBase], + state: Optional[Iterable[EventBase]], + auth_events: Optional[MutableStateMap[EventBase]], + backfilled: bool, ) -> EventContext: """ + Checks whether an event should be rejected (for failing auth checks). Args: - origin: - event: + origin: The host the event originates from. + event: The event itself. context: + The event context. + + NB that this function potentially modifies it. + state: + The state events used to check the event for soft-fail. If this is + not provided the current state events will be used. auth_events: Map from (event_type, state_key) to event @@ -2352,12 +2362,34 @@ async def do_auth( server. Also NB that this function adds entries to it. + + If this is not provided, it is calculated from the previous state IDs. + backfilled: True if the event was backfilled. 
+ Returns: - updated context object + The updated context object. """ room_version = await self.store.get_room_version_id(event.room_id) room_version_obj = KNOWN_ROOM_VERSIONS[room_version] + if not auth_events: + prev_state_ids = await context.get_prev_state_ids() + auth_events_ids = self.auth.compute_auth_events( + event, prev_state_ids, for_verification=True + ) + auth_events_x = await self.store.get_events(auth_events_ids) + auth_events = {(e.type, e.state_key): e for e in auth_events_x.values()} + + # This is a hack to fix some old rooms where the initial join event + # didn't reference the create event in its auth events. + if event.type == EventTypes.Member and not event.auth_event_ids(): + if len(event.prev_event_ids()) == 1 and event.depth < 5: + c = await self.store.get_event( + event.prev_event_ids()[0], allow_none=True + ) + if c and c.type == EventTypes.Create: + auth_events[(c.type, c.state_key)] = c + try: context = await self._update_auth_events_and_context_for_auth( origin, event, context, auth_events @@ -2379,6 +2411,17 @@ async def do_auth( logger.warning("Failed auth resolution for %r because %s", event, e) context.rejected = RejectedReason.AUTH_ERROR + if not context.rejected: + await self._check_for_soft_fail(event, state, backfilled) + + if event.type == EventTypes.GuestAccess and not context.rejected: + await self.maybe_kick_guest_users(event) + + # If we are going to send this event over federation we precaclculate + # the joined hosts. + if event.internal_metadata.get_send_on_behalf_of(): + await self.event_creation_handler.cache_joined_hosts_for_event(event) + return context async def _update_auth_events_and_context_for_auth( @@ -2388,7 +2431,7 @@ async def _update_auth_events_and_context_for_auth( context: EventContext, auth_events: MutableStateMap[EventBase], ) -> EventContext: - """Helper for do_auth. See there for docs. + """Helper for _check_event_auth. See there for docs. Checks whether a given event has the expected auth events. 
If it doesn't then we talk to the remote server to compare state to see if @@ -2468,9 +2511,14 @@ async def _update_auth_events_and_context_for_auth( e.internal_metadata.outlier = True logger.debug( - "do_auth %s missing_auth: %s", event.event_id, e.event_id + "_check_event_auth %s missing_auth: %s", + event.event_id, + e.event_id, + ) + context = await self.state_handler.compute_event_context(e) + await self._auth_and_persist_event( + origin, e, context, auth_events=auth ) - await self._handle_new_event(origin, e, auth_events=auth) if e.event_id in event_auth_events: auth_events[(e.type, e.state_key)] = e diff --git a/tests/test_federation.py b/tests/test_federation.py index 86a44a13da94..0a3a996ec1e6 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -75,8 +75,10 @@ def setUp(self): ) self.handler = self.homeserver.get_federation_handler() - self.handler.do_auth = lambda origin, event, context, auth_events: succeed( - context + self.handler._check_event_auth = ( + lambda origin, event, context, state, auth_events, backfilled: succeed( + context + ) ) self.client = self.homeserver.get_federation_client() self.client._check_sigs_and_hash_and_fetch = lambda dest, pdus, **k: succeed( From 5a153772c197a689df6c087e49d7bd8beee5dbdd Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 14 Apr 2021 19:09:08 +0100 Subject: [PATCH 039/222] remove `HomeServer.get_config` (#9815) Every single time I want to access the config object, I have to remember whether or not we use `get_config`. Let's just get rid of it. --- changelog.d/9815.misc | 1 + synapse/app/generic_worker.py | 2 +- synapse/app/homeserver.py | 18 +++++++++--------- synapse/crypto/keyring.py | 2 +- synapse/federation/federation_server.py | 2 +- .../federation/sender/transaction_manager.py | 2 +- synapse/rest/media/v1/config_resource.py | 2 +- synapse/server.py | 3 --- tests/app/test_openid_listener.py | 2 +- 9 files changed, 16 insertions(+), 18 deletions(-) create mode 100644 changelog.d/9815.misc diff --git a/changelog.d/9815.misc b/changelog.d/9815.misc new file mode 100644 index 000000000000..e33d012d3d28 --- /dev/null +++ b/changelog.d/9815.misc @@ -0,0 +1 @@ +Replace `HomeServer.get_config()` with inline references. 
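The replacement is mechanical: every call site swaps the accessor for direct attribute access. A minimal sketch of the pattern, using a stand-in `HomeServer` class rather than the real Synapse one:

    from types import SimpleNamespace

    class HomeServer:
        def __init__(self, config):
            # The config is stored as a plain attribute at construction time,
            # so a get_config() accessor added nothing over direct access.
            self.config = config

    hs = HomeServer(SimpleNamespace(enable_metrics=False))

    # Before: hs.get_config().enable_metrics
    # After:
    if not hs.config.enable_metrics:
        print("Metrics listener configured, but enable_metrics is not True!")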
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 28e3b1aa3cb4..26c458dbb68b 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -405,7 +405,7 @@ def start_listening(self, listeners: Iterable[ListenerConfig]): listener.bind_addresses, listener.port, manhole_globals={"hs": self} ) elif listener.type == "metrics": - if not self.get_config().enable_metrics: + if not self.config.enable_metrics: logger.warning( ( "Metrics listener configured, but " diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 679b7f428929..8be8b520eb1a 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -191,7 +191,7 @@ def _configure_named_resource(self, name, compress=False): } ) - if self.get_config().threepid_behaviour_email == ThreepidBehaviour.LOCAL: + if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL: from synapse.rest.synapse.client.password_reset import ( PasswordResetSubmitTokenResource, ) @@ -230,7 +230,7 @@ def _configure_named_resource(self, name, compress=False): ) if name in ["media", "federation", "client"]: - if self.get_config().enable_media_repo: + if self.config.enable_media_repo: media_repo = self.get_media_repository_resource() resources.update( {MEDIA_PREFIX: media_repo, LEGACY_MEDIA_PREFIX: media_repo} @@ -244,7 +244,7 @@ def _configure_named_resource(self, name, compress=False): resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self) if name == "webclient": - webclient_loc = self.get_config().web_client_location + webclient_loc = self.config.web_client_location if webclient_loc is None: logger.warning( @@ -265,7 +265,7 @@ def _configure_named_resource(self, name, compress=False): # https://twistedmatrix.com/trac/ticket/7678 resources[WEB_CLIENT_PREFIX] = File(webclient_loc) - if name == "metrics" and self.get_config().enable_metrics: + if name == "metrics" and self.config.enable_metrics: resources[METRICS_PREFIX] = MetricsResource(RegistryProxy) if name == "replication": @@ -274,9 +274,7 @@ def _configure_named_resource(self, name, compress=False): return resources def start_listening(self, listeners: Iterable[ListenerConfig]): - config = self.get_config() - - if config.redis_enabled: + if self.config.redis_enabled: # If redis is enabled we connect via the replication command handler # in the same way as the workers (since we're effectively a client # rather than a server). 
@@ -284,7 +282,9 @@ def start_listening(self, listeners: Iterable[ListenerConfig]): for listener in listeners: if listener.type == "http": - self._listening_services.extend(self._listener_http(config, listener)) + self._listening_services.extend( + self._listener_http(self.config, listener) + ) elif listener.type == "manhole": _base.listen_manhole( listener.bind_addresses, listener.port, manhole_globals={"hs": self} @@ -298,7 +298,7 @@ def start_listening(self, listeners: Iterable[ListenerConfig]): for s in services: reactor.addSystemEventTrigger("before", "shutdown", s.stopListening) elif listener.type == "metrics": - if not self.get_config().enable_metrics: + if not self.config.enable_metrics: logger.warning( ( "Metrics listener configured, but " diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 40073dc7c2b1..5f18ef77489d 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -501,7 +501,7 @@ async def get_keys( class BaseV2KeyFetcher(KeyFetcher): def __init__(self, hs: "HomeServer"): self.store = hs.get_datastore() - self.config = hs.get_config() + self.config = hs.config async def process_v2_response( self, from_server: str, response_json: JsonDict, time_added_ms: int diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 3ff6479cfbde..b729a69203d0 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -136,7 +136,7 @@ def __init__(self, hs: "HomeServer"): ) # type: ResponseCache[Tuple[str, str]] self._federation_metrics_domains = ( - hs.get_config().federation.federation_metrics_domains + hs.config.federation.federation_metrics_domains ) async def on_backfill_request( diff --git a/synapse/federation/sender/transaction_manager.py b/synapse/federation/sender/transaction_manager.py index 12fe3a719bc3..72a635830b9a 100644 --- a/synapse/federation/sender/transaction_manager.py +++ b/synapse/federation/sender/transaction_manager.py @@ -56,7 +56,7 @@ def __init__(self, hs: "synapse.server.HomeServer"): self._transport_layer = hs.get_federation_transport_client() self._federation_metrics_domains = ( - hs.get_config().federation.federation_metrics_domains + hs.config.federation.federation_metrics_domains ) # HACK to get unique tx id diff --git a/synapse/rest/media/v1/config_resource.py b/synapse/rest/media/v1/config_resource.py index b20c29f0071e..a1d36e5cf189 100644 --- a/synapse/rest/media/v1/config_resource.py +++ b/synapse/rest/media/v1/config_resource.py @@ -30,7 +30,7 @@ class MediaConfigResource(DirectServeJsonResource): def __init__(self, hs: "HomeServer"): super().__init__() - config = hs.get_config() + config = hs.config self.clock = hs.get_clock() self.auth = hs.get_auth() self.limits_dict = {"m.upload.size": config.max_upload_size} diff --git a/synapse/server.py b/synapse/server.py index 95a2cd2e5d08..42d2fad8e869 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -323,9 +323,6 @@ def get_datastores(self) -> Databases: return self.datastores - def get_config(self) -> HomeServerConfig: - return self.config - @cache_in_self def get_distributor(self) -> Distributor: return Distributor() diff --git a/tests/app/test_openid_listener.py b/tests/app/test_openid_listener.py index 276f09015ed0..264e10108242 100644 --- a/tests/app/test_openid_listener.py +++ b/tests/app/test_openid_listener.py @@ -109,7 +109,7 @@ def test_openid_listener(self, names, expectation): } # Listen with the config - self.hs._listener_http(self.hs.get_config(), 
parse_listener_def(config)) + self.hs._listener_http(self.hs.config, parse_listener_def(config)) # Grab the resource from the site that was told to listen site = self.reactor.tcpServers[0][1] From 601b893352838c1391da083e8edde62904d23208 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 16 Apr 2021 14:44:55 +0100 Subject: [PATCH 040/222] Small speed up joining large remote rooms (#9825) There are a couple of points in `persist_events` where we are doing a query per event in series, which we can replace. --- changelog.d/9825.misc | 1 + synapse/storage/databases/main/events.py | 54 +++++++++++++++--------- 2 files changed, 34 insertions(+), 21 deletions(-) create mode 100644 changelog.d/9825.misc diff --git a/changelog.d/9825.misc b/changelog.d/9825.misc new file mode 100644 index 000000000000..42f3f1561936 --- /dev/null +++ b/changelog.d/9825.misc @@ -0,0 +1 @@ +Small speed up for joining large remote rooms. diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index bed4326d1137..a362521e20e7 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1378,17 +1378,21 @@ def get_internal_metadata(event): ], ) - for event, _ in events_and_contexts: - if not event.internal_metadata.is_redacted(): - # If we're persisting an unredacted event we go and ensure - # that we mark any redactions that reference this event as - # requiring censoring. - self.db_pool.simple_update_txn( - txn, - table="redactions", - keyvalues={"redacts": event.event_id}, - updatevalues={"have_censored": False}, + # If we're persisting an unredacted event we go and ensure + # that we mark any redactions that reference this event as + # requiring censoring. + sql = "UPDATE redactions SET have_censored = ? WHERE redacts = ?" + txn.execute_batch( + sql, + ( + ( + False, + event.event_id, ) + for event, _ in events_and_contexts + if not event.internal_metadata.is_redacted() + ), + ) state_events_and_contexts = [ ec for ec in events_and_contexts if ec[0].is_state() @@ -1881,20 +1885,28 @@ def _set_push_actions_for_event_and_users_txn( ), ) - for event, _ in events_and_contexts: - user_ids = self.db_pool.simple_select_onecol_txn( - txn, - table="event_push_actions_staging", - keyvalues={"event_id": event.event_id}, - retcol="user_id", - ) + room_to_event_ids = {} # type: Dict[str, List[str]] + for e, _ in events_and_contexts: + room_to_event_ids.setdefault(e.room_id, []).append(e.event_id) - for uid in user_ids: - txn.call_after( - self.store.get_unread_event_push_actions_by_room_for_user.invalidate_many, - (event.room_id, uid), + for room_id, event_ids in room_to_event_ids.items(): + rows = self.db_pool.simple_select_many_txn( + txn, + table="event_push_actions_staging", + column="event_id", + iterable=event_ids, + keyvalues={}, + retcols=("user_id",), ) + user_ids = {row["user_id"] for row in rows} + + for user_id in user_ids: + txn.call_after( + self.store.get_unread_event_push_actions_by_room_for_user.invalidate_many, + (room_id, user_id), + ) + # Now we delete the staging area for *all* events that were being # persisted. txn.execute_batch( From c571736c6ca5d1d2d9bf7cd9b717465d446ac7b3 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 16 Apr 2021 18:17:18 +0100 Subject: [PATCH 041/222] User directory: use calculated room membership state instead (#9821) Fixes: #9797. 
Should help reduce CPU usage on the user directory, especially when memberships change in rooms with lots of state history. --- changelog.d/9821.misc | 1 + synapse/handlers/user_directory.py | 15 ++++++----- synapse/storage/databases/main/roommember.py | 27 ++++++++++++++++++++ 3 files changed, 36 insertions(+), 7 deletions(-) create mode 100644 changelog.d/9821.misc diff --git a/changelog.d/9821.misc b/changelog.d/9821.misc new file mode 100644 index 000000000000..03b2d2ed4dbb --- /dev/null +++ b/changelog.d/9821.misc @@ -0,0 +1 @@ +Reduce CPU usage of the user directory by reusing existing calculated room membership. \ No newline at end of file diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index 9b1e6d5c18e8..dacc4f3076e7 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -44,7 +44,6 @@ def __init__(self, hs: "HomeServer"): super().__init__(hs) self.store = hs.get_datastore() - self.state = hs.get_state_handler() self.server_name = hs.hostname self.clock = hs.get_clock() self.notifier = hs.get_notifier() @@ -302,10 +301,12 @@ async def _handle_room_publicity_change( # ignore the change return - users_with_profile = await self.state.get_current_users_in_room(room_id) + other_users_in_room_with_profiles = ( + await self.store.get_users_in_room_with_profiles(room_id) + ) # Remove every user from the sharing tables for that room. - for user_id in users_with_profile.keys(): + for user_id in other_users_in_room_with_profiles.keys(): await self.store.remove_user_who_share_room(user_id, room_id) # Then, re-add them to the tables. @@ -314,7 +315,7 @@ async def _handle_room_publicity_change( # which when ran over an entire room, will result in the same values # being added multiple times. The batching upserts shouldn't make this # too bad, though. - for user_id, profile in users_with_profile.items(): + for user_id, profile in other_users_in_room_with_profiles.items(): await self._handle_new_user(room_id, user_id, profile) async def _handle_new_user( @@ -336,7 +337,7 @@ async def _handle_new_user( room_id ) # Now we update users who share rooms with users. - users_with_profile = await self.state.get_current_users_in_room(room_id) + other_users_in_room = await self.store.get_users_in_room(room_id) if is_public: await self.store.add_users_in_public_rooms(room_id, (user_id,)) @@ -352,14 +353,14 @@ async def _handle_new_user( # We don't care about appservice users. if not is_appservice: - for other_user_id in users_with_profile: + for other_user_id in other_users_in_room: if user_id == other_user_id: continue to_insert.add((user_id, other_user_id)) # Next we need to update for every local user in the room - for other_user_id in users_with_profile: + for other_user_id in other_users_in_room: if user_id == other_user_id: continue diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index ef5587f87a9f..fd525dce65c3 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -173,6 +173,33 @@ def get_users_in_room_txn(self, txn, room_id: str) -> List[str]: txn.execute(sql, (room_id, Membership.JOIN)) return [r[0] for r in txn] + @cached(max_entries=100000, iterable=True) + async def get_users_in_room_with_profiles( + self, room_id: str + ) -> Dict[str, ProfileInfo]: + """Get a mapping from user ID to profile information for all users in a given room. + + Args: + room_id: The ID of the room to retrieve the users of. 
+ + Returns: + A mapping from user ID to ProfileInfo. + """ + + def _get_users_in_room_with_profiles(txn) -> Dict[str, ProfileInfo]: + sql = """ + SELECT user_id, display_name, avatar_url FROM room_memberships + WHERE room_id = ? AND membership = ? + """ + txn.execute(sql, (room_id, Membership.JOIN)) + + return {r[0]: ProfileInfo(display_name=r[1], avatar_url=r[2]) for r in txn} + + return await self.db_pool.runInteraction( + "get_users_in_room_with_profiles", + _get_users_in_room_with_profiles, + ) + @cached(max_entries=100000) async def get_room_summary(self, room_id: str) -> Dict[str, MemberSummary]: """Get the details of a room roughly suitable for use by the room From 2b7dd21655b1ed2db490853d2cdbf6fb38704d81 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 19 Apr 2021 10:50:49 +0100 Subject: [PATCH 042/222] Don't send normal presence updates over federation replication stream (#9828) --- changelog.d/9828.feature | 1 + synapse/federation/send_queue.py | 70 +------------------ synapse/federation/sender/__init__.py | 96 +-------------------------- synapse/handlers/presence.py | 78 +++++++++++++++++----- synapse/module_api/__init__.py | 13 ++-- 5 files changed, 75 insertions(+), 183 deletions(-) create mode 100644 changelog.d/9828.feature diff --git a/changelog.d/9828.feature b/changelog.d/9828.feature new file mode 100644 index 000000000000..f56b0bb3bdeb --- /dev/null +++ b/changelog.d/9828.feature @@ -0,0 +1 @@ +Add experimental support for handling presence on a worker. diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index e3f0bc2471f4..d71f04e43e41 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -76,9 +76,6 @@ def __init__(self, hs: "HomeServer"): # Pending presence map user_id -> UserPresenceState self.presence_map = {} # type: Dict[str, UserPresenceState] - # Stream position -> list[user_id] - self.presence_changed = SortedDict() # type: SortedDict[int, List[str]] - # Stores the destinations we need to explicitly send presence to about a # given user. # Stream position -> (user_id, destinations) @@ -96,7 +93,7 @@ def __init__(self, hs: "HomeServer"): self.edus = SortedDict() # type: SortedDict[int, Edu] - # stream ID for the next entry into presence_changed/keyed_edu_changed/edus. + # stream ID for the next entry into keyed_edu_changed/edus. 
self.pos = 1 # map from stream ID to the time that stream entry was generated, so that we @@ -117,7 +114,6 @@ def register(name: str, queue: Sized) -> None: for queue_name in [ "presence_map", - "presence_changed", "keyed_edu", "keyed_edu_changed", "edus", @@ -155,23 +151,12 @@ def _clear_queue_before_pos(self, position_to_delete: int) -> None: """Clear all the queues from before a given position""" with Measure(self.clock, "send_queue._clear"): # Delete things out of presence maps - keys = self.presence_changed.keys() - i = self.presence_changed.bisect_left(position_to_delete) - for key in keys[:i]: - del self.presence_changed[key] - - user_ids = { - user_id for uids in self.presence_changed.values() for user_id in uids - } - keys = self.presence_destinations.keys() i = self.presence_destinations.bisect_left(position_to_delete) for key in keys[:i]: del self.presence_destinations[key] - user_ids.update( - user_id for user_id, _ in self.presence_destinations.values() - ) + user_ids = {user_id for user_id, _ in self.presence_destinations.values()} to_del = [ user_id for user_id in self.presence_map if user_id not in user_ids @@ -244,23 +229,6 @@ async def send_read_receipt(self, receipt: ReadReceipt) -> None: """ # nothing to do here: the replication listener will handle it. - def send_presence(self, states: List[UserPresenceState]) -> None: - """As per FederationSender - - Args: - states - """ - pos = self._next_pos() - - # We only want to send presence for our own users, so lets always just - # filter here just in case. - local_states = [s for s in states if self.is_mine_id(s.user_id)] - - self.presence_map.update({state.user_id: state for state in local_states}) - self.presence_changed[pos] = [state.user_id for state in local_states] - - self.notifier.on_new_replication_data() - def send_presence_to_destinations( self, states: Iterable[UserPresenceState], destinations: Iterable[str] ) -> None: @@ -325,18 +293,6 @@ async def get_replication_rows( # of the federation stream. 
rows = [] # type: List[Tuple[int, BaseFederationRow]] - # Fetch changed presence - i = self.presence_changed.bisect_right(from_token) - j = self.presence_changed.bisect_right(to_token) + 1 - dest_user_ids = [ - (pos, user_id) - for pos, user_id_list in self.presence_changed.items()[i:j] - for user_id in user_id_list - ] - - for (key, user_id) in dest_user_ids: - rows.append((key, PresenceRow(state=self.presence_map[user_id]))) - # Fetch presence to send to destinations i = self.presence_destinations.bisect_right(from_token) j = self.presence_destinations.bisect_right(to_token) + 1 @@ -427,22 +383,6 @@ def add_to_buffer(self, buff): raise NotImplementedError() -class PresenceRow( - BaseFederationRow, namedtuple("PresenceRow", ("state",)) # UserPresenceState -): - TypeId = "p" - - @staticmethod - def from_data(data): - return PresenceRow(state=UserPresenceState.from_dict(data)) - - def to_data(self): - return self.state.as_dict() - - def add_to_buffer(self, buff): - buff.presence.append(self.state) - - class PresenceDestinationsRow( BaseFederationRow, namedtuple( @@ -506,7 +446,6 @@ def add_to_buffer(self, buff): _rowtypes = ( - PresenceRow, PresenceDestinationsRow, KeyedEduRow, EduRow, @@ -518,7 +457,6 @@ def add_to_buffer(self, buff): ParsedFederationStreamData = namedtuple( "ParsedFederationStreamData", ( - "presence", # list(UserPresenceState) "presence_destinations", # list of tuples of UserPresenceState and destinations "keyed_edus", # dict of destination -> { key -> Edu } "edus", # dict of destination -> [Edu] @@ -543,7 +481,6 @@ def process_rows_for_federation( # them into the appropriate collection and then send them off. buff = ParsedFederationStreamData( - presence=[], presence_destinations=[], keyed_edus={}, edus={}, @@ -559,9 +496,6 @@ def process_rows_for_federation( parsed_row = RowType.from_data(row.data) parsed_row.add_to_buffer(buff) - if buff.presence: - transaction_queue.send_presence(buff.presence) - for state, destinations in buff.presence_destinations: transaction_queue.send_presence_to_destinations( states=[state], destinations=destinations diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 952ad39f8c2e..6266accaf533 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -24,8 +24,6 @@ from synapse.federation.sender.per_destination_queue import PerDestinationQueue from synapse.federation.sender.transaction_manager import TransactionManager from synapse.federation.units import Edu -from synapse.handlers.presence import get_interested_remotes -from synapse.logging.context import preserve_fn from synapse.metrics import ( LaterGauge, event_processing_loop_counter, @@ -34,7 +32,7 @@ ) from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import Collection, JsonDict, ReadReceipt, RoomStreamToken -from synapse.util.metrics import Measure, measure_func +from synapse.util.metrics import Measure if TYPE_CHECKING: from synapse.events.presence_router import PresenceRouter @@ -79,15 +77,6 @@ async def send_read_receipt(self, receipt: ReadReceipt) -> None: """ raise NotImplementedError() - @abc.abstractmethod - def send_presence(self, states: List[UserPresenceState]) -> None: - """Send the new presence states to the appropriate destinations. - - This actually queues up the presence states ready for sending and - triggers a background task to process them and send out the transactions. 
- """ - raise NotImplementedError() - @abc.abstractmethod def send_presence_to_destinations( self, states: Iterable[UserPresenceState], destinations: Iterable[str] @@ -176,11 +165,6 @@ def __init__(self, hs: "HomeServer"): ), ) - # Map of user_id -> UserPresenceState for all the pending presence - # to be sent out by user_id. Entries here get processed and put in - # pending_presence_by_dest - self.pending_presence = {} # type: Dict[str, UserPresenceState] - LaterGauge( "synapse_federation_transaction_queue_pending_pdus", "", @@ -201,8 +185,6 @@ def __init__(self, hs: "HomeServer"): self._is_processing = False self._last_poked_id = -1 - self._processing_pending_presence = False - # map from room_id to a set of PerDestinationQueues which we believe are # awaiting a call to flush_read_receipts_for_room. The presence of an entry # here for a given room means that we are rate-limiting RR flushes to that room, @@ -546,48 +528,6 @@ def _flush_rrs_for_room(self, room_id: str) -> None: for queue in queues: queue.flush_read_receipts_for_room(room_id) - @preserve_fn # the caller should not yield on this - async def send_presence(self, states: List[UserPresenceState]) -> None: - """Send the new presence states to the appropriate destinations. - - This actually queues up the presence states ready for sending and - triggers a background task to process them and send out the transactions. - """ - if not self.hs.config.use_presence: - # No-op if presence is disabled. - return - - # First we queue up the new presence by user ID, so multiple presence - # updates in quick succession are correctly handled. - # We only want to send presence for our own users, so lets always just - # filter here just in case. - self.pending_presence.update( - {state.user_id: state for state in states if self.is_mine_id(state.user_id)} - ) - - # We then handle the new pending presence in batches, first figuring - # out the destinations we need to send each state to and then poking it - # to attempt a new transaction. 
We linearize this so that we don't - # accidentally mess up the ordering and send multiple presence updates - # in the wrong order - if self._processing_pending_presence: - return - - self._processing_pending_presence = True - try: - while True: - states_map = self.pending_presence - self.pending_presence = {} - - if not states_map: - break - - await self._process_presence_inner(list(states_map.values())) - except Exception: - logger.exception("Error sending presence states to servers") - finally: - self._processing_pending_presence = False - def send_presence_to_destinations( self, states: Iterable[UserPresenceState], destinations: Iterable[str] ) -> None: @@ -608,40 +548,6 @@ def send_presence_to_destinations( continue self._get_per_destination_queue(destination).send_presence(states) - @measure_func("txnqueue._process_presence") - async def _process_presence_inner(self, states: List[UserPresenceState]) -> None: - """Given a list of states populate self.pending_presence_by_dest and - poke to send a new transaction to each destination - """ - # We pull the presence router here instead of __init__ - # to prevent a dependency cycle: - # - # AuthHandler -> Notifier -> FederationSender - # -> PresenceRouter -> ModuleApi -> AuthHandler - if self._presence_router is None: - self._presence_router = self.hs.get_presence_router() - - assert self._presence_router is not None - - hosts_and_states = await get_interested_remotes( - self.store, - self._presence_router, - states, - self.state, - ) - - for destinations, states in hosts_and_states: - for destination in destinations: - if destination == self.server_name: - continue - - if not self._federation_shard_config.should_handle( - self._instance_name, destination - ): - continue - - self._get_per_destination_queue(destination).send_presence(states) - def build_and_send_edu( self, destination: str, diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index e120dd1f4860..6460eb99521c 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -123,6 +123,14 @@ class BasePresenceHandler(abc.ABC): def __init__(self, hs: "HomeServer"): self.clock = hs.get_clock() self.store = hs.get_datastore() + self.presence_router = hs.get_presence_router() + self.state = hs.get_state_handler() + + self._federation = None + if hs.should_send_federation() or not hs.config.worker_app: + self._federation = hs.get_federation_sender() + + self._send_federation = hs.should_send_federation() self._busy_presence_enabled = hs.config.experimental.msc3026_enabled @@ -249,6 +257,29 @@ async def process_replication_rows(self, token, rows): """Process presence stream rows received over replication.""" pass + async def maybe_send_presence_to_interested_destinations( + self, states: List[UserPresenceState] + ): + """If this instance is a federation sender, send the states to all + destinations that are interested. + """ + + if not self._send_federation: + return + + # If this worker sends federation we must have a FederationSender. 
+ assert self._federation + + hosts_and_states = await get_interested_remotes( + self.store, + self.presence_router, + states, + self.state, + ) + + for destinations, states in hosts_and_states: + self._federation.send_presence_to_destinations(states, destinations) + class _NullContextManager(ContextManager[None]): """A context manager which does nothing.""" @@ -263,7 +294,6 @@ def __init__(self, hs): self.hs = hs self.is_mine_id = hs.is_mine_id - self.presence_router = hs.get_presence_router() self._presence_enabled = hs.config.use_presence # The number of ongoing syncs on this process, by user id. @@ -388,6 +418,9 @@ async def notify_from_replication(self, states, stream_id): users=users_to_states.keys(), ) + # If this is a federation sender, notify about presence updates. + await self.maybe_send_presence_to_interested_destinations(states) + async def process_replication_rows(self, token, rows): states = [ UserPresenceState( @@ -463,9 +496,6 @@ def __init__(self, hs: "HomeServer"): self.server_name = hs.hostname self.wheel_timer = WheelTimer() self.notifier = hs.get_notifier() - self.federation = hs.get_federation_sender() - self.state = hs.get_state_handler() - self.presence_router = hs.get_presence_router() self._presence_enabled = hs.config.use_presence federation_registry = hs.get_federation_registry() @@ -672,6 +702,13 @@ async def _update_states(self, new_states: Iterable[UserPresenceState]) -> None: self.unpersisted_users_changes |= {s.user_id for s in new_states} self.unpersisted_users_changes -= set(to_notify.keys()) + # Check if we need to resend any presence states to remote hosts. We + # only do this for states that haven't been updated in a while to + # ensure that the remote host doesn't time the presence state out. + # + # Note that since these are states that have *not* been updated, + # they won't get sent down the normal presence replication stream, + # and so we have to explicitly send them via the federation stream. to_federation_ping = { user_id: state for user_id, state in to_federation_ping.items() @@ -680,7 +717,19 @@ async def _update_states(self, new_states: Iterable[UserPresenceState]) -> None: if to_federation_ping: federation_presence_out_counter.inc(len(to_federation_ping)) - self._push_to_remotes(to_federation_ping.values()) + hosts_and_states = await get_interested_remotes( + self.store, + self.presence_router, + list(to_federation_ping.values()), + self.state, + ) + + # Since this is master we know that we have a federation sender or + # queue, and so this will be defined. + assert self._federation + + for destinations, states in hosts_and_states: + self._federation.send_presence_to_destinations(states, destinations) async def _handle_timeouts(self): """Checks the presence of users that have timed out and updates as @@ -920,15 +969,10 @@ async def _persist_and_notify(self, states): users=[UserID.from_string(u) for u in users_to_states], ) - self._push_to_remotes(states) - - def _push_to_remotes(self, states): - """Sends state updates to remote servers. - - Args: - states (list(UserPresenceState)) - """ - self.federation.send_presence(states) + # We only want to poke the local federation sender, if any, as other + # workers will receive the presence updates via the presence replication + # stream (which is updated by `store.update_presence`). 
+ await self.maybe_send_presence_to_interested_destinations(states) async def incoming_presence(self, origin, content): """Called when we receive a `m.presence` EDU from a remote server.""" @@ -1164,9 +1208,13 @@ async def _handle_state_delta(self, deltas): user_presence_states ) + # Since this is master we know that we have a federation sender or + # queue, and so this will be defined. + assert self._federation + # Send out user presence updates for each destination for destination, user_state_set in presence_destinations.items(): - self.federation.send_presence_to_destinations( + self._federation.send_presence_to_destinations( destinations=[destination], states=user_state_set ) diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index b7dbbfc27c18..a1a2b9aeccd3 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -50,6 +50,7 @@ def __init__(self, hs, auth_handler): self._auth_handler = auth_handler self._server_name = hs.hostname self._presence_stream = hs.get_event_sources().sources["presence"] + self._state = hs.get_state_handler() # We expose these as properties below in order to attach a helpful docstring. self._http_client = hs.get_simple_http_client() # type: SimpleHttpClient @@ -429,11 +430,13 @@ async def send_local_online_presence_to(self, users: Iterable[str]) -> None: UserID.from_string(user), from_key=None, include_offline=False ) - # Send to remote destinations - await make_deferred_yieldable( - # We pull the federation sender here as we can only do so on workers - # that support sending presence - self._hs.get_federation_sender().send_presence(presence_events) + # Send to remote destinations. + + # We pull out the presence handler here to break a cyclic + # dependency between the presence router and module API. + presence_handler = self._hs.get_presence_handler() + await presence_handler.maybe_send_presence_to_interested_destinations( + presence_events ) From e694a598f8c948ad177e897c5bedaa71a47add29 Mon Sep 17 00:00:00 2001 From: Denis Kasak Date: Mon, 19 Apr 2021 16:21:46 +0000 Subject: [PATCH 043/222] Sanity check identity server passed to bind/unbind. (#9802) Signed-off-by: Denis Kasak --- changelog.d/9802.bugfix | 1 + synapse/handlers/identity.py | 29 ++++++++++++++++++++++++++--- synapse/util/stringutils.py | 32 ++++++++++++++++++++++++++++++++ 3 files changed, 59 insertions(+), 3 deletions(-) create mode 100644 changelog.d/9802.bugfix diff --git a/changelog.d/9802.bugfix b/changelog.d/9802.bugfix new file mode 100644 index 000000000000..0c72f7be473f --- /dev/null +++ b/changelog.d/9802.bugfix @@ -0,0 +1 @@ +Add some sanity checks to identity server passed to 3PID bind/unbind endpoints. diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 87a8b89237e7..0b3b1fadb5df 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -15,7 +15,6 @@ # limitations under the License. 
"""Utilities for interacting with Identity Servers""" - import logging import urllib.parse from typing import Awaitable, Callable, Dict, List, Optional, Tuple @@ -34,7 +33,11 @@ from synapse.types import JsonDict, Requester from synapse.util import json_decoder from synapse.util.hash import sha256_and_url_safe_base64 -from synapse.util.stringutils import assert_valid_client_secret, random_string +from synapse.util.stringutils import ( + assert_valid_client_secret, + random_string, + valid_id_server_location, +) from ._base import BaseHandler @@ -172,6 +175,11 @@ async def bind_threepid( server with, if necessary. Required if use_v2 is true use_v2: Whether to use v2 Identity Service API endpoints. Defaults to True + Raises: + SynapseError: On any of the following conditions + - the supplied id_server is not a valid identity server name + - we failed to contact the supplied identity server + Returns: The response from the identity server """ @@ -181,6 +189,12 @@ async def bind_threepid( if id_access_token is None: use_v2 = False + if not valid_id_server_location(id_server): + raise SynapseError( + 400, + "id_server must be a valid hostname with optional port and path components", + ) + # Decide which API endpoint URLs to use headers = {} bind_data = {"sid": sid, "client_secret": client_secret, "mxid": mxid} @@ -269,12 +283,21 @@ async def try_unbind_threepid_with_id_server( id_server: Identity server to unbind from Raises: - SynapseError: If we failed to contact the identity server + SynapseError: On any of the following conditions + - the supplied id_server is not a valid identity server name + - we failed to contact the supplied identity server Returns: True on success, otherwise False if the identity server doesn't support unbinding """ + + if not valid_id_server_location(id_server): + raise SynapseError( + 400, + "id_server must be a valid hostname with optional port and path components", + ) + url = "https://%s/_matrix/identity/api/v1/3pid/unbind" % (id_server,) url_bytes = "/_matrix/identity/api/v1/3pid/unbind".encode("ascii") diff --git a/synapse/util/stringutils.py b/synapse/util/stringutils.py index c0e6fb9a60a2..cd82777f80eb 100644 --- a/synapse/util/stringutils.py +++ b/synapse/util/stringutils.py @@ -132,6 +132,38 @@ def parse_and_validate_server_name(server_name: str) -> Tuple[str, Optional[int] return host, port +def valid_id_server_location(id_server: str) -> bool: + """Check whether an identity server location, such as the one passed as the + `id_server` parameter to `/_matrix/client/r0/account/3pid/bind`, is valid. + + A valid identity server location consists of a valid hostname and optional + port number, optionally followed by any number of `/` delimited path + components, without any fragment or query string parts. + + Args: + id_server: identity server location string to validate + + Returns: + True if valid, False otherwise. + """ + + components = id_server.split("/", 1) + + host = components[0] + + try: + parse_and_validate_server_name(host) + except ValueError: + return False + + if len(components) < 2: + # no path + return True + + path = components[1] + return "#" not in path and "?" 
not in path
+
+
 def parse_and_validate_mxc_uri(mxc: str) -> Tuple[str, Optional[int], str]:
     """Parse the given string as an MXC URI

From 71f0623de968f07292d5a092e9197f7513ab6cde Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Mon, 19 Apr 2021 19:16:34 +0100
Subject: [PATCH 044/222] Port "Allow users to click account renewal links
 multiple times without hitting an 'Invalid Token' page #74" from
 synapse-dinsic (#9832)

This attempts to be a direct port of
https://github.com/matrix-org/synapse-dinsic/pull/74 to mainline. There was
some fiddling required to deal with the changes that have been made to
mainline since (mainly dealing with the split of `RegistrationWorkerStore`
from `RegistrationStore`, and the changes made to `self.make_request` in
test code).
---
 UPGRADE.rst                                   |  23 +++
 changelog.d/9832.feature                      |   1 +
 docs/sample_config.yaml                       | 148 +++++++++-------
 synapse/api/auth.py                           |   6 +-
 synapse/config/_base.pyi                      |   2 +
 synapse/config/account_validity.py            | 165 ++++++++++++++++++
 synapse/config/emailconfig.py                 |   2 +-
 synapse/config/homeserver.py                  |   3 +-
 synapse/config/registration.py                | 129 --------------
 synapse/handlers/account_validity.py          | 101 ++++++++---
 synapse/handlers/deactivate_account.py        |   4 +-
 synapse/push/pusherpool.py                    |   8 +-
 .../templates/account_previously_renewed.html |   1 +
 synapse/res/templates/account_renewed.html    |   2 +-
 .../rest/client/v2_alpha/account_validity.py  |  32 +++-
 .../storage/databases/main/registration.py    |  62 +++++--
 .../12account_validity_token_used_ts_ms.sql   |  18 ++
 tests/rest/client/v2_alpha/test_register.py   |  52 ++++--
 18 files changed, 496 insertions(+), 263 deletions(-)
 create mode 100644 changelog.d/9832.feature
 create mode 100644 synapse/config/account_validity.py
 create mode 100644 synapse/res/templates/account_previously_renewed.html
 create mode 100644 synapse/storage/databases/main/schema/delta/59/12account_validity_token_used_ts_ms.sql

diff --git a/UPGRADE.rst b/UPGRADE.rst
index 665821d4ef04..eff976017dd5 100644
--- a/UPGRADE.rst
+++ b/UPGRADE.rst
@@ -85,6 +85,29 @@ for example:
     wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
     dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
 
+Upgrading to v1.33.0
+====================
+
+Account Validity HTML templates can now display a user's expiration date
+------------------------------------------------------------------------
+
+This may affect you if you have enabled the account validity feature, and have made use of a
+custom HTML template specified by the ``account_validity.template_dir`` or ``account_validity.account_renewed_html_path``
+Synapse config options.
+
+The template can now accept an ``expiration_ts`` variable, which represents the unix timestamp in milliseconds for the
+future date until which their account has been renewed. See the
+`default template `_
+for an example of usage.
+
+Also note that a new HTML template, ``account_previously_renewed.html``, has been added. This is shown to users
+when they attempt to renew their account with a valid renewal token that has already been used before. The default
+template contents can be found
+`here `_,
+and can also accept an ``expiration_ts`` variable. This template replaces the error message users would previously see
+upon attempting to use a valid renewal token more than once.
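As a rough illustration (this helper is not part of Synapse, whose templates are rendered through Jinja2), the millisecond ``expiration_ts`` passed to these templates converts to a readable date with the standard unix-timestamp arithmetic::

    from datetime import datetime, timezone

    def format_expiration_ts(expiration_ts: int) -> str:
        # expiration_ts is milliseconds since the epoch, so divide by 1000
        # before handing it to datetime.
        expiry = datetime.fromtimestamp(expiration_ts / 1000, tz=timezone.utc)
        return expiry.strftime("%d %B %Y")

    # Prints "Your account has been renewed until 01 January 2025."
    print("Your account has been renewed until %s."
          % format_expiration_ts(1735689600000))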
+ + Upgrading to v1.32.0 ==================== diff --git a/changelog.d/9832.feature b/changelog.d/9832.feature new file mode 100644 index 000000000000..e76395fbe886 --- /dev/null +++ b/changelog.d/9832.feature @@ -0,0 +1 @@ +Don't return an error when a user attempts to renew their account multiple times with the same token. Instead, state when their account is set to expire. This change concerns the optional account validity feature. \ No newline at end of file diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 9182dcd98716..d260d762590a 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1175,69 +1175,6 @@ url_preview_accept_language: # #enable_registration: false -# Optional account validity configuration. This allows for accounts to be denied -# any request after a given period. -# -# Once this feature is enabled, Synapse will look for registered users without an -# expiration date at startup and will add one to every account it found using the -# current settings at that time. -# This means that, if a validity period is set, and Synapse is restarted (it will -# then derive an expiration date from the current validity period), and some time -# after that the validity period changes and Synapse is restarted, the users' -# expiration dates won't be updated unless their account is manually renewed. This -# date will be randomly selected within a range [now + period - d ; now + period], -# where d is equal to 10% of the validity period. -# -account_validity: - # The account validity feature is disabled by default. Uncomment the - # following line to enable it. - # - #enabled: true - - # The period after which an account is valid after its registration. When - # renewing the account, its validity period will be extended by this amount - # of time. This parameter is required when using the account validity - # feature. - # - #period: 6w - - # The amount of time before an account's expiry date at which Synapse will - # send an email to the account's email address with a renewal link. By - # default, no such emails are sent. - # - # If you enable this setting, you will also need to fill out the 'email' and - # 'public_baseurl' configuration sections. - # - #renew_at: 1w - - # The subject of the email sent out with the renewal link. '%(app)s' can be - # used as a placeholder for the 'app_name' parameter from the 'email' - # section. - # - # Note that the placeholder must be written '%(app)s', including the - # trailing 's'. - # - # If this is not set, a default value is used. - # - #renew_email_subject: "Renew your %(app)s account" - - # Directory in which Synapse will try to find templates for the HTML files to - # serve to the user when trying to renew an account. If not set, default - # templates from within the Synapse package will be used. - # - #template_dir: "res/templates" - - # File within 'template_dir' giving the HTML to be displayed to the user after - # they successfully renewed their account. If not set, default text is used. - # - #account_renewed_html_path: "account_renewed.html" - - # File within 'template_dir' giving the HTML to be displayed when the user - # tries to renew an account with an invalid renewal token. If not set, - # default text is used. - # - #invalid_token_html_path: "invalid_token.html" - # Time that a user's session remains valid for, after they log in. # # Note that this is not currently compatible with guest logins. 
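The randomised expiration described in the account validity comments above (a date picked in the range ``[now + period - d, now + period]``, where ``d`` is 10% of the validity period) can be sketched as below; this is an illustration of the documented behaviour, not the code Synapse runs:

    import random
    import time

    def pick_expiration_ts(period_ms: int) -> int:
        # Subtract a random delta of up to 10% of the period so accounts
        # stamped at startup do not all expire at the same instant.
        now_ms = int(time.time() * 1000)
        max_delta_ms = int(period_ms * 10 / 100)
        return now_ms + period_ms - random.randint(0, max_delta_ms)

    # A 6-week validity period, matching "period: 6w" in the sample config:
    print(pick_expiration_ts(6 * 7 * 24 * 60 * 60 * 1000))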
@@ -1432,6 +1369,91 @@ account_threepid_delegates: #auto_join_rooms_for_guests: false +## Account Validity ## + +# Optional account validity configuration. This allows for accounts to be denied +# any request after a given period. +# +# Once this feature is enabled, Synapse will look for registered users without an +# expiration date at startup and will add one to every account it found using the +# current settings at that time. +# This means that, if a validity period is set, and Synapse is restarted (it will +# then derive an expiration date from the current validity period), and some time +# after that the validity period changes and Synapse is restarted, the users' +# expiration dates won't be updated unless their account is manually renewed. This +# date will be randomly selected within a range [now + period - d ; now + period], +# where d is equal to 10% of the validity period. +# +account_validity: + # The account validity feature is disabled by default. Uncomment the + # following line to enable it. + # + #enabled: true + + # The period after which an account is valid after its registration. When + # renewing the account, its validity period will be extended by this amount + # of time. This parameter is required when using the account validity + # feature. + # + #period: 6w + + # The amount of time before an account's expiry date at which Synapse will + # send an email to the account's email address with a renewal link. By + # default, no such emails are sent. + # + # If you enable this setting, you will also need to fill out the 'email' and + # 'public_baseurl' configuration sections. + # + #renew_at: 1w + + # The subject of the email sent out with the renewal link. '%(app)s' can be + # used as a placeholder for the 'app_name' parameter from the 'email' + # section. + # + # Note that the placeholder must be written '%(app)s', including the + # trailing 's'. + # + # If this is not set, a default value is used. + # + #renew_email_subject: "Renew your %(app)s account" + + # Directory in which Synapse will try to find templates for the HTML files to + # serve to the user when trying to renew an account. If not set, default + # templates from within the Synapse package will be used. + # + # The currently available templates are: + # + # * account_renewed.html: Displayed to the user after they have successfully + # renewed their account. + # + # * account_previously_renewed.html: Displayed to the user if they attempt to + # renew their account with a token that is valid, but that has already + # been used. In this case the account is not renewed again. + # + # * invalid_token.html: Displayed to the user when they try to renew an account + # with an unknown or invalid renewal token. + # + # See https://github.com/matrix-org/synapse/tree/master/synapse/res/templates for + # default template contents. + # + # The file name of some of these templates can be configured below for legacy + # reasons. + # + #template_dir: "res/templates" + + # A custom file name for the 'account_renewed.html' template. + # + # If not set, the file is assumed to be named "account_renewed.html". + # + #account_renewed_html_path: "account_renewed.html" + + # A custom file name for the 'invalid_token.html' template. + # + # If not set, the file is assumed to be named "invalid_token.html". 
+ # + #invalid_token_html_path: "invalid_token.html" + + ## Metrics ### # Enable collection and rendering of performance metrics diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 6c13f53957c4..872fd100cdd0 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -79,7 +79,9 @@ def __init__(self, hs): self._auth_blocking = AuthBlocking(self.hs) - self._account_validity = hs.config.account_validity + self._account_validity_enabled = ( + hs.config.account_validity.account_validity_enabled + ) self._track_appservice_user_ips = hs.config.track_appservice_user_ips self._macaroon_secret_key = hs.config.macaroon_secret_key @@ -222,7 +224,7 @@ async def get_user_by_req( shadow_banned = user_info.shadow_banned # Deny the request if the user account has expired. - if self._account_validity.enabled and not allow_expired: + if self._account_validity_enabled and not allow_expired: if await self.store.is_account_expired( user_info.user_id, self.clock.time_msec() ): diff --git a/synapse/config/_base.pyi b/synapse/config/_base.pyi index e896fd34e23c..ddec356a07b3 100644 --- a/synapse/config/_base.pyi +++ b/synapse/config/_base.pyi @@ -1,6 +1,7 @@ from typing import Any, Iterable, List, Optional from synapse.config import ( + account_validity, api, appservice, auth, @@ -59,6 +60,7 @@ class RootConfig: captcha: captcha.CaptchaConfig voip: voip.VoipConfig registration: registration.RegistrationConfig + account_validity: account_validity.AccountValidityConfig metrics: metrics.MetricsConfig api: api.ApiConfig appservice: appservice.AppServiceConfig diff --git a/synapse/config/account_validity.py b/synapse/config/account_validity.py new file mode 100644 index 000000000000..c58a7d95a785 --- /dev/null +++ b/synapse/config/account_validity.py @@ -0,0 +1,165 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from synapse.config._base import Config, ConfigError + + +class AccountValidityConfig(Config): + section = "account_validity" + + def read_config(self, config, **kwargs): + account_validity_config = config.get("account_validity") or {} + self.account_validity_enabled = account_validity_config.get("enabled", False) + self.account_validity_renew_by_email_enabled = ( + "renew_at" in account_validity_config + ) + + if self.account_validity_enabled: + if "period" in account_validity_config: + self.account_validity_period = self.parse_duration( + account_validity_config["period"] + ) + else: + raise ConfigError("'period' is required when using account validity") + + if "renew_at" in account_validity_config: + self.account_validity_renew_at = self.parse_duration( + account_validity_config["renew_at"] + ) + + if "renew_email_subject" in account_validity_config: + self.account_validity_renew_email_subject = account_validity_config[ + "renew_email_subject" + ] + else: + self.account_validity_renew_email_subject = "Renew your %(app)s account" + + self.account_validity_startup_job_max_delta = ( + self.account_validity_period * 10.0 / 100.0 + ) + + if self.account_validity_renew_by_email_enabled: + if not self.public_baseurl: + raise ConfigError("Can't send renewal emails without 'public_baseurl'") + + # Load account validity templates. + account_validity_template_dir = account_validity_config.get("template_dir") + + account_renewed_template_filename = account_validity_config.get( + "account_renewed_html_path", "account_renewed.html" + ) + invalid_token_template_filename = account_validity_config.get( + "invalid_token_html_path", "invalid_token.html" + ) + + # Read and store template content + ( + self.account_validity_account_renewed_template, + self.account_validity_account_previously_renewed_template, + self.account_validity_invalid_token_template, + ) = self.read_templates( + [ + account_renewed_template_filename, + "account_previously_renewed.html", + invalid_token_template_filename, + ], + account_validity_template_dir, + ) + + def generate_config_section(self, **kwargs): + return """\ + ## Account Validity ## + + # Optional account validity configuration. This allows for accounts to be denied + # any request after a given period. + # + # Once this feature is enabled, Synapse will look for registered users without an + # expiration date at startup and will add one to every account it found using the + # current settings at that time. + # This means that, if a validity period is set, and Synapse is restarted (it will + # then derive an expiration date from the current validity period), and some time + # after that the validity period changes and Synapse is restarted, the users' + # expiration dates won't be updated unless their account is manually renewed. This + # date will be randomly selected within a range [now + period - d ; now + period], + # where d is equal to 10% of the validity period. + # + account_validity: + # The account validity feature is disabled by default. Uncomment the + # following line to enable it. + # + #enabled: true + + # The period after which an account is valid after its registration. When + # renewing the account, its validity period will be extended by this amount + # of time. This parameter is required when using the account validity + # feature. + # + #period: 6w + + # The amount of time before an account's expiry date at which Synapse will + # send an email to the account's email address with a renewal link. By + # default, no such emails are sent. 
+ # + # If you enable this setting, you will also need to fill out the 'email' and + # 'public_baseurl' configuration sections. + # + #renew_at: 1w + + # The subject of the email sent out with the renewal link. '%(app)s' can be + # used as a placeholder for the 'app_name' parameter from the 'email' + # section. + # + # Note that the placeholder must be written '%(app)s', including the + # trailing 's'. + # + # If this is not set, a default value is used. + # + #renew_email_subject: "Renew your %(app)s account" + + # Directory in which Synapse will try to find templates for the HTML files to + # serve to the user when trying to renew an account. If not set, default + # templates from within the Synapse package will be used. + # + # The currently available templates are: + # + # * account_renewed.html: Displayed to the user after they have successfully + # renewed their account. + # + # * account_previously_renewed.html: Displayed to the user if they attempt to + # renew their account with a token that is valid, but that has already + # been used. In this case the account is not renewed again. + # + # * invalid_token.html: Displayed to the user when they try to renew an account + # with an unknown or invalid renewal token. + # + # See https://github.com/matrix-org/synapse/tree/master/synapse/res/templates for + # default template contents. + # + # The file name of some of these templates can be configured below for legacy + # reasons. + # + #template_dir: "res/templates" + + # A custom file name for the 'account_renewed.html' template. + # + # If not set, the file is assumed to be named "account_renewed.html". + # + #account_renewed_html_path: "account_renewed.html" + + # A custom file name for the 'invalid_token.html' template. + # + # If not set, the file is assumed to be named "invalid_token.html". + # + #invalid_token_html_path: "invalid_token.html" + """ diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index c587939c7a7a..5564d7d097d4 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -299,7 +299,7 @@ def read_config(self, config, **kwargs): "client_base_url", email_config.get("riot_base_url", None) ) - if self.account_validity.renew_by_email_enabled: + if self.account_validity_renew_by_email_enabled: expiry_template_html = email_config.get( "expiry_template_html", "notice_expiry.html" ) diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py index 130953506825..58e3bcd5111d 100644 --- a/synapse/config/homeserver.py +++ b/synapse/config/homeserver.py @@ -12,8 +12,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - from ._base import RootConfig +from .account_validity import AccountValidityConfig from .api import ApiConfig from .appservice import AppServiceConfig from .auth import AuthConfig @@ -68,6 +68,7 @@ class HomeServerConfig(RootConfig): CaptchaConfig, VoipConfig, RegistrationConfig, + AccountValidityConfig, MetricsConfig, ApiConfig, AppServiceConfig, diff --git a/synapse/config/registration.py b/synapse/config/registration.py index f8a2768af835..e6f52b4f40ea 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -12,74 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import os - -import pkg_resources - from synapse.api.constants import RoomCreationPreset from synapse.config._base import Config, ConfigError from synapse.types import RoomAlias, UserID from synapse.util.stringutils import random_string_with_symbols, strtobool -class AccountValidityConfig(Config): - section = "accountvalidity" - - def __init__(self, config, synapse_config): - if config is None: - return - super().__init__() - self.enabled = config.get("enabled", False) - self.renew_by_email_enabled = "renew_at" in config - - if self.enabled: - if "period" in config: - self.period = self.parse_duration(config["period"]) - else: - raise ConfigError("'period' is required when using account validity") - - if "renew_at" in config: - self.renew_at = self.parse_duration(config["renew_at"]) - - if "renew_email_subject" in config: - self.renew_email_subject = config["renew_email_subject"] - else: - self.renew_email_subject = "Renew your %(app)s account" - - self.startup_job_max_delta = self.period * 10.0 / 100.0 - - if self.renew_by_email_enabled: - if "public_baseurl" not in synapse_config: - raise ConfigError("Can't send renewal emails without 'public_baseurl'") - - template_dir = config.get("template_dir") - - if not template_dir: - template_dir = pkg_resources.resource_filename("synapse", "res/templates") - - if "account_renewed_html_path" in config: - file_path = os.path.join(template_dir, config["account_renewed_html_path"]) - - self.account_renewed_html_content = self.read_file( - file_path, "account_validity.account_renewed_html_path" - ) - else: - self.account_renewed_html_content = ( - "Your account has been successfully renewed." - ) - - if "invalid_token_html_path" in config: - file_path = os.path.join(template_dir, config["invalid_token_html_path"]) - - self.invalid_token_html_content = self.read_file( - file_path, "account_validity.invalid_token_html_path" - ) - else: - self.invalid_token_html_content = ( - "Invalid renewal token." - ) - - class RegistrationConfig(Config): section = "registration" @@ -92,10 +30,6 @@ def read_config(self, config, **kwargs): str(config["disable_registration"]) ) - self.account_validity = AccountValidityConfig( - config.get("account_validity") or {}, config - ) - self.registrations_require_3pid = config.get("registrations_require_3pid", []) self.allowed_local_3pids = config.get("allowed_local_3pids", []) self.enable_3pid_lookup = config.get("enable_3pid_lookup", True) @@ -207,69 +141,6 @@ def generate_config_section(self, generate_secrets=False, **kwargs): # #enable_registration: false - # Optional account validity configuration. This allows for accounts to be denied - # any request after a given period. - # - # Once this feature is enabled, Synapse will look for registered users without an - # expiration date at startup and will add one to every account it found using the - # current settings at that time. - # This means that, if a validity period is set, and Synapse is restarted (it will - # then derive an expiration date from the current validity period), and some time - # after that the validity period changes and Synapse is restarted, the users' - # expiration dates won't be updated unless their account is manually renewed. This - # date will be randomly selected within a range [now + period - d ; now + period], - # where d is equal to 10%% of the validity period. - # - account_validity: - # The account validity feature is disabled by default. Uncomment the - # following line to enable it. 
- # - #enabled: true - - # The period after which an account is valid after its registration. When - # renewing the account, its validity period will be extended by this amount - # of time. This parameter is required when using the account validity - # feature. - # - #period: 6w - - # The amount of time before an account's expiry date at which Synapse will - # send an email to the account's email address with a renewal link. By - # default, no such emails are sent. - # - # If you enable this setting, you will also need to fill out the 'email' and - # 'public_baseurl' configuration sections. - # - #renew_at: 1w - - # The subject of the email sent out with the renewal link. '%%(app)s' can be - # used as a placeholder for the 'app_name' parameter from the 'email' - # section. - # - # Note that the placeholder must be written '%%(app)s', including the - # trailing 's'. - # - # If this is not set, a default value is used. - # - #renew_email_subject: "Renew your %%(app)s account" - - # Directory in which Synapse will try to find templates for the HTML files to - # serve to the user when trying to renew an account. If not set, default - # templates from within the Synapse package will be used. - # - #template_dir: "res/templates" - - # File within 'template_dir' giving the HTML to be displayed to the user after - # they successfully renewed their account. If not set, default text is used. - # - #account_renewed_html_path: "account_renewed.html" - - # File within 'template_dir' giving the HTML to be displayed when the user - # tries to renew an account with an invalid renewal token. If not set, - # default text is used. - # - #invalid_token_html_path: "invalid_token.html" - # Time that a user's session remains valid for, after they log in. # # Note that this is not currently compatible with guest logins. diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py index 66ce7e8b8318..5b927f10b3a9 100644 --- a/synapse/handlers/account_validity.py +++ b/synapse/handlers/account_validity.py @@ -17,7 +17,7 @@ import logging from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText -from typing import TYPE_CHECKING, List, Optional +from typing import TYPE_CHECKING, List, Optional, Tuple from synapse.api.errors import StoreError, SynapseError from synapse.logging.context import make_deferred_yieldable @@ -39,28 +39,44 @@ def __init__(self, hs: "HomeServer"): self.sendmail = self.hs.get_sendmail() self.clock = self.hs.get_clock() - self._account_validity = self.hs.config.account_validity + self._account_validity_enabled = ( + hs.config.account_validity.account_validity_enabled + ) + self._account_validity_renew_by_email_enabled = ( + hs.config.account_validity.account_validity_renew_by_email_enabled + ) + + self._account_validity_period = None + if self._account_validity_enabled: + self._account_validity_period = ( + hs.config.account_validity.account_validity_period + ) if ( - self._account_validity.enabled - and self._account_validity.renew_by_email_enabled + self._account_validity_enabled + and self._account_validity_renew_by_email_enabled ): # Don't do email-specific configuration if renewal by email is disabled. 
- self._template_html = self.config.account_validity_template_html - self._template_text = self.config.account_validity_template_text + self._template_html = ( + hs.config.account_validity.account_validity_template_html + ) + self._template_text = ( + hs.config.account_validity.account_validity_template_text + ) + account_validity_renew_email_subject = ( + hs.config.account_validity.account_validity_renew_email_subject + ) try: - app_name = self.hs.config.email_app_name + app_name = hs.config.email_app_name - self._subject = self._account_validity.renew_email_subject % { - "app": app_name - } + self._subject = account_validity_renew_email_subject % {"app": app_name} - self._from_string = self.hs.config.email_notif_from % {"app": app_name} + self._from_string = hs.config.email_notif_from % {"app": app_name} except Exception: # If substitution failed, fall back to the bare strings. - self._subject = self._account_validity.renew_email_subject - self._from_string = self.hs.config.email_notif_from + self._subject = account_validity_renew_email_subject + self._from_string = hs.config.email_notif_from self._raw_from = email.utils.parseaddr(self._from_string)[1] @@ -220,50 +236,87 @@ async def _get_renewal_token(self, user_id: str) -> str: attempts += 1 raise StoreError(500, "Couldn't generate a unique string as refresh string.") - async def renew_account(self, renewal_token: str) -> bool: + async def renew_account(self, renewal_token: str) -> Tuple[bool, bool, int]: """Renews the account attached to a given renewal token by pushing back the expiration date by the current validity period in the server's configuration. + If it turns out that the token is valid but has already been used, then the + token is considered stale. A token is stale if the 'token_used_ts_ms' db column + is non-null. + Args: renewal_token: Token sent with the renewal request. Returns: - Whether the provided token is valid. + A tuple containing: + * A bool representing whether the token is valid and unused. + * A bool which is `True` if the token is valid, but stale. + * An int representing the user's expiry timestamp as milliseconds since the + epoch, or 0 if the token was invalid. """ try: - user_id = await self.store.get_user_from_renewal_token(renewal_token) + ( + user_id, + current_expiration_ts, + token_used_ts, + ) = await self.store.get_user_from_renewal_token(renewal_token) except StoreError: - return False + return False, False, 0 + + # Check whether this token has already been used. + if token_used_ts: + logger.info( + "User '%s' attempted to use previously used token '%s' to renew account", + user_id, + renewal_token, + ) + return False, True, current_expiration_ts logger.debug("Renewing an account for user %s", user_id) - await self.renew_account_for_user(user_id) - return True + # Renew the account. Pass the renewal_token here so that it is not cleared. + # We want to keep the token around in case the user attempts to renew their + # account with the same token twice (clicking the email link twice). + # + # In that case, the token will be accepted, but the account's expiration ts + # will remain unchanged. 
+ new_expiration_ts = await self.renew_account_for_user( + user_id, renewal_token=renewal_token + ) + + return True, False, new_expiration_ts async def renew_account_for_user( self, user_id: str, expiration_ts: Optional[int] = None, email_sent: bool = False, + renewal_token: Optional[str] = None, ) -> int: """Renews the account attached to a given user by pushing back the expiration date by the current validity period in the server's configuration. Args: - renewal_token: Token sent with the renewal request. + user_id: The ID of the user to renew. expiration_ts: New expiration date. Defaults to now + validity period. - email_sen: Whether an email has been sent for this validity period. - Defaults to False. + email_sent: Whether an email has been sent for this validity period. + renewal_token: Token sent with the renewal request. The user's token + will be cleared if this is None. Returns: New expiration date for this account, as a timestamp in milliseconds since epoch. """ + now = self.clock.time_msec() if expiration_ts is None: - expiration_ts = self.clock.time_msec() + self._account_validity.period + expiration_ts = now + self._account_validity_period await self.store.set_account_validity_for_user( - user_id=user_id, expiration_ts=expiration_ts, email_sent=email_sent + user_id=user_id, + expiration_ts=expiration_ts, + email_sent=email_sent, + renewal_token=renewal_token, + token_used_ts=now, ) return expiration_ts diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index 3f6f9f7f3d57..45d2404ddebf 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -49,7 +49,9 @@ def __init__(self, hs: "HomeServer"): if hs.config.run_background_tasks: hs.get_reactor().callWhenRunning(self._start_user_parting) - self._account_validity_enabled = hs.config.account_validity.enabled + self._account_validity_enabled = ( + hs.config.account_validity.account_validity_enabled + ) async def deactivate_account( self, diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 564a5ed0df53..579fcdf47250 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -62,7 +62,9 @@ def __init__(self, hs: "HomeServer"): self.store = self.hs.get_datastore() self.clock = self.hs.get_clock() - self._account_validity = hs.config.account_validity + self._account_validity_enabled = ( + hs.config.account_validity.account_validity_enabled + ) # We shard the handling of push notifications by user ID. self._pusher_shard_config = hs.config.push.pusher_shard_config @@ -236,7 +238,7 @@ async def _on_new_notifications(self, max_token: RoomStreamToken) -> None: for u in users_affected: # Don't push if the user account has expired - if self._account_validity.enabled: + if self._account_validity_enabled: expired = await self.store.is_account_expired( u, self.clock.time_msec() ) @@ -266,7 +268,7 @@ async def on_new_receipts( for u in users_affected: # Don't push if the user account has expired - if self._account_validity.enabled: + if self._account_validity_enabled: expired = await self.store.is_account_expired( u, self.clock.time_msec() ) diff --git a/synapse/res/templates/account_previously_renewed.html b/synapse/res/templates/account_previously_renewed.html new file mode 100644 index 000000000000..b751359bdfb7 --- /dev/null +++ b/synapse/res/templates/account_previously_renewed.html @@ -0,0 +1 @@ +Your account is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}. 
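The template added above (and account_renewed.html, updated just below) is rendered with a single `expiration_ts` context variable, passed through the `format_ts` Jinja filter that Synapse registers when loading templates. A minimal, self-contained sketch of that rendering, assuming a `format_ts` filter that simply strftime-formats a milliseconds-since-epoch timestamp (an approximation for illustration, not Synapse's actual helper):

    import time

    import jinja2

    def format_ts(value, format):
        # Treat `value` as milliseconds since the epoch and format it in
        # local time, e.g. 1718000000000 -> something like "10-06-2024".
        return time.strftime(format, time.localtime(value / 1000))

    env = jinja2.Environment(
        loader=jinja2.DictLoader(
            {
                "account_previously_renewed.html": (
                    "Your account is valid until "
                    '{{ expiration_ts|format_ts("%d-%m-%Y") }}.'
                )
            }
        )
    )
    env.filters["format_ts"] = format_ts

    template = env.get_template("account_previously_renewed.html")
    # Prints something like: Your account is valid until 10-06-2024.
    print(template.render(expiration_ts=1718000000000))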
diff --git a/synapse/res/templates/account_renewed.html b/synapse/res/templates/account_renewed.html index 894da030afb7..e8c0f52f0542 100644 --- a/synapse/res/templates/account_renewed.html +++ b/synapse/res/templates/account_renewed.html @@ -1 +1 @@ -Your account has been successfully renewed. +Your account has been successfully renewed and is valid until {{ expiration_ts|format_ts("%d-%m-%Y") }}. diff --git a/synapse/rest/client/v2_alpha/account_validity.py b/synapse/rest/client/v2_alpha/account_validity.py index 0ad07fb89526..2d1ad3d3fbf0 100644 --- a/synapse/rest/client/v2_alpha/account_validity.py +++ b/synapse/rest/client/v2_alpha/account_validity.py @@ -36,24 +36,40 @@ def __init__(self, hs): self.hs = hs self.account_activity_handler = hs.get_account_validity_handler() self.auth = hs.get_auth() - self.success_html = hs.config.account_validity.account_renewed_html_content - self.failure_html = hs.config.account_validity.invalid_token_html_content + self.account_renewed_template = ( + hs.config.account_validity.account_validity_account_renewed_template + ) + self.account_previously_renewed_template = ( + hs.config.account_validity.account_validity_account_previously_renewed_template + ) + self.invalid_token_template = ( + hs.config.account_validity.account_validity_invalid_token_template + ) async def on_GET(self, request): if b"token" not in request.args: raise SynapseError(400, "Missing renewal token") renewal_token = request.args[b"token"][0] - token_valid = await self.account_activity_handler.renew_account( + ( + token_valid, + token_stale, + expiration_ts, + ) = await self.account_activity_handler.renew_account( renewal_token.decode("utf8") ) if token_valid: status_code = 200 - response = self.success_html + response = self.account_renewed_template.render(expiration_ts=expiration_ts) + elif token_stale: + status_code = 200 + response = self.account_previously_renewed_template.render( + expiration_ts=expiration_ts + ) else: status_code = 404 - response = self.failure_html + response = self.invalid_token_template.render(expiration_ts=expiration_ts) respond_with_html(request, status_code, response) @@ -71,10 +87,12 @@ def __init__(self, hs): self.hs = hs self.account_activity_handler = hs.get_account_validity_handler() self.auth = hs.get_auth() - self.account_validity = self.hs.config.account_validity + self.account_validity_renew_by_email_enabled = ( + hs.config.account_validity.account_validity_renew_by_email_enabled + ) async def on_POST(self, request): - if not self.account_validity.renew_by_email_enabled: + if not self.account_validity_renew_by_email_enabled: raise AuthError( 403, "Account renewal via email is disabled on this server." 
            )
diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py
index 833214b7e07b..6e5ee557d2ef 100644
--- a/synapse/storage/databases/main/registration.py
+++ b/synapse/storage/databases/main/registration.py
@@ -91,13 +91,25 @@ def __init__(
             id_column=None,
         )

-        self._account_validity = hs.config.account_validity
-        if hs.config.run_background_tasks and self._account_validity.enabled:
-            self._clock.call_later(
-                0.0,
-                self._set_expiration_date_when_missing,
+        self._account_validity_enabled = (
+            hs.config.account_validity.account_validity_enabled
+        )
+        self._account_validity_period = None
+        self._account_validity_startup_job_max_delta = None
+        if self._account_validity_enabled:
+            self._account_validity_period = (
+                hs.config.account_validity.account_validity_period
+            )
+            self._account_validity_startup_job_max_delta = (
+                hs.config.account_validity.account_validity_startup_job_max_delta
             )

+            if hs.config.run_background_tasks:
+                self._clock.call_later(
+                    0.0,
+                    self._set_expiration_date_when_missing,
+                )
+
         # Create a background job for culling expired 3PID validity tokens
         if hs.config.run_background_tasks:
             self._clock.looping_call(
@@ -194,6 +206,7 @@ async def set_account_validity_for_user(
         expiration_ts: int,
         email_sent: bool,
         renewal_token: Optional[str] = None,
+        token_used_ts: Optional[int] = None,
     ) -> None:
         """Updates the account validity properties of the given account, with the
         given values.
@@ -207,6 +220,8 @@ async def set_account_validity_for_user(
                 period.
             renewal_token: Renewal token the user can use to extend the validity
                 of their account. Defaults to no token.
+            token_used_ts: A timestamp of when the current token was used to renew
+                the account.
         """

         def set_account_validity_for_user_txn(txn):
@@ -218,6 +233,7 @@ def set_account_validity_for_user_txn(txn):
                     "expiration_ts_ms": expiration_ts,
                     "email_sent": email_sent,
                     "renewal_token": renewal_token,
+                    "token_used_ts_ms": token_used_ts,
                 },
             )
             self._invalidate_cache_and_stream(
@@ -231,7 +247,7 @@ def set_account_validity_for_user_txn(txn):
     async def set_renewal_token_for_user(
         self, user_id: str, renewal_token: str
     ) -> None:
-        """Defines a renewal token for a given user.
+        """Defines a renewal token for a given user, and clears the token_used timestamp.

         Args:
             user_id: ID of the user to set the renewal token for.
@@ -244,26 +260,40 @@ async def set_renewal_token_for_user(
         await self.db_pool.simple_update_one(
             table="account_validity",
             keyvalues={"user_id": user_id},
-            updatevalues={"renewal_token": renewal_token},
+            updatevalues={"renewal_token": renewal_token, "token_used_ts_ms": None},
             desc="set_renewal_token_for_user",
         )

-    async def get_user_from_renewal_token(self, renewal_token: str) -> str:
-        """Get a user ID from a renewal token.
+    async def get_user_from_renewal_token(
+        self, renewal_token: str
+    ) -> Tuple[str, int, Optional[int]]:
+        """Get a user ID and renewal status from a renewal token.

         Args:
             renewal_token: The renewal token to perform the lookup with.

         Returns:
-            The ID of the user to which the token belongs.
+            A tuple containing the following values:
+                * The ID of the user to which the token belongs.
+                * An int representing the user's expiry timestamp as milliseconds since the
+                    epoch, or 0 if the token was invalid.
+                * An optional int representing the timestamp of when the user last renewed
+                    their account, as milliseconds since the epoch. None if the account
+                    has not been renewed using the current token yet.
""" - return await self.db_pool.simple_select_one_onecol( + ret_dict = await self.db_pool.simple_select_one( table="account_validity", keyvalues={"renewal_token": renewal_token}, - retcol="user_id", + retcols=["user_id", "expiration_ts_ms", "token_used_ts_ms"], desc="get_user_from_renewal_token", ) + return ( + ret_dict["user_id"], + ret_dict["expiration_ts_ms"], + ret_dict["token_used_ts_ms"], + ) + async def get_renewal_token_for_user(self, user_id: str) -> str: """Get the renewal token associated with a given user ID. @@ -302,7 +332,7 @@ def select_users_txn(txn, now_ms, renew_at): "get_users_expiring_soon", select_users_txn, self._clock.time_msec(), - self.config.account_validity.renew_at, + self.config.account_validity_renew_at, ) async def set_renewal_mail_status(self, user_id: str, email_sent: bool) -> None: @@ -964,11 +994,11 @@ def set_expiration_date_for_user_txn(self, txn, user_id, use_delta=False): delta equal to 10% of the validity period. """ now_ms = self._clock.time_msec() - expiration_ts = now_ms + self._account_validity.period + expiration_ts = now_ms + self._account_validity_period if use_delta: expiration_ts = self.rand.randrange( - expiration_ts - self._account_validity.startup_job_max_delta, + expiration_ts - self._account_validity_startup_job_max_delta, expiration_ts, ) @@ -1412,7 +1442,7 @@ def _register_user( except self.database_engine.module.IntegrityError: raise StoreError(400, "User ID already taken.", errcode=Codes.USER_IN_USE) - if self._account_validity.enabled: + if self._account_validity_enabled: self.set_expiration_date_for_user_txn(txn, user_id) if create_profile_with_displayname: diff --git a/synapse/storage/databases/main/schema/delta/59/12account_validity_token_used_ts_ms.sql b/synapse/storage/databases/main/schema/delta/59/12account_validity_token_used_ts_ms.sql new file mode 100644 index 000000000000..4836dac16ebf --- /dev/null +++ b/synapse/storage/databases/main/schema/delta/59/12account_validity_token_used_ts_ms.sql @@ -0,0 +1,18 @@ +/* Copyright 2020 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Track when users renew their account using the value of the 'renewal_token' column. +-- This field should be set to NULL after a fresh token is generated. +ALTER TABLE account_validity ADD token_used_ts_ms BIGINT; diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 054d4e41402d..98695b05d5c4 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -492,8 +492,8 @@ def test_renewal_email(self): (user_id, tok) = self.create_user() - # Move 6 days forward. This should trigger a renewal email to be sent. - self.reactor.advance(datetime.timedelta(days=6).total_seconds()) + # Move 5 days forward. This should trigger a renewal email to be sent. 
+ self.reactor.advance(datetime.timedelta(days=5).total_seconds()) self.assertEqual(len(self.email_attempts), 1) # Retrieving the URL from the email is too much pain for now, so we @@ -504,14 +504,32 @@ def test_renewal_email(self): self.assertEquals(channel.result["code"], b"200", channel.result) # Check that we're getting HTML back. - content_type = None - for header in channel.result.get("headers", []): - if header[0] == b"Content-Type": - content_type = header[1] - self.assertEqual(content_type, b"text/html; charset=utf-8", channel.result) + content_type = channel.headers.getRawHeaders(b"Content-Type") + self.assertEqual(content_type, [b"text/html; charset=utf-8"], channel.result) # Check that the HTML we're getting is the one we expect on a successful renewal. - expected_html = self.hs.config.account_validity.account_renewed_html_content + expiration_ts = self.get_success(self.store.get_expiration_ts_for_user(user_id)) + expected_html = self.hs.config.account_validity.account_validity_account_renewed_template.render( + expiration_ts=expiration_ts + ) + self.assertEqual( + channel.result["body"], expected_html.encode("utf8"), channel.result + ) + + # Move 1 day forward. Try to renew with the same token again. + url = "/_matrix/client/unstable/account_validity/renew?token=%s" % renewal_token + channel = self.make_request(b"GET", url) + self.assertEquals(channel.result["code"], b"200", channel.result) + + # Check that we're getting HTML back. + content_type = channel.headers.getRawHeaders(b"Content-Type") + self.assertEqual(content_type, [b"text/html; charset=utf-8"], channel.result) + + # Check that the HTML we're getting is the one we expect when reusing a + # token. The account expiration date should not have changed. + expected_html = self.hs.config.account_validity.account_validity_account_previously_renewed_template.render( + expiration_ts=expiration_ts + ) self.assertEqual( channel.result["body"], expected_html.encode("utf8"), channel.result ) @@ -531,15 +549,14 @@ def test_renewal_invalid_token(self): self.assertEquals(channel.result["code"], b"404", channel.result) # Check that we're getting HTML back. - content_type = None - for header in channel.result.get("headers", []): - if header[0] == b"Content-Type": - content_type = header[1] - self.assertEqual(content_type, b"text/html; charset=utf-8", channel.result) + content_type = channel.headers.getRawHeaders(b"Content-Type") + self.assertEqual(content_type, [b"text/html; charset=utf-8"], channel.result) # Check that the HTML we're getting is the one we expect when using an # invalid/unknown token. - expected_html = self.hs.config.account_validity.invalid_token_html_content + expected_html = ( + self.hs.config.account_validity.account_validity_invalid_token_template.render() + ) self.assertEqual( channel.result["body"], expected_html.encode("utf8"), channel.result ) @@ -647,7 +664,12 @@ def make_homeserver(self, reactor, clock): config["account_validity"] = {"enabled": False} self.hs = self.setup_test_homeserver(config=config) - self.hs.config.account_validity.period = self.validity_period + + # We need to set these directly, instead of in the homeserver config dict above. + # This is due to account validity-related config options not being read by + # Synapse when account_validity.enabled is False. 
+ self.hs.get_datastore()._account_validity_period = self.validity_period + self.hs.get_datastore()._account_validity_startup_job_max_delta = self.max_delta self.store = self.hs.get_datastore() From 495b214f4f8f45d16ffee851c8ab7a380dd0e2b2 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Tue, 20 Apr 2021 12:50:49 +0200 Subject: [PATCH 045/222] Fix (final) Bugbear violations (#9838) --- changelog.d/9838.misc | 1 + scripts-dev/definitions.py | 2 +- scripts-dev/list_url_patterns.py | 2 +- setup.cfg | 3 +-- synapse/event_auth.py | 2 +- synapse/federation/send_queue.py | 4 ++-- synapse/handlers/auth.py | 2 +- synapse/handlers/device.py | 13 +++++-------- synapse/handlers/federation.py | 2 +- synapse/logging/_remote.py | 4 ++-- synapse/rest/key/v2/remote_key_resource.py | 4 ++-- synapse/storage/databases/main/events.py | 10 +++++----- tests/handlers/test_federation.py | 2 +- tests/replication/tcp/streams/test_events.py | 4 ++-- tests/rest/admin/test_device.py | 4 ++-- tests/rest/admin/test_event_reports.py | 8 ++++---- tests/rest/admin/test_room.py | 8 ++++---- tests/rest/admin/test_statistics.py | 2 +- tests/rest/admin/test_user.py | 4 ++-- tests/rest/client/v1/test_rooms.py | 6 +++--- tests/storage/test_event_metrics.py | 4 ++-- tests/unittest.py | 2 +- tests/utils.py | 2 +- 23 files changed, 46 insertions(+), 49 deletions(-) create mode 100644 changelog.d/9838.misc diff --git a/changelog.d/9838.misc b/changelog.d/9838.misc new file mode 100644 index 000000000000..b98ce563093c --- /dev/null +++ b/changelog.d/9838.misc @@ -0,0 +1 @@ +Introduce flake8-bugbear to the test suite and fix some of its lint violations. \ No newline at end of file diff --git a/scripts-dev/definitions.py b/scripts-dev/definitions.py index 313860df139a..c82ddd96776c 100755 --- a/scripts-dev/definitions.py +++ b/scripts-dev/definitions.py @@ -140,7 +140,7 @@ def used_names(prefix, item, defs, names): definitions = {} for directory in args.directories: - for root, dirs, files in os.walk(directory): + for root, _, files in os.walk(directory): for filename in files: if filename.endswith(".py"): filepath = os.path.join(root, filename) diff --git a/scripts-dev/list_url_patterns.py b/scripts-dev/list_url_patterns.py index 26ad7c67f483..e85420dea876 100755 --- a/scripts-dev/list_url_patterns.py +++ b/scripts-dev/list_url_patterns.py @@ -48,7 +48,7 @@ def find_patterns_in_file(filepath): for directory in args.directories: - for root, dirs, files in os.walk(directory): + for root, _, files in os.walk(directory): for filename in files: if filename.endswith(".py"): filepath = os.path.join(root, filename) diff --git a/setup.cfg b/setup.cfg index 33601b71d58c..e5ceb7ed1933 100644 --- a/setup.cfg +++ b/setup.cfg @@ -18,8 +18,7 @@ ignore = # E203: whitespace before ':' (which is contrary to pep8?) 
# E731: do not assign a lambda expression, use a def # E501: Line too long (black enforces this for us) -# B007: Subsection of the bugbear suite (TODO: add in remaining fixes) -ignore=W503,W504,E203,E731,E501,B007 +ignore=W503,W504,E203,E731,E501 [isort] line_length = 88 diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 5234e3f81e3f..c831d9f73cee 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -670,7 +670,7 @@ def _verify_third_party_invite(event: EventBase, auth_events: StateMap[EventBase public_key = public_key_object["public_key"] try: for server, signature_block in signed["signatures"].items(): - for key_name, encoded_signature in signature_block.items(): + for key_name in signature_block.keys(): if not key_name.startswith("ed25519:"): continue verify_key = decode_verify_key_bytes( diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index d71f04e43e41..65d76ea97415 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -501,10 +501,10 @@ def process_rows_for_federation( states=[state], destinations=destinations ) - for destination, edu_map in buff.keyed_edus.items(): + for edu_map in buff.keyed_edus.values(): for key, edu in edu_map.items(): transaction_queue.send_edu(edu, key) - for destination, edu_list in buff.edus.items(): + for edu_list in buff.edus.values(): for edu in edu_list: transaction_queue.send_edu(edu, None) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index b8a37b647772..36f2450e2e93 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -1248,7 +1248,7 @@ async def delete_access_tokens_for_user( # see if any of our auth providers want to know about this for provider in self.password_providers: - for token, token_id, device_id in tokens_and_devices: + for token, _, device_id in tokens_and_devices: await provider.on_logged_out( user_id=user_id, device_id=device_id, access_token=token ) diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index d75edb184b3a..c1d780098153 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -156,8 +156,7 @@ async def get_user_ids_changed( # The user may have left the room # TODO: Check if they actually did or if we were just invited. 
if room_id not in room_ids: - for key, event_id in current_state_ids.items(): - etype, state_key = key + for etype, state_key in current_state_ids.keys(): if etype != EventTypes.Member: continue possibly_left.add(state_key) @@ -179,8 +178,7 @@ async def get_user_ids_changed( log_kv( {"event": "encountered empty previous state", "room_id": room_id} ) - for key, event_id in current_state_ids.items(): - etype, state_key = key + for etype, state_key in current_state_ids.keys(): if etype != EventTypes.Member: continue possibly_changed.add(state_key) @@ -198,8 +196,7 @@ async def get_user_ids_changed( for state_dict in prev_state_ids.values(): member_event = state_dict.get((EventTypes.Member, user_id), None) if not member_event or member_event != current_member_id: - for key, event_id in current_state_ids.items(): - etype, state_key = key + for etype, state_key in current_state_ids.keys(): if etype != EventTypes.Member: continue possibly_changed.add(state_key) @@ -714,7 +711,7 @@ async def _handle_device_updates(self, user_id: str) -> None: # This can happen since we batch updates return - for device_id, stream_id, prev_ids, content in pending_updates: + for device_id, stream_id, prev_ids, _ in pending_updates: logger.debug( "Handling update %r/%r, ID: %r, prev: %r ", user_id, @@ -740,7 +737,7 @@ async def _handle_device_updates(self, user_id: str) -> None: else: # Simply update the single device, since we know that is the only # change (because of the single prev_id matching the current cache) - for device_id, stream_id, prev_ids, content in pending_updates: + for device_id, stream_id, _, content in pending_updates: await self.store.update_remote_device_list_cache_entry( user_id, device_id, content, stream_id ) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 4b3730aa3b7d..dbdd7d2db327 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2956,7 +2956,7 @@ async def _check_signature(self, event: EventBase, context: EventContext) -> Non try: # for each sig on the third_party_invite block of the actual invite for server, signature_block in signed["signatures"].items(): - for key_name, encoded_signature in signature_block.items(): + for key_name in signature_block.keys(): if not key_name.startswith("ed25519:"): continue diff --git a/synapse/logging/_remote.py b/synapse/logging/_remote.py index 4e8b0f8d10f2..c515690b38f0 100644 --- a/synapse/logging/_remote.py +++ b/synapse/logging/_remote.py @@ -226,11 +226,11 @@ def _handle_pressure(self) -> None: old_buffer = self._buffer self._buffer = deque() - for i in range(buffer_split): + for _ in range(buffer_split): self._buffer.append(old_buffer.popleft()) end_buffer = [] - for i in range(buffer_split): + for _ in range(buffer_split): end_buffer.append(old_buffer.pop()) self._buffer.extend(reversed(end_buffer)) diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index c57ac22e5894..f648678b0903 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -144,7 +144,7 @@ async def query_keys(self, request, query, query_remote_on_cache_miss=False): # Note that the value is unused. 
cache_misses = {} # type: Dict[str, Dict[str, int]] - for (server_name, key_id, from_server), results in cached.items(): + for (server_name, key_id, _), results in cached.items(): results = [(result["ts_added_ms"], result) for result in results] if not results and key_id is not None: @@ -206,7 +206,7 @@ async def query_keys(self, request, query, query_remote_on_cache_miss=False): # Cast to bytes since postgresql returns a memoryview. json_results.add(bytes(most_recent_result["key_json"])) else: - for ts_added, result in results: + for _, result in results: # Cast to bytes since postgresql returns a memoryview. json_results.add(bytes(result["key_json"])) diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index a362521e20e7..fd25c8112d73 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -170,7 +170,7 @@ async def _persist_events_and_state_updates( ) async with stream_ordering_manager as stream_orderings: - for (event, context), stream in zip(events_and_contexts, stream_orderings): + for (event, _), stream in zip(events_and_contexts, stream_orderings): event.internal_metadata.stream_ordering = stream await self.db_pool.runInteraction( @@ -297,7 +297,7 @@ def _get_prevs_before_rejected_txn(txn, batch): txn.execute(sql + clause, args) to_recursively_check = [] - for event_id, prev_event_id, metadata, rejected in txn: + for _, prev_event_id, metadata, rejected in txn: if prev_event_id in existing_prevs: continue @@ -1127,7 +1127,7 @@ def _upsert_room_version_txn(self, txn: LoggingTransaction, room_id: str): def _update_forward_extremities_txn( self, txn, new_forward_extremities, max_stream_order ): - for room_id, new_extrem in new_forward_extremities.items(): + for room_id in new_forward_extremities.keys(): self.db_pool.simple_delete_txn( txn, table="event_forward_extremities", keyvalues={"room_id": room_id} ) @@ -1399,7 +1399,7 @@ def get_internal_metadata(event): ] state_values = [] - for event, context in state_events_and_contexts: + for event, _ in state_events_and_contexts: vals = { "event_id": event.event_id, "room_id": event.room_id, @@ -1468,7 +1468,7 @@ def _update_metadata_tables_txn( # nothing to do here return - for event, context in events_and_contexts: + for event, _ in events_and_contexts: if event.type == EventTypes.Redaction and event.redacts is not None: # Remove the entries in the event_push_actions table for the # redacted event. 
diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index c7b0975a19e5..8796af45edb4 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -222,7 +222,7 @@ def create_invite(): room_version, ) - for i in range(3): + for _ in range(3): event = create_invite() self.get_success( self.handler.on_invite_request( diff --git a/tests/replication/tcp/streams/test_events.py b/tests/replication/tcp/streams/test_events.py index 323237c1bb88..f51fa0a79e9e 100644 --- a/tests/replication/tcp/streams/test_events.py +++ b/tests/replication/tcp/streams/test_events.py @@ -239,7 +239,7 @@ def test_update_function_huge_state_change(self): # the state rows are unsorted state_rows = [] # type: List[EventsStreamCurrentStateRow] - for stream_name, token, row in received_rows: + for stream_name, _, row in received_rows: self.assertEqual("events", stream_name) self.assertIsInstance(row, EventsStreamRow) self.assertEqual(row.type, "state") @@ -356,7 +356,7 @@ def test_update_function_state_row_limit(self): # the state rows are unsorted state_rows = [] # type: List[EventsStreamCurrentStateRow] - for j in range(STATES_PER_USER + 1): + for _ in range(STATES_PER_USER + 1): stream_name, token, row = received_rows.pop(0) self.assertEqual("events", stream_name) self.assertIsInstance(row, EventsStreamRow) diff --git a/tests/rest/admin/test_device.py b/tests/rest/admin/test_device.py index ecbee30bb52f..120730b76417 100644 --- a/tests/rest/admin/test_device.py +++ b/tests/rest/admin/test_device.py @@ -430,7 +430,7 @@ def test_get_devices(self): """ # Create devices number_devices = 5 - for n in range(number_devices): + for _ in range(number_devices): self.login("user", "pass") # Get devices @@ -547,7 +547,7 @@ def test_delete_devices(self): # Create devices number_devices = 5 - for n in range(number_devices): + for _ in range(number_devices): self.login("user", "pass") # Get devices diff --git a/tests/rest/admin/test_event_reports.py b/tests/rest/admin/test_event_reports.py index 8c66da3af406..29341bc6e98f 100644 --- a/tests/rest/admin/test_event_reports.py +++ b/tests/rest/admin/test_event_reports.py @@ -48,22 +48,22 @@ def prepare(self, reactor, clock, hs): self.helper.join(self.room_id2, user=self.admin_user, tok=self.admin_user_tok) # Two rooms and two users. 
Every user sends and reports every room event - for i in range(5): + for _ in range(5): self._create_event_and_report( room_id=self.room_id1, user_tok=self.other_user_tok, ) - for i in range(5): + for _ in range(5): self._create_event_and_report( room_id=self.room_id2, user_tok=self.other_user_tok, ) - for i in range(5): + for _ in range(5): self._create_event_and_report( room_id=self.room_id1, user_tok=self.admin_user_tok, ) - for i in range(5): + for _ in range(5): self._create_event_and_report( room_id=self.room_id2, user_tok=self.admin_user_tok, diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index 6bcd997085dd..6b841881200f 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -615,7 +615,7 @@ def test_list_rooms(self): # Create 3 test rooms total_rooms = 3 room_ids = [] - for x in range(total_rooms): + for _ in range(total_rooms): room_id = self.helper.create_room_as( self.admin_user, tok=self.admin_user_tok ) @@ -679,7 +679,7 @@ def test_list_rooms_pagination(self): # Create 5 test rooms total_rooms = 5 room_ids = [] - for x in range(total_rooms): + for _ in range(total_rooms): room_id = self.helper.create_room_as( self.admin_user, tok=self.admin_user_tok ) @@ -1577,7 +1577,7 @@ def test_context_as_admin(self): channel.json_body["event"]["event_id"], events[midway]["event_id"] ) - for i, found_event in enumerate(channel.json_body["events_before"]): + for found_event in channel.json_body["events_before"]: for j, posted_event in enumerate(events): if found_event["event_id"] == posted_event["event_id"]: self.assertTrue(j < midway) @@ -1585,7 +1585,7 @@ def test_context_as_admin(self): else: self.fail("Event %s from events_before not found" % j) - for i, found_event in enumerate(channel.json_body["events_after"]): + for found_event in channel.json_body["events_after"]: for j, posted_event in enumerate(events): if found_event["event_id"] == posted_event["event_id"]: self.assertTrue(j > midway) diff --git a/tests/rest/admin/test_statistics.py b/tests/rest/admin/test_statistics.py index 363bdeeb2dae..79cac4266bf1 100644 --- a/tests/rest/admin/test_statistics.py +++ b/tests/rest/admin/test_statistics.py @@ -467,7 +467,7 @@ def _create_media(self, user_token: str, number_media: int): number_media: Number of media to be created for the user """ upload_resource = self.media_repo.children[b"upload"] - for i in range(number_media): + for _ in range(number_media): # file size is 67 Byte image_data = unhexlify( b"89504e470d0a1a0a0000000d4948445200000001000000010806" diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 2844c493fc0a..b3afd51522d6 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -1937,7 +1937,7 @@ def test_get_rooms(self): # Create rooms and join other_user_tok = self.login("user", "pass") number_rooms = 5 - for n in range(number_rooms): + for _ in range(number_rooms): self.helper.create_room_as(self.other_user, tok=other_user_tok) # Get rooms @@ -2517,7 +2517,7 @@ def _create_media_for_user(self, user_token: str, number_media: int): user_token: Access token of the user number_media: Number of media to be created for the user """ - for i in range(number_media): + for _ in range(number_media): # file size is 67 Byte image_data = unhexlify( b"89504e470d0a1a0a0000000d4948445200000001000000010806" diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index 92babf65e0f3..a3694f3d0200 100644 --- a/tests/rest/client/v1/test_rooms.py +++ 
b/tests/rest/client/v1/test_rooms.py @@ -646,7 +646,7 @@ def test_invites_by_rooms_ratelimit(self): def test_invites_by_users_ratelimit(self): """Tests that invites to a specific user are actually rate-limited.""" - for i in range(3): + for _ in range(3): room_id = self.helper.create_room_as(self.user_id) self.helper.invite(room_id, self.user_id, "@other-users:red") @@ -668,7 +668,7 @@ class RoomJoinRatelimitTestCase(RoomBase): ) def test_join_local_ratelimit(self): """Tests that local joins are actually rate-limited.""" - for i in range(3): + for _ in range(3): self.helper.create_room_as(self.user_id) self.helper.create_room_as(self.user_id, expect_code=429) @@ -733,7 +733,7 @@ def test_join_local_ratelimit_idempotent(self): for path in paths_to_test: # Make sure we send more requests than the rate-limiting config would allow # if all of these requests ended up joining the user to a room. - for i in range(4): + for _ in range(4): channel = self.make_request("POST", path % room_id, {}) self.assertEquals(channel.code, 200) diff --git a/tests/storage/test_event_metrics.py b/tests/storage/test_event_metrics.py index 397e68fe0ad3..088fbb247b50 100644 --- a/tests/storage/test_event_metrics.py +++ b/tests/storage/test_event_metrics.py @@ -38,12 +38,12 @@ def test_exposed_to_prometheus(self): last_event = None # Make a real event chain - for i in range(event_count): + for _ in range(event_count): ev = self.create_and_send_event(room_id, user, False, last_event) last_event = [ev] # Sprinkle in some extremities - for i in range(extrems): + for _ in range(extrems): ev = self.create_and_send_event(room_id, user, False, last_event) # Let it run for a while, then pull out the statistics from the diff --git a/tests/unittest.py b/tests/unittest.py index d890ad981f26..ee22a5384935 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -133,7 +133,7 @@ def tearDown(orig): def assertObjectHasAttributes(self, attrs, obj): """Asserts that the given object has each of the attributes given, and that the value of each matches according to assertEquals.""" - for (key, value) in attrs.items(): + for key in attrs.keys(): if not hasattr(obj, key): raise AssertionError("Expected obj to have a '.%s'" % key) try: diff --git a/tests/utils.py b/tests/utils.py index af6b32fc66c2..63d52b91407a 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -303,7 +303,7 @@ def cleanup(): # database for a few more seconds due to flakiness, preventing # us from dropping it when the test is over. If we can't drop # it, warn and move on. - for x in range(5): + for _ in range(5): try: cur.execute("DROP DATABASE IF EXISTS %s;" % (test_db,)) db_conn.commit() From db70435de740b534936df75c435290a37dcc015f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 20 Apr 2021 13:37:54 +0100 Subject: [PATCH 046/222] Fix bug where we sent remote presence states to remote servers (#9850) --- changelog.d/9850.feature | 1 + synapse/federation/sender/__init__.py | 4 ++++ synapse/handlers/presence.py | 11 ++++++++--- 3 files changed, 13 insertions(+), 3 deletions(-) create mode 100644 changelog.d/9850.feature diff --git a/changelog.d/9850.feature b/changelog.d/9850.feature new file mode 100644 index 000000000000..f56b0bb3bdeb --- /dev/null +++ b/changelog.d/9850.feature @@ -0,0 +1 @@ +Add experimental support for handling presence on a worker. 
diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 6266accaf533..b00a55324c02 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -539,6 +539,10 @@ def send_presence_to_destinations( # No-op if presence is disabled. return + # Ensure we only send out presence states for local users. + for state in states: + assert self.is_mine_id(state.user_id) + for destination in destinations: if destination == self.server_name: continue diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 6460eb99521c..bd2382193f64 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -125,6 +125,7 @@ def __init__(self, hs: "HomeServer"): self.store = hs.get_datastore() self.presence_router = hs.get_presence_router() self.state = hs.get_state_handler() + self.is_mine_id = hs.is_mine_id self._federation = None if hs.should_send_federation() or not hs.config.worker_app: @@ -261,7 +262,8 @@ async def maybe_send_presence_to_interested_destinations( self, states: List[UserPresenceState] ): """If this instance is a federation sender, send the states to all - destinations that are interested. + destinations that are interested. Filters out any states for remote + users. """ if not self._send_federation: @@ -270,6 +272,11 @@ async def maybe_send_presence_to_interested_destinations( # If this worker sends federation we must have a FederationSender. assert self._federation + states = [s for s in states if self.is_mine_id(s.user_id)] + + if not states: + return + hosts_and_states = await get_interested_remotes( self.store, self.presence_router, @@ -292,7 +299,6 @@ class WorkerPresenceHandler(BasePresenceHandler): def __init__(self, hs): super().__init__(hs) self.hs = hs - self.is_mine_id = hs.is_mine_id self._presence_enabled = hs.config.use_presence @@ -492,7 +498,6 @@ class PresenceHandler(BasePresenceHandler): def __init__(self, hs: "HomeServer"): super().__init__(hs) self.hs = hs - self.is_mine_id = hs.is_mine_id self.server_name = hs.hostname self.wheel_timer = WheelTimer() self.notifier = hs.get_notifier() From de0d088adc0cf3d5bbd80238b88143426cd6eaca Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 20 Apr 2021 14:11:24 +0100 Subject: [PATCH 047/222] Add presence federation stream (#9819) --- changelog.d/9819.feature | 1 + synapse/handlers/presence.py | 243 ++++++++++++++++++-- synapse/replication/tcp/client.py | 7 +- synapse/replication/tcp/streams/__init__.py | 3 + synapse/replication/tcp/streams/_base.py | 24 ++ tests/handlers/test_presence.py | 179 +++++++++++++- 6 files changed, 426 insertions(+), 31 deletions(-) create mode 100644 changelog.d/9819.feature diff --git a/changelog.d/9819.feature b/changelog.d/9819.feature new file mode 100644 index 000000000000..f56b0bb3bdeb --- /dev/null +++ b/changelog.d/9819.feature @@ -0,0 +1 @@ +Add experimental support for handling presence on a worker. 
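One detail worth calling out in the `PresenceFederationQueue` added below: `_clear_queue` prunes expired entries by bisecting the timestamp-sorted queue with a 1-tuple `(clear_before,)`. Python compares tuples element-wise and treats a shorter tuple as less than a longer one with an equal prefix, so the 1-tuple sorts just before any entry whose timestamp is `clear_before` or later. A runnable sketch with made-up entries:

    from bisect import bisect

    # Entries are (timestamp_ms, stream_id, destinations, user_ids),
    # kept in timestamp order, matching the queue in the diff below.
    queue = [
        (1000, 1, ("dest1",), {"@a:test"}),
        (2000, 2, ("dest2",), {"@b:test"}),
        (3000, 3, ("dest3",), {"@c:test"}),
    ]

    clear_before = 2500
    # bisect returns the index of the first entry to keep; everything
    # older than clear_before is dropped.
    index = bisect(queue, (clear_before,))
    queue = queue[index:]
    assert queue == [(3000, 3, ("dest3",), {"@c:test"})]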
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index bd2382193f64..598466c9bdf7 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -24,6 +24,7 @@ import abc import contextlib import logging +from bisect import bisect from contextlib import contextmanager from typing import ( TYPE_CHECKING, @@ -53,7 +54,9 @@ ReplicationBumpPresenceActiveTime, ReplicationPresenceSetState, ) +from synapse.replication.http.streams import ReplicationGetStreamUpdates from synapse.replication.tcp.commands import ClearUserSyncsCommand +from synapse.replication.tcp.streams import PresenceFederationStream, PresenceStream from synapse.state import StateHandler from synapse.storage.databases.main import DataStore from synapse.types import Collection, JsonDict, UserID, get_domain_from_id @@ -128,10 +131,10 @@ def __init__(self, hs: "HomeServer"): self.is_mine_id = hs.is_mine_id self._federation = None - if hs.should_send_federation() or not hs.config.worker_app: + if hs.should_send_federation(): self._federation = hs.get_federation_sender() - self._send_federation = hs.should_send_federation() + self._federation_queue = PresenceFederationQueue(hs, self) self._busy_presence_enabled = hs.config.experimental.msc3026_enabled @@ -254,9 +257,17 @@ async def update_external_syncs_clear(self, process_id): """ pass - async def process_replication_rows(self, token, rows): - """Process presence stream rows received over replication.""" - pass + async def process_replication_rows( + self, stream_name: str, instance_name: str, token: int, rows: list + ): + """Process streams received over replication.""" + await self._federation_queue.process_replication_rows( + stream_name, instance_name, token, rows + ) + + def get_federation_queue(self) -> "PresenceFederationQueue": + """Get the presence federation queue.""" + return self._federation_queue async def maybe_send_presence_to_interested_destinations( self, states: List[UserPresenceState] @@ -266,12 +277,9 @@ async def maybe_send_presence_to_interested_destinations( users. """ - if not self._send_federation: + if not self._federation: return - # If this worker sends federation we must have a FederationSender. - assert self._federation - states = [s for s in states if self.is_mine_id(s.user_id)] if not states: @@ -427,7 +435,14 @@ async def notify_from_replication(self, states, stream_id): # If this is a federation sender, notify about presence updates. await self.maybe_send_presence_to_interested_destinations(states) - async def process_replication_rows(self, token, rows): + async def process_replication_rows( + self, stream_name: str, instance_name: str, token: int, rows: list + ): + await super().process_replication_rows(stream_name, instance_name, token, rows) + + if stream_name != PresenceStream.NAME: + return + states = [ UserPresenceState( row.user_id, @@ -729,12 +744,10 @@ async def _update_states(self, new_states: Iterable[UserPresenceState]) -> None: self.state, ) - # Since this is master we know that we have a federation sender or - # queue, and so this will be defined. 
- assert self._federation - for destinations, states in hosts_and_states: - self._federation.send_presence_to_destinations(states, destinations) + self._federation_queue.send_presence_to_destinations( + states, destinations + ) async def _handle_timeouts(self): """Checks the presence of users that have timed out and updates as @@ -1213,13 +1226,9 @@ async def _handle_state_delta(self, deltas): user_presence_states ) - # Since this is master we know that we have a federation sender or - # queue, and so this will be defined. - assert self._federation - # Send out user presence updates for each destination for destination, user_state_set in presence_destinations.items(): - self._federation.send_presence_to_destinations( + self._federation_queue.send_presence_to_destinations( destinations=[destination], states=user_state_set ) @@ -1864,3 +1873,197 @@ async def get_interested_remotes( hosts_and_states.append(([host], states)) return hosts_and_states + + +class PresenceFederationQueue: + """Handles sending ad hoc presence updates over federation, which are *not* + due to state updates (that get handled via the presence stream), e.g. + federation pings and sending existing present states to newly joined hosts. + + Only the last N minutes will be queued, so if a federation sender instance + is down for longer then some updates will be dropped. This is OK as presence + is ephemeral, and so it will self correct eventually. + + On workers the class tracks the last received position of the stream from + replication, and handles querying for missed updates over HTTP replication, + c.f. `get_current_token` and `get_replication_rows`. + """ + + # How long to keep entries in the queue for. Workers that are down for + # longer than this duration will miss out on older updates. + _KEEP_ITEMS_IN_QUEUE_FOR_MS = 5 * 60 * 1000 + + # How often to check if we can expire entries from the queue. + _CLEAR_ITEMS_EVERY_MS = 60 * 1000 + + def __init__(self, hs: "HomeServer", presence_handler: BasePresenceHandler): + self._clock = hs.get_clock() + self._notifier = hs.get_notifier() + self._instance_name = hs.get_instance_name() + self._presence_handler = presence_handler + self._repl_client = ReplicationGetStreamUpdates.make_client(hs) + + # Should we keep a queue of recent presence updates? We only bother if + # another process may be handling federation sending. + self._queue_presence_updates = True + + # Whether this instance is a presence writer. + self._presence_writer = hs.config.worker.worker_app is None + + # The FederationSender instance, if this process sends federation traffic directly. + self._federation = None + + if hs.should_send_federation(): + self._federation = hs.get_federation_sender() + + # We don't bother queuing up presence states if only this instance + # is sending federation. + if hs.config.worker.federation_shard_config.instances == [ + self._instance_name + ]: + self._queue_presence_updates = False + + # The queue of recently queued updates as tuples of: `(timestamp, + # stream_id, destinations, user_ids)`. We don't store the full states + # for efficiency, and remote workers will already have the full states + # cached. 
+ self._queue = [] # type: List[Tuple[int, int, Collection[str], Set[str]]] + + self._next_id = 1 + + # Map from instance name to current token + self._current_tokens = {} # type: Dict[str, int] + + if self._queue_presence_updates: + self._clock.looping_call(self._clear_queue, self._CLEAR_ITEMS_EVERY_MS) + + def _clear_queue(self): + """Clear out older entries from the queue.""" + clear_before = self._clock.time_msec() - self._KEEP_ITEMS_IN_QUEUE_FOR_MS + + # The queue is sorted by timestamp, so we can bisect to find the right + # place to purge before. Note that we are searching using a 1-tuple with + # the time, which does The Right Thing since the queue is a tuple where + # the first item is a timestamp. + index = bisect(self._queue, (clear_before,)) + self._queue = self._queue[index:] + + def send_presence_to_destinations( + self, states: Collection[UserPresenceState], destinations: Collection[str] + ) -> None: + """Send the presence states to the given destinations. + + Will forward to the local federation sender (if there is one) and queue + to send over replication (if there are other federation sender instances.). + + Must only be called on the master process. + """ + + # This should only be called on a presence writer. + assert self._presence_writer + + if self._federation: + self._federation.send_presence_to_destinations( + states=states, + destinations=destinations, + ) + + if not self._queue_presence_updates: + return + + now = self._clock.time_msec() + + stream_id = self._next_id + self._next_id += 1 + + self._queue.append((now, stream_id, destinations, {s.user_id for s in states})) + + self._notifier.notify_replication() + + def get_current_token(self, instance_name: str) -> int: + """Get the current position of the stream. + + On workers this returns the last stream ID received from replication. + """ + if instance_name == self._instance_name: + return self._next_id - 1 + else: + return self._current_tokens.get(instance_name, 0) + + async def get_replication_rows( + self, + instance_name: str, + from_token: int, + upto_token: int, + target_row_count: int, + ) -> Tuple[List[Tuple[int, Tuple[str, str]]], int, bool]: + """Get all the updates between the two tokens. + + We return rows in the form of `(destination, user_id)` to keep the size + of each row bounded (rather than returning the sets in a row). + + On workers this will query the master process via HTTP replication. + """ + if instance_name != self._instance_name: + # If not local we query over http replication from the master + result = await self._repl_client( + instance_name=instance_name, + stream_name=PresenceFederationStream.NAME, + from_token=from_token, + upto_token=upto_token, + ) + return result["updates"], result["upto_token"], result["limited"] + + # We can find the correct position in the queue by noting that there is + # exactly one entry per stream ID, and that the last entry has an ID of + # `self._next_id - 1`, so we can count backwards from the end. + # + # Since the start of the queue is periodically truncated we need to + # handle the case where `from_token` stream ID has already been dropped. 
+        # NB: the first entry we still need has ID `from_token + 1`.
+        start_idx = max(from_token + 1 - self._next_id, -len(self._queue))
+
+        to_send = []  # type: List[Tuple[int, Tuple[str, str]]]
+        limited = False
+        new_id = upto_token
+        for _, stream_id, destinations, user_ids in self._queue[start_idx:]:
+            if stream_id > upto_token:
+                break
+
+            new_id = stream_id
+
+            to_send.extend(
+                (stream_id, (destination, user_id))
+                for destination in destinations
+                for user_id in user_ids
+            )
+
+            if len(to_send) > target_row_count:
+                limited = True
+                break
+
+        return to_send, new_id, limited
+
+    async def process_replication_rows(
+        self, stream_name: str, instance_name: str, token: int, rows: list
+    ):
+        if stream_name != PresenceFederationStream.NAME:
+            return
+
+        # We keep track of the current tokens (so that we can catch up with
+        # anything we missed after a disconnect)
+        self._current_tokens[instance_name] = token
+
+        # If we're a federation sender we pull out the presence states to send
+        # and forward them on.
+        if not self._federation:
+            return
+
+        hosts_to_users = {}  # type: Dict[str, Set[str]]
+        for row in rows:
+            hosts_to_users.setdefault(row.destination, set()).add(row.user_id)
+
+        for host, user_ids in hosts_to_users.items():
+            states = await self._presence_handler.current_state_for_users(user_ids)
+            self._federation.send_presence_to_destinations(
+                states=states.values(),
+                destinations=[host],
+            )
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py
index ce5d651cb8c5..4f3c6a18b6cf 100644
--- a/synapse/replication/tcp/client.py
+++ b/synapse/replication/tcp/client.py
@@ -29,7 +29,6 @@
     AccountDataStream,
     DeviceListsStream,
     GroupServerStream,
-    PresenceStream,
     PushersStream,
     PushRulesStream,
     ReceiptsStream,
@@ -191,8 +190,6 @@ async def on_rdata(
                     self.stop_pusher(row.user_id, row.app_id, row.pushkey)
                 else:
                     await self.start_pusher(row.user_id, row.app_id, row.pushkey)
-        elif stream_name == PresenceStream.NAME:
-            await self._presence_handler.process_replication_rows(token, rows)
         elif stream_name == EventsStream.NAME:
             # We shouldn't get multiple rows per token for events stream, so
             # we don't need to optimise this for multiple rows.
@@ -221,6 +218,10 @@ async def on_rdata(
                     membership=row.data.membership,
                 )
 
+        await self._presence_handler.process_replication_rows(
+            stream_name, instance_name, token, rows
+        )
+
         # Notify any waiting deferreds. The list is ordered by position so we
         # just iterate through the list until we reach a position that is
         # greater than the received row position.
diff --git a/synapse/replication/tcp/streams/__init__.py b/synapse/replication/tcp/streams/__init__.py
index fb74ac4e98ad..4c0023c68aee 100644
--- a/synapse/replication/tcp/streams/__init__.py
+++ b/synapse/replication/tcp/streams/__init__.py
@@ -30,6 +30,7 @@
     CachesStream,
     DeviceListsStream,
     GroupServerStream,
+    PresenceFederationStream,
     PresenceStream,
     PublicRoomsStream,
     PushersStream,
@@ -50,6 +51,7 @@
     EventsStream,
     BackfillStream,
     PresenceStream,
+    PresenceFederationStream,
     TypingStream,
     ReceiptsStream,
     PushRulesStream,
@@ -71,6 +73,7 @@
     "Stream",
     "BackfillStream",
     "PresenceStream",
+    "PresenceFederationStream",
     "TypingStream",
     "ReceiptsStream",
     "PushRulesStream",
diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py
index 520c45f151d4..9d75a89f1caa 100644
--- a/synapse/replication/tcp/streams/_base.py
+++ b/synapse/replication/tcp/streams/_base.py
@@ -290,6 +290,30 @@ def __init__(self, hs):
         )
 
 
+class PresenceFederationStream(Stream):
+    """A stream used to send ad hoc presence updates over federation.
+ + Streams the remote destination and the user ID of the presence state to + send. + """ + + @attr.s(slots=True, auto_attribs=True) + class PresenceFederationStreamRow: + destination: str + user_id: str + + NAME = "presence_federation" + ROW_TYPE = PresenceFederationStreamRow + + def __init__(self, hs: "HomeServer"): + federation_queue = hs.get_presence_handler().get_federation_queue() + super().__init__( + hs.get_instance_name(), + federation_queue.get_current_token, + federation_queue.get_replication_rows, + ) + + class TypingStream(Stream): TypingStreamRow = namedtuple( "TypingStreamRow", ("room_id", "user_ids") # str # list(str) diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index 2d12e8289748..61271cd08498 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -21,6 +21,7 @@ from synapse.api.presence import UserPresenceState from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.events.builder import EventBuilder +from synapse.federation.sender import FederationSender from synapse.handlers.presence import ( EXTERNAL_PROCESS_EXPIRY, FEDERATION_PING_INTERVAL, @@ -471,6 +472,168 @@ def test_external_process_timeout(self): self.assertEqual(state.state, PresenceState.OFFLINE) +class PresenceFederationQueueTestCase(unittest.HomeserverTestCase): + def prepare(self, reactor, clock, hs): + self.presence_handler = hs.get_presence_handler() + self.clock = hs.get_clock() + self.instance_name = hs.get_instance_name() + + self.queue = self.presence_handler.get_federation_queue() + + def test_send_and_get(self): + state1 = UserPresenceState.default("@user1:test") + state2 = UserPresenceState.default("@user2:test") + state3 = UserPresenceState.default("@user3:test") + + prev_token = self.queue.get_current_token(self.instance_name) + + self.queue.send_presence_to_destinations((state1, state2), ("dest1", "dest2")) + self.queue.send_presence_to_destinations((state3,), ("dest3",)) + + now_token = self.queue.get_current_token(self.instance_name) + + rows, upto_token, limited = self.get_success( + self.queue.get_replication_rows("master", prev_token, now_token, 10) + ) + + self.assertEqual(upto_token, now_token) + self.assertFalse(limited) + + expected_rows = [ + (1, ("dest1", "@user1:test")), + (1, ("dest2", "@user1:test")), + (1, ("dest1", "@user2:test")), + (1, ("dest2", "@user2:test")), + (2, ("dest3", "@user3:test")), + ] + + self.assertCountEqual(rows, expected_rows) + + def test_send_and_get_split(self): + state1 = UserPresenceState.default("@user1:test") + state2 = UserPresenceState.default("@user2:test") + state3 = UserPresenceState.default("@user3:test") + + prev_token = self.queue.get_current_token(self.instance_name) + + self.queue.send_presence_to_destinations((state1, state2), ("dest1", "dest2")) + + now_token = self.queue.get_current_token(self.instance_name) + + self.queue.send_presence_to_destinations((state3,), ("dest3",)) + + rows, upto_token, limited = self.get_success( + self.queue.get_replication_rows("master", prev_token, now_token, 10) + ) + + self.assertEqual(upto_token, now_token) + self.assertFalse(limited) + + expected_rows = [ + (1, ("dest1", "@user1:test")), + (1, ("dest2", "@user1:test")), + (1, ("dest1", "@user2:test")), + (1, ("dest2", "@user2:test")), + ] + + self.assertCountEqual(rows, expected_rows) + + def test_clear_queue_all(self): + state1 = UserPresenceState.default("@user1:test") + state2 = UserPresenceState.default("@user2:test") + state3 = UserPresenceState.default("@user3:test") + + 
prev_token = self.queue.get_current_token(self.instance_name)
+
+        self.queue.send_presence_to_destinations((state1, state2), ("dest1", "dest2"))
+        self.queue.send_presence_to_destinations((state3,), ("dest3",))
+
+        # advance() takes seconds: move well past the five minute expiry window.
+        self.reactor.advance(10 * 60)
+
+        now_token = self.queue.get_current_token(self.instance_name)
+
+        rows, upto_token, limited = self.get_success(
+            self.queue.get_replication_rows("master", prev_token, now_token, 10)
+        )
+        self.assertEqual(upto_token, now_token)
+        self.assertFalse(limited)
+        self.assertCountEqual(rows, [])
+
+        prev_token = self.queue.get_current_token(self.instance_name)
+
+        self.queue.send_presence_to_destinations((state1, state2), ("dest1", "dest2"))
+        self.queue.send_presence_to_destinations((state3,), ("dest3",))
+
+        now_token = self.queue.get_current_token(self.instance_name)
+
+        rows, upto_token, limited = self.get_success(
+            self.queue.get_replication_rows("master", prev_token, now_token, 10)
+        )
+        self.assertEqual(upto_token, now_token)
+        self.assertFalse(limited)
+
+        expected_rows = [
+            (3, ("dest1", "@user1:test")),
+            (3, ("dest2", "@user1:test")),
+            (3, ("dest1", "@user2:test")),
+            (3, ("dest2", "@user2:test")),
+            (4, ("dest3", "@user3:test")),
+        ]
+
+        self.assertCountEqual(rows, expected_rows)
+
+    def test_partially_clear_queue(self):
+        state1 = UserPresenceState.default("@user1:test")
+        state2 = UserPresenceState.default("@user2:test")
+        state3 = UserPresenceState.default("@user3:test")
+
+        prev_token = self.queue.get_current_token(self.instance_name)
+
+        self.queue.send_presence_to_destinations((state1, state2), ("dest1", "dest2"))
+
+        self.reactor.advance(2 * 60)  # advance() takes seconds
+
+        self.queue.send_presence_to_destinations((state3,), ("dest3",))
+
+        self.reactor.advance(4 * 60)
+
+        now_token = self.queue.get_current_token(self.instance_name)
+
+        rows, upto_token, limited = self.get_success(
+            self.queue.get_replication_rows("master", prev_token, now_token, 10)
+        )
+        self.assertEqual(upto_token, now_token)
+        self.assertFalse(limited)
+
+        # The first update (sent six minutes ago) has expired, but the second
+        # (sent four minutes ago) is still within the five minute window.
+        expected_rows = [
+            (2, ("dest3", "@user3:test")),
+        ]
+        self.assertCountEqual(rows, expected_rows)
+
+        prev_token = self.queue.get_current_token(self.instance_name)
+
+        self.queue.send_presence_to_destinations((state1, state2), ("dest1", "dest2"))
+        self.queue.send_presence_to_destinations((state3,), ("dest3",))
+
+        now_token = self.queue.get_current_token(self.instance_name)
+
+        rows, upto_token, limited = self.get_success(
+            self.queue.get_replication_rows("master", prev_token, now_token, 10)
+        )
+        self.assertEqual(upto_token, now_token)
+        self.assertFalse(limited)
+
+        expected_rows = [
+            (3, ("dest1", "@user1:test")),
+            (3, ("dest2", "@user1:test")),
+            (3, ("dest1", "@user2:test")),
+            (3, ("dest2", "@user2:test")),
+            (4, ("dest3", "@user3:test")),
+        ]
+
+        self.assertCountEqual(rows, expected_rows)
+
+
 class PresenceJoinTestCase(unittest.HomeserverTestCase):
     """Tests remote servers get told about presence of users in the room when
     they join and when new local users join.
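
Before the next hunk, which swaps a bare `Mock()` for `Mock(spec=FederationSender)`: a short standard-library illustration (not patch code) of what `spec` buys these tests:

```python
from unittest.mock import Mock

class FederationSender:
    def send_presence_to_destinations(self, states, destinations):
        ...

sender = Mock(spec=FederationSender)
sender.send_presence_to_destinations(states=(), destinations=["server2"])  # ok

# With spec set, touching an attribute the real class lacks raises, so a
# test asserting on a removed method (e.g. an old send_presence API) fails
# loudly instead of passing vacuously.
try:
    sender.send_presence
except AttributeError as e:
    print("rejected:", e)
```
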
@@ -482,10 +645,17 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): hs = self.setup_test_homeserver( - "server", federation_http_client=None, federation_sender=Mock() + "server", + federation_http_client=None, + federation_sender=Mock(spec=FederationSender), ) return hs + def default_config(self): + config = super().default_config() + config["send_federation"] = True + return config + def prepare(self, reactor, clock, hs): self.federation_sender = hs.get_federation_sender() self.event_builder_factory = hs.get_event_builder_factory() @@ -529,9 +699,6 @@ def test_remote_joins(self): # Add a new remote server to the room self._add_new_user(room_id, "@alice:server2") - # We shouldn't have sent out any local presence *updates* - self.federation_sender.send_presence.assert_not_called() - # When new server is joined we send it the local users presence states. # We expect to only see user @test2:server, as @test:server is offline # and has a zero last_active_ts @@ -550,7 +717,6 @@ def test_remote_joins(self): self.federation_sender.reset_mock() self._add_new_user(room_id, "@bob:server3") - self.federation_sender.send_presence.assert_not_called() self.federation_sender.send_presence_to_destinations.assert_called_once_with( destinations=["server3"], states={expected_state} ) @@ -595,9 +761,6 @@ def test_remote_gets_presence_when_local_user_joins(self): self.reactor.pump([0]) # Wait for presence updates to be handled - # We shouldn't have sent out any local presence *updates* - self.federation_sender.send_presence.assert_not_called() - # We expect to only send test2 presence to server2 and server3 expected_state = self.get_success( self.presence_handler.current_state_for_user("@test2:server") From b076bc276e881b262048307b6a226061d96c4a8d Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 20 Apr 2021 09:19:00 -0400 Subject: [PATCH 048/222] Always use the name as the log ID. (#9829) As far as I can tell our logging contexts are meant to log the request ID, or sometimes the request ID followed by a suffix (this is generally stored in the name field of LoggingContext). There's also code to log the name@memory location, but I'm not sure this is ever used. This simplifies the code paths to require every logging context to have a name and use that in logging. For sub-contexts (created via nested_logging_contexts, defer_to_threadpool, Measure) we use the current context's str (which becomes their name or the string "sentinel") and then potentially modify that (e.g. add a suffix). --- changelog.d/9829.bugfix | 1 + synapse/logging/context.py | 14 ++++---------- synapse/metrics/background_process_metrics.py | 15 ++++----------- synapse/replication/tcp/protocol.py | 2 +- synapse/util/metrics.py | 14 +++++++++----- tests/logging/test_terse_json.py | 6 ++++-- tests/test_federation.py | 2 +- tests/util/caches/test_descriptors.py | 6 ++---- 8 files changed, 26 insertions(+), 34 deletions(-) create mode 100644 changelog.d/9829.bugfix diff --git a/changelog.d/9829.bugfix b/changelog.d/9829.bugfix new file mode 100644 index 000000000000..d0c1e49fd8b8 --- /dev/null +++ b/changelog.d/9829.bugfix @@ -0,0 +1 @@ +Fix the log lines of nested logging contexts. 
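
A small sketch of the naming behaviour this patch settles on, using the public `synapse.logging.context` helpers (illustrative only; the names are invented):

```python
from synapse.logging.context import LoggingContext, nested_logging_context

# After this patch every context carries an explicit name and str(context)
# is simply that name, so nested contexts get readable "parent-suffix" IDs.
with LoggingContext("GET-42"):
    with nested_logging_context("persist_event") as ctx:
        print(str(ctx))  # expected: "GET-42-persist_event"
```
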
diff --git a/synapse/logging/context.py b/synapse/logging/context.py index e78343f5545b..dbd7d3a33a6e 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -277,7 +277,7 @@ class LoggingContext: def __init__( self, - name: Optional[str] = None, + name: str, parent_context: "Optional[LoggingContext]" = None, request: Optional[ContextRequest] = None, ) -> None: @@ -315,9 +315,7 @@ def __init__( self.request = request def __str__(self) -> str: - if self.request: - return self.request.request_id - return "%s@%x" % (self.name, id(self)) + return self.name @classmethod def current_context(cls) -> LoggingContextOrSentinel: @@ -694,17 +692,13 @@ def nested_logging_context(suffix: str) -> LoggingContext: "Starting nested logging context from sentinel context: metrics will be lost" ) parent_context = None - prefix = "" - request = None else: assert isinstance(curr_context, LoggingContext) parent_context = curr_context - prefix = str(parent_context.name) - request = parent_context.request + prefix = str(curr_context) return LoggingContext( prefix + "-" + suffix, parent_context=parent_context, - request=request, ) @@ -895,7 +889,7 @@ def defer_to_threadpool(reactor, threadpool, f, *args, **kwargs): parent_context = curr_context def g(): - with LoggingContext(parent_context=parent_context): + with LoggingContext(str(curr_context), parent_context=parent_context): return f(*args, **kwargs) return make_deferred_yieldable(threads.deferToThreadPool(reactor, threadpool, g)) diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index e8a9096c039e..78e9cfbc26ee 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -16,7 +16,7 @@ import logging import threading from functools import wraps -from typing import TYPE_CHECKING, Dict, Optional, Set, Union +from typing import TYPE_CHECKING, Dict, Optional, Set from prometheus_client.core import REGISTRY, Counter, Gauge @@ -199,7 +199,7 @@ async def run(): _background_process_start_count.labels(desc).inc() _background_process_in_flight_count.labels(desc).inc() - with BackgroundProcessLoggingContext(desc, count) as context: + with BackgroundProcessLoggingContext("%s-%s" % (desc, count)) as context: try: ctx = noop_context_manager() if bg_start_span: @@ -242,19 +242,12 @@ class BackgroundProcessLoggingContext(LoggingContext): processes. """ - __slots__ = ["_id", "_proc"] + __slots__ = ["_proc"] - def __init__(self, name: str, id: Optional[Union[int, str]] = None): + def __init__(self, name: str): super().__init__(name) - self._id = id - self._proc = _BackgroundProcess(name, self) - def __str__(self) -> str: - if self._id is not None: - return "%s-%s" % (self.name, self._id) - return "%s@%x" % (self.name, id(self)) - def start(self, rusage: "Optional[resource._RUsage]"): """Log context has started running (again).""" diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py index d10d57424618..ba753318bd81 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -185,7 +185,7 @@ def __init__(self, clock: Clock, handler: "ReplicationCommandHandler"): # a logcontext which we use for processing incoming commands. We declare it as a # background process so that the CPU stats get reported to prometheus. 
         self._logging_context = BackgroundProcessLoggingContext(
-            "replication-conn", self.conn_id
+            "replication-conn-%s" % (self.conn_id,)
         )
 
     def connectionMade(self):
diff --git a/synapse/util/metrics.py b/synapse/util/metrics.py
index 1023c856d143..019cfa17cc93 100644
--- a/synapse/util/metrics.py
+++ b/synapse/util/metrics.py
@@ -105,7 +105,13 @@ class Measure:
         "start",
     ]
 
-    def __init__(self, clock, name):
+    def __init__(self, clock, name: str):
+        """
+        Args:
+            clock: An object with a "time()" method, which returns the current
+                time in seconds.
+            name: The name of the metric to report.
+        """
         self.clock = clock
         self.name = name
         curr_context = current_context()
@@ -118,10 +124,8 @@ def __init__(self, clock, name):
         else:
             assert isinstance(curr_context, LoggingContext)
             parent_context = curr_context
-        self._logging_context = LoggingContext(
-            "Measure[%s]" % (self.name,), parent_context
-        )
-        self.start = None
+        self._logging_context = LoggingContext(str(curr_context), parent_context)
+        self.start = None  # type: Optional[float]
 
     def __enter__(self) -> "Measure":
         if self.start is not None:
diff --git a/tests/logging/test_terse_json.py b/tests/logging/test_terse_json.py
index 215fd8b0f9e3..ecf873e2ab06 100644
--- a/tests/logging/test_terse_json.py
+++ b/tests/logging/test_terse_json.py
@@ -138,7 +138,7 @@ def test_with_context(self):
         ]
         self.assertCountEqual(log.keys(), expected_log_keys)
         self.assertEqual(log["log"], "Hello there, wally!")
-        self.assertTrue(log["request"].startswith("name@"))
+        self.assertEqual(log["request"], "name")
 
     def test_with_request_context(self):
         """
@@ -165,7 +165,9 @@ def test_with_request_context(self):
         # Also set the requester to ensure the processing works.
         request.requester = "@foo:test"
 
-        with LoggingContext(parent_context=request.logcontext):
+        with LoggingContext(
+            request.get_request_id(), parent_context=request.logcontext
+        ):
             logger.info("Hello there, %s!", "wally")
 
         log = self.get_log_line()
diff --git a/tests/test_federation.py b/tests/test_federation.py
index 8928597d17db..382cedbd5db9 100644
--- a/tests/test_federation.py
+++ b/tests/test_federation.py
@@ -134,7 +134,7 @@ async def post_json(destination, path, data, headers=None, timeout=0):
             }
         )
 
-        with LoggingContext():
+        with LoggingContext("test-context"):
             failure = self.get_failure(
                 self.handler.on_receive_pdu(
                     "test.serv", lying_event, sent_to_us_directly=True
diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py
index 2d1f9360e0b0..8c082e743260 100644
--- a/tests/util/caches/test_descriptors.py
+++ b/tests/util/caches/test_descriptors.py
@@ -231,8 +231,7 @@ def inner_fn():
 
         @defer.inlineCallbacks
         def do_lookup():
-            with LoggingContext() as c1:
-                c1.name = "c1"
+            with LoggingContext("c1") as c1:
                 r = yield obj.fn(1)
                 self.assertEqual(current_context(), c1)
             return r
@@ -274,8 +273,7 @@ def inner_fn():
 
         @defer.inlineCallbacks
         def do_lookup():
-            with LoggingContext() as c1:
-                c1.name = "c1"
+            with LoggingContext("c1") as c1:
                 try:
                     d = obj.fn(1)
                     self.assertEqual(

From 0a88ec0a879d9fcb6f2202b7cff3766ed5f7253b Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Tue, 20 Apr 2021 14:19:35 +0100
Subject: [PATCH 049/222] Add Application Service registration type requirement
 + py35, pg95 deprecation notices to v1.32.0 upgrade notes (#9849)

Fixes https://github.com/matrix-org/synapse/issues/9846.

Adds important removal information from the top of
https://github.com/matrix-org/synapse/releases/tag/v1.32.0rc1
into UPGRADE.rst.
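
For context on the `Measure` hunk in the previous patch, a minimal usage sketch (assuming a homeserver's clock from `hs.get_clock()`; illustrative, not part of the patch):

```python
from synapse.util.metrics import Measure

async def do_work(hs):
    # Measure wraps the block in a child logging context (named after the
    # current context once this patch lands) and reports per-block metrics.
    with Measure(hs.get_clock(), "do_work"):
        ...  # the timed work goes here
```
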
---
 UPGRADE.rst | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/UPGRADE.rst b/UPGRADE.rst
index 665821d4ef04..7a9b86905568 100644
--- a/UPGRADE.rst
+++ b/UPGRADE.rst
@@ -88,6 +88,14 @@ for example:
 Upgrading to v1.32.0
 ====================
 
+Dropping support for old Python, Postgres and SQLite versions
+-------------------------------------------------------------
+
+In line with our `deprecation policy `_,
+we've dropped support for Python 3.5 and PostgreSQL 9.5, as they are no longer supported upstream.
+
+This release of Synapse requires Python 3.6+ and PostgreSQL 9.6+ or SQLite 3.22+.
+
 Removal of old List Accounts Admin API
 --------------------------------------
 
@@ -98,6 +106,16 @@ has been available since Synapse 1.7.0 (2019-12-13), and is accessible under ``G
 
 The deprecation of the old endpoint was announced with Synapse 1.28.0 (released on 2021-02-25).
 
+Application Services must use type ``m.login.application_service`` when registering users
+-----------------------------------------------------------------------------------------
+
+In compliance with the
+`Application Service spec `_,
+Application Services are now required to use the ``m.login.application_service`` type when registering users via the
+``/_matrix/client/r0/register`` endpoint. This behaviour was deprecated in Synapse v1.30.0.
+
+Please ensure your Application Services are up to date.
+
 Upgrading to v1.29.0
 ====================

From e031c7e0cca2422aa2c5d3704adc66723d8094e7 Mon Sep 17 00:00:00 2001
From: Andrew Morgan
Date: Tue, 20 Apr 2021 14:31:27 +0100
Subject: [PATCH 050/222] 1.32.0

---
 CHANGES.md              | 13 +++++++++++--
 changelog.d/9829.bugfix |  1 -
 debian/changelog        |  8 ++++++--
 synapse/__init__.py     |  2 +-
 4 files changed, 18 insertions(+), 6 deletions(-)
 delete mode 100644 changelog.d/9829.bugfix

diff --git a/CHANGES.md b/CHANGES.md
index 41908f84be0b..4d48a321c6bd 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,5 +1,5 @@
-Synapse 1.32.0rc1 (2021-04-13)
-==============================
+Synapse 1.32.0 (2021-04-20)
+===========================
 
 **Note:** This release requires Python 3.6+ and Postgres 9.6+ or SQLite 3.22+.
 
@@ -7,6 +7,15 @@ This release removes the deprecated `GET /_synapse/admin/v1/users/` adm
 
 This release requires Application Services to use type `m.login.application_services` when registering users via the `/_matrix/client/r0/register` endpoint to comply with the spec. Please ensure your Application Services are up to date.
 
+Bugfixes
+--------
+
+- Fix the log lines of nested logging contexts. Broke in 1.32.0rc1. ([\#9829](https://github.com/matrix-org/synapse/issues/9829))
+
+
+Synapse 1.32.0rc1 (2021-04-13)
+==============================
+
 Features
 --------
 
diff --git a/changelog.d/9829.bugfix b/changelog.d/9829.bugfix
deleted file mode 100644
index d0c1e49fd8b8..000000000000
--- a/changelog.d/9829.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix the log lines of nested logging contexts.
diff --git a/debian/changelog b/debian/changelog
index 5d526316fcf2..83be4497ec39 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,8 +1,12 @@
-matrix-synapse-py3 (1.31.0+nmu1) UNRELEASED; urgency=medium
+matrix-synapse-py3 (1.32.0) stable; urgency=medium
 
+  [ Dan Callahan ]
   * Skip tests when DEB_BUILD_OPTIONS contains "nocheck".
 
- -- Dan Callahan Mon, 12 Apr 2021 13:07:36 +0000
+  [ Synapse Packaging team ]
+  * New synapse release 1.32.0.
+ + -- Synapse Packaging team Tue, 20 Apr 2021 14:28:39 +0100 matrix-synapse-py3 (1.31.0) stable; urgency=medium diff --git a/synapse/__init__.py b/synapse/__init__.py index 125a73d37813..79232c4de1c0 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -48,7 +48,7 @@ except ImportError: pass -__version__ = "1.32.0rc1" +__version__ = "1.32.0" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 438a8594cb5a74478da36fe33ba98d86e2ca00fc Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 20 Apr 2021 14:47:17 +0100 Subject: [PATCH 051/222] Update v1.32.0 changelog. It's m.login.application_service, not plural --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 4d48a321c6bd..482863c0e891 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,7 +5,7 @@ Synapse 1.32.0 (2021-04-20) This release removes the deprecated `GET /_synapse/admin/v1/users/` admin API. Please use the [v2 API](https://github.com/matrix-org/synapse/blob/develop/docs/admin_api/user_admin_api.rst#query-user-account) instead, which has improved capabilities. -This release requires Application Services to use type `m.login.application_services` when registering users via the `/_matrix/client/r0/register` endpoint to comply with the spec. Please ensure your Application Services are up to date. +This release requires Application Services to use type `m.login.application_service` when registering users via the `/_matrix/client/r0/register` endpoint to comply with the spec. Please ensure your Application Services are up to date. Bugfixes -------- From 913f790bb2ef7f1186e03afea85323dfa4da6df8 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 20 Apr 2021 15:33:56 +0100 Subject: [PATCH 052/222] Add note about expired Debian gpg signing keys to CHANGES.md --- CHANGES.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 482863c0e891..b0fbc5a45279 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -7,6 +7,12 @@ This release removes the deprecated `GET /_synapse/admin/v1/users/` adm This release requires Application Services to use type `m.login.application_service` when registering users via the `/_matrix/client/r0/register` endpoint to comply with the spec. Please ensure your Application Services are up to date. +If you are using the `packages.matrix.org` Debian repository for Synapse packages, note that our gpg signing keys have rotated and the old pair have expired. You can pull the latest keys with: + +``` +sudo wget -O /usr/share/keyrings/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg +``` + Bugfixes -------- From 05fa06834df10966be3f727fa0797424b12660f3 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 20 Apr 2021 15:50:54 +0100 Subject: [PATCH 053/222] Further tweaking on gpg signing key notice --- CHANGES.md | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index b0fbc5a45279..170d1e447db1 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -7,9 +7,12 @@ This release removes the deprecated `GET /_synapse/admin/v1/users/` adm This release requires Application Services to use type `m.login.application_service` when registering users via the `/_matrix/client/r0/register` endpoint to comply with the spec. Please ensure your Application Services are up to date. 
-If you are using the `packages.matrix.org` Debian repository for Synapse packages, note that our gpg signing keys have rotated and the old pair have expired. You can pull the latest keys with: +If you are using the `packages.matrix.org` Debian repository for Synapse packages, +note that we have recently updated the expiry date on the gpg signing key. If you see an +error similar to `The following signatures were invalid: EXPKEYSIG F473DD4473365DE1`, you +will need to get a fresh copy of the keys. You can do so with: -``` +```sh sudo wget -O /usr/share/keyrings/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg ``` From b8c5f6fddbc8c3203c2841500767ef2fc9dc6ff6 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 20 Apr 2021 17:11:36 +0100 Subject: [PATCH 054/222] Mention Prometheus metrics regression in v1.32.0 --- CHANGES.md | 6 ++++++ UPGRADE.rst | 9 +++++++++ 2 files changed, 15 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 170d1e447db1..7713328f12df 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,12 @@ Synapse 1.32.0 (2021-04-20) =========================== +**Note:** This release introduces [a regression](https://githubcom/matrix-org/synapse/issues/9853) +that can overwhelm connected Prometheus instances. This issue was not present in +Synapse v1.32.0rc1. It is recommended not to update to this release. If you have +upgraded to v1.32.0 already, please downgrade to v1.31.0. This issue will be +resolved in a subsequent release version shortly. + **Note:** This release requires Python 3.6+ and Postgres 9.6+ or SQLite 3.22+. This release removes the deprecated `GET /_synapse/admin/v1/users/` admin API. Please use the [v2 API](https://github.com/matrix-org/synapse/blob/develop/docs/admin_api/user_admin_api.rst#query-user-account) instead, which has improved capabilities. diff --git a/UPGRADE.rst b/UPGRADE.rst index 7a9b86905568..c8dce6222714 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -88,6 +88,15 @@ for example: Upgrading to v1.32.0 ==================== +Regression causing connected Prometheus instances to become overwhelmed +----------------------------------------------------------------------- + +This release introduces `a regression `_ +that can overwhelm connected Prometheus instances. This issue was not present in +Synapse v1.32.0rc1. It is recommended not to update to this release. If you have +upgraded to v1.32.0 already, please downgrade to v1.31.0. This issue will be +resolved in a subsequent release version shortly. + Dropping support for old Python, Postgres and SQLite versions ------------------------------------------------------------- From 683d6f75af0e941e9ab3bc0a985aa6ed5cc7a238 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 20 Apr 2021 14:55:20 -0400 Subject: [PATCH 055/222] Rename handler and config modules which end in handler/config. 
(#9816) --- changelog.d/9816.misc | 1 + docs/sample_config.yaml | 2 +- docs/sso_mapping_providers.md | 4 ++-- synapse/config/_base.pyi | 20 +++++++++---------- .../config/{consent_config.py => consent.py} | 0 synapse/config/homeserver.py | 10 +++++----- synapse/config/{jwt_config.py => jwt.py} | 0 synapse/config/{oidc_config.py => oidc.py} | 7 ++++++- synapse/config/{saml2_config.py => saml2.py} | 7 ++++++- ...er_notices_config.py => server_notices.py} | 0 synapse/handlers/{cas_handler.py => cas.py} | 0 synapse/handlers/{oidc_handler.py => oidc.py} | 5 +---- synapse/handlers/{saml_handler.py => saml.py} | 0 synapse/rest/client/v2_alpha/register.py | 2 +- synapse/server.py | 10 +++++----- tests/handlers/test_cas.py | 2 +- tests/handlers/test_oidc.py | 8 ++++---- 17 files changed, 43 insertions(+), 35 deletions(-) create mode 100644 changelog.d/9816.misc rename synapse/config/{consent_config.py => consent.py} (100%) rename synapse/config/{jwt_config.py => jwt.py} (100%) rename synapse/config/{oidc_config.py => oidc.py} (98%) rename synapse/config/{saml2_config.py => saml2.py} (97%) rename synapse/config/{server_notices_config.py => server_notices.py} (100%) rename synapse/handlers/{cas_handler.py => cas.py} (100%) rename synapse/handlers/{oidc_handler.py => oidc.py} (99%) rename synapse/handlers/{saml_handler.py => saml.py} (100%) diff --git a/changelog.d/9816.misc b/changelog.d/9816.misc new file mode 100644 index 000000000000..d0981225006e --- /dev/null +++ b/changelog.d/9816.misc @@ -0,0 +1 @@ +Rename some handlers and config modules to not duplicate the top-level module. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index d260d762590a..e0350279ad39 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1900,7 +1900,7 @@ saml2_config: # sub-properties: # # module: The class name of a custom mapping module. Default is -# 'synapse.handlers.oidc_handler.JinjaOidcMappingProvider'. +# 'synapse.handlers.oidc.JinjaOidcMappingProvider'. # See https://github.com/matrix-org/synapse/blob/master/docs/sso_mapping_providers.md#openid-mapping-providers # for information on implementing a custom mapping provider. # diff --git a/docs/sso_mapping_providers.md b/docs/sso_mapping_providers.md index e1d6ede7bac3..50020d1a4ad1 100644 --- a/docs/sso_mapping_providers.md +++ b/docs/sso_mapping_providers.md @@ -106,7 +106,7 @@ A custom mapping provider must specify the following methods: Synapse has a built-in OpenID mapping provider if a custom provider isn't specified in the config. It is located at -[`synapse.handlers.oidc_handler.JinjaOidcMappingProvider`](../synapse/handlers/oidc_handler.py). +[`synapse.handlers.oidc.JinjaOidcMappingProvider`](../synapse/handlers/oidc.py). ## SAML Mapping Providers @@ -190,4 +190,4 @@ A custom mapping provider must specify the following methods: Synapse has a built-in SAML mapping provider if a custom provider isn't specified in the config. It is located at -[`synapse.handlers.saml_handler.DefaultSamlMappingProvider`](../synapse/handlers/saml_handler.py). +[`synapse.handlers.saml.DefaultSamlMappingProvider`](../synapse/handlers/saml.py). 
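
Renaming modules like this would break configs that still reference the old dotted paths; the `oidc.py` and `saml2.py` hunks below therefore alias the legacy names. A generic sketch of that pattern, with hypothetical module paths:

```python
# Hypothetical paths, mirroring the aliasing done in the config hunks below.
DEFAULT_PROVIDER = "mypkg.handlers.oidc.JinjaOidcMappingProvider"
LEGACY_PROVIDER = "mypkg.handlers.oidc_handler.JinjaOidcMappingProvider"

def resolve_provider_module(config: dict) -> str:
    module = config.get("module", DEFAULT_PROVIDER)
    # Transparently map the pre-rename path onto the new location.
    if module == LEGACY_PROVIDER:
        module = DEFAULT_PROVIDER
    return module

assert resolve_provider_module({"module": LEGACY_PROVIDER}) == DEFAULT_PROVIDER
assert resolve_provider_module({}) == DEFAULT_PROVIDER
```
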
diff --git a/synapse/config/_base.pyi b/synapse/config/_base.pyi index ddec356a07b3..ff9abbc23212 100644 --- a/synapse/config/_base.pyi +++ b/synapse/config/_base.pyi @@ -7,16 +7,16 @@ from synapse.config import ( auth, captcha, cas, - consent_config, + consent, database, emailconfig, experimental, groups, - jwt_config, + jwt, key, logger, metrics, - oidc_config, + oidc, password_auth_providers, push, ratelimiting, @@ -24,9 +24,9 @@ from synapse.config import ( registration, repository, room_directory, - saml2_config, + saml2, server, - server_notices_config, + server_notices, spam_checker, sso, stats, @@ -65,11 +65,11 @@ class RootConfig: api: api.ApiConfig appservice: appservice.AppServiceConfig key: key.KeyConfig - saml2: saml2_config.SAML2Config + saml2: saml2.SAML2Config cas: cas.CasConfig sso: sso.SSOConfig - oidc: oidc_config.OIDCConfig - jwt: jwt_config.JWTConfig + oidc: oidc.OIDCConfig + jwt: jwt.JWTConfig auth: auth.AuthConfig email: emailconfig.EmailConfig worker: workers.WorkerConfig @@ -78,9 +78,9 @@ class RootConfig: spamchecker: spam_checker.SpamCheckerConfig groups: groups.GroupsConfig userdirectory: user_directory.UserDirectoryConfig - consent: consent_config.ConsentConfig + consent: consent.ConsentConfig stats: stats.StatsConfig - servernotices: server_notices_config.ServerNoticesConfig + servernotices: server_notices.ServerNoticesConfig roomdirectory: room_directory.RoomDirectoryConfig thirdpartyrules: third_party_event_rules.ThirdPartyRulesConfig tracer: tracer.TracerConfig diff --git a/synapse/config/consent_config.py b/synapse/config/consent.py similarity index 100% rename from synapse/config/consent_config.py rename to synapse/config/consent.py diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py index 58e3bcd5111d..c23b66c88c76 100644 --- a/synapse/config/homeserver.py +++ b/synapse/config/homeserver.py @@ -20,17 +20,17 @@ from .cache import CacheConfig from .captcha import CaptchaConfig from .cas import CasConfig -from .consent_config import ConsentConfig +from .consent import ConsentConfig from .database import DatabaseConfig from .emailconfig import EmailConfig from .experimental import ExperimentalConfig from .federation import FederationConfig from .groups import GroupsConfig -from .jwt_config import JWTConfig +from .jwt import JWTConfig from .key import KeyConfig from .logger import LoggingConfig from .metrics import MetricsConfig -from .oidc_config import OIDCConfig +from .oidc import OIDCConfig from .password_auth_providers import PasswordAuthProviderConfig from .push import PushConfig from .ratelimiting import RatelimitConfig @@ -39,9 +39,9 @@ from .repository import ContentRepositoryConfig from .room import RoomConfig from .room_directory import RoomDirectoryConfig -from .saml2_config import SAML2Config +from .saml2 import SAML2Config from .server import ServerConfig -from .server_notices_config import ServerNoticesConfig +from .server_notices import ServerNoticesConfig from .spam_checker import SpamCheckerConfig from .sso import SSOConfig from .stats import StatsConfig diff --git a/synapse/config/jwt_config.py b/synapse/config/jwt.py similarity index 100% rename from synapse/config/jwt_config.py rename to synapse/config/jwt.py diff --git a/synapse/config/oidc_config.py b/synapse/config/oidc.py similarity index 98% rename from synapse/config/oidc_config.py rename to synapse/config/oidc.py index 5fb94376fdb0..72402eb81d98 100644 --- a/synapse/config/oidc_config.py +++ b/synapse/config/oidc.py @@ -27,7 +27,10 @@ from ._base import Config, 
ConfigError, read_file -DEFAULT_USER_MAPPING_PROVIDER = "synapse.handlers.oidc_handler.JinjaOidcMappingProvider" +DEFAULT_USER_MAPPING_PROVIDER = "synapse.handlers.oidc.JinjaOidcMappingProvider" +# The module that JinjaOidcMappingProvider is in was renamed, we want to +# transparently handle both the same. +LEGACY_USER_MAPPING_PROVIDER = "synapse.handlers.oidc_handler.JinjaOidcMappingProvider" class OIDCConfig(Config): @@ -403,6 +406,8 @@ def _parse_oidc_config_dict( """ ump_config = oidc_config.get("user_mapping_provider", {}) ump_config.setdefault("module", DEFAULT_USER_MAPPING_PROVIDER) + if ump_config.get("module") == LEGACY_USER_MAPPING_PROVIDER: + ump_config["module"] = DEFAULT_USER_MAPPING_PROVIDER ump_config.setdefault("config", {}) ( diff --git a/synapse/config/saml2_config.py b/synapse/config/saml2.py similarity index 97% rename from synapse/config/saml2_config.py rename to synapse/config/saml2.py index 55a7838b1004..3d1218c8d14b 100644 --- a/synapse/config/saml2_config.py +++ b/synapse/config/saml2.py @@ -25,7 +25,10 @@ logger = logging.getLogger(__name__) -DEFAULT_USER_MAPPING_PROVIDER = ( +DEFAULT_USER_MAPPING_PROVIDER = "synapse.handlers.saml.DefaultSamlMappingProvider" +# The module that DefaultSamlMappingProvider is in was renamed, we want to +# transparently handle both the same. +LEGACY_USER_MAPPING_PROVIDER = ( "synapse.handlers.saml_handler.DefaultSamlMappingProvider" ) @@ -97,6 +100,8 @@ def read_config(self, config, **kwargs): # Use the default user mapping provider if not set ump_dict.setdefault("module", DEFAULT_USER_MAPPING_PROVIDER) + if ump_dict.get("module") == LEGACY_USER_MAPPING_PROVIDER: + ump_dict["module"] = DEFAULT_USER_MAPPING_PROVIDER # Ensure a config is present ump_dict["config"] = ump_dict.get("config") or {} diff --git a/synapse/config/server_notices_config.py b/synapse/config/server_notices.py similarity index 100% rename from synapse/config/server_notices_config.py rename to synapse/config/server_notices.py diff --git a/synapse/handlers/cas_handler.py b/synapse/handlers/cas.py similarity index 100% rename from synapse/handlers/cas_handler.py rename to synapse/handlers/cas.py diff --git a/synapse/handlers/oidc_handler.py b/synapse/handlers/oidc.py similarity index 99% rename from synapse/handlers/oidc_handler.py rename to synapse/handlers/oidc.py index b156196a70b7..45514be50fef 100644 --- a/synapse/handlers/oidc_handler.py +++ b/synapse/handlers/oidc.py @@ -37,10 +37,7 @@ from twisted.web.http_headers import Headers from synapse.config import ConfigError -from synapse.config.oidc_config import ( - OidcProviderClientSecretJwtKey, - OidcProviderConfig, -) +from synapse.config.oidc import OidcProviderClientSecretJwtKey, OidcProviderConfig from synapse.handlers.sso import MappingException, UserAttributes from synapse.http.site import SynapseRequest from synapse.logging.context import make_deferred_yieldable diff --git a/synapse/handlers/saml_handler.py b/synapse/handlers/saml.py similarity index 100% rename from synapse/handlers/saml_handler.py rename to synapse/handlers/saml.py diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index b26aad7b3419..c5a6800b8a42 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -30,7 +30,7 @@ ) from synapse.config import ConfigError from synapse.config.captcha import CaptchaConfig -from synapse.config.consent_config import ConsentConfig +from synapse.config.consent import ConsentConfig from synapse.config.emailconfig 
import ThreepidBehaviour from synapse.config.ratelimiting import FederationRateLimitConfig from synapse.config.registration import RegistrationConfig diff --git a/synapse/server.py b/synapse/server.py index 42d2fad8e869..59ae91b50338 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -70,7 +70,7 @@ from synapse.handlers.admin import AdminHandler from synapse.handlers.appservice import ApplicationServicesHandler from synapse.handlers.auth import AuthHandler, MacaroonGenerator -from synapse.handlers.cas_handler import CasHandler +from synapse.handlers.cas import CasHandler from synapse.handlers.deactivate_account import DeactivateAccountHandler from synapse.handlers.device import DeviceHandler, DeviceWorkerHandler from synapse.handlers.devicemessage import DeviceMessageHandler @@ -145,8 +145,8 @@ if TYPE_CHECKING: from txredisapi import RedisProtocol - from synapse.handlers.oidc_handler import OidcHandler - from synapse.handlers.saml_handler import SamlHandler + from synapse.handlers.oidc import OidcHandler + from synapse.handlers.saml import SamlHandler T = TypeVar("T", bound=Callable[..., Any]) @@ -696,13 +696,13 @@ def get_cas_handler(self) -> CasHandler: @cache_in_self def get_saml_handler(self) -> "SamlHandler": - from synapse.handlers.saml_handler import SamlHandler + from synapse.handlers.saml import SamlHandler return SamlHandler(self) @cache_in_self def get_oidc_handler(self) -> "OidcHandler": - from synapse.handlers.oidc_handler import OidcHandler + from synapse.handlers.oidc import OidcHandler return OidcHandler(self) diff --git a/tests/handlers/test_cas.py b/tests/handlers/test_cas.py index 0444b2679889..b625995d1253 100644 --- a/tests/handlers/test_cas.py +++ b/tests/handlers/test_cas.py @@ -13,7 +13,7 @@ # limitations under the License. from unittest.mock import Mock -from synapse.handlers.cas_handler import CasResponse +from synapse.handlers.cas import CasResponse from tests.test_utils import simple_async_mock from tests.unittest import HomeserverTestCase, override_config diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py index 34d2fc1dfb11..a25c89bd5bd3 100644 --- a/tests/handlers/test_oidc.py +++ b/tests/handlers/test_oidc.py @@ -499,7 +499,7 @@ def test_callback(self): self.assertRenderedError("fetch_error") # Handle code exchange failure - from synapse.handlers.oidc_handler import OidcError + from synapse.handlers.oidc import OidcError self.provider._exchange_code = simple_async_mock( raises=OidcError("invalid_request") @@ -583,7 +583,7 @@ def test_exchange_code(self): body=b'{"error": "foo", "error_description": "bar"}', ) ) - from synapse.handlers.oidc_handler import OidcError + from synapse.handlers.oidc import OidcError exc = self.get_failure(self.provider._exchange_code(code), OidcError) self.assertEqual(exc.value.error, "foo") @@ -1126,7 +1126,7 @@ def _generate_oidc_session_token( client_redirect_url: str, ui_auth_session_id: str = "", ) -> str: - from synapse.handlers.oidc_handler import OidcSessionData + from synapse.handlers.oidc import OidcSessionData return self.handler._token_generator.generate_oidc_session_token( state=state, @@ -1152,7 +1152,7 @@ async def _make_callback_with_userinfo( userinfo: the OIDC userinfo dict client_redirect_url: the URL to redirect to on success. 
""" - from synapse.handlers.oidc_handler import OidcSessionData + from synapse.handlers.oidc import OidcSessionData handler = hs.get_oidc_handler() provider = handler._providers["oidc"] From 5d281c10dd3d4d1f96635e92d803a74e3880d6b7 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 21 Apr 2021 10:03:31 +0100 Subject: [PATCH 056/222] Stop BackgroundProcessLoggingContext making new prometheus timeseries (#9854) This undoes part of b076bc276e881b262048307b6a226061d96c4a8d. --- changelog.d/9854.bugfix | 1 + synapse/metrics/background_process_metrics.py | 20 +++++++++++++++---- synapse/replication/tcp/protocol.py | 2 +- 3 files changed, 18 insertions(+), 5 deletions(-) create mode 100644 changelog.d/9854.bugfix diff --git a/changelog.d/9854.bugfix b/changelog.d/9854.bugfix new file mode 100644 index 000000000000..e39a3f991506 --- /dev/null +++ b/changelog.d/9854.bugfix @@ -0,0 +1 @@ +Fix a regression in Synapse 1.32.0 which caused Synapse to report large numbers of Prometheus time series, potentially overwhelming Prometheus instances. diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index 78e9cfbc26ee..3f621539f367 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -16,7 +16,7 @@ import logging import threading from functools import wraps -from typing import TYPE_CHECKING, Dict, Optional, Set +from typing import TYPE_CHECKING, Dict, Optional, Set, Union from prometheus_client.core import REGISTRY, Counter, Gauge @@ -199,7 +199,7 @@ async def run(): _background_process_start_count.labels(desc).inc() _background_process_in_flight_count.labels(desc).inc() - with BackgroundProcessLoggingContext("%s-%s" % (desc, count)) as context: + with BackgroundProcessLoggingContext(desc, count) as context: try: ctx = noop_context_manager() if bg_start_span: @@ -244,8 +244,20 @@ class BackgroundProcessLoggingContext(LoggingContext): __slots__ = ["_proc"] - def __init__(self, name: str): - super().__init__(name) + def __init__(self, name: str, instance_id: Optional[Union[int, str]] = None): + """ + + Args: + name: The name of the background process. Each distinct `name` gets a + separate prometheus time series. + + instance_id: an identifer to add to `name` to distinguish this instance of + the named background process in the logs. If this is `None`, one is + made up based on id(self). + """ + if instance_id is None: + instance_id = id(self) + super().__init__("%s-%s" % (name, instance_id)) self._proc = _BackgroundProcess(name, self) def start(self, rusage: "Optional[resource._RUsage]"): diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py index ba753318bd81..d10d57424618 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -185,7 +185,7 @@ def __init__(self, clock: Clock, handler: "ReplicationCommandHandler"): # a logcontext which we use for processing incoming commands. We declare it as a # background process so that the CPU stats get reported to prometheus. 
self._logging_context = BackgroundProcessLoggingContext( - "replication-conn-%s" % (self.conn_id,) + "replication-conn", self.conn_id ) def connectionMade(self): From 30c94862b4fbaa782e47eac37a673a53feae2bb1 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 20 Apr 2021 17:11:36 +0100 Subject: [PATCH 057/222] Mention Prometheus metrics regression in v1.32.0 --- CHANGES.md | 6 ++++++ UPGRADE.rst | 9 +++++++++ 2 files changed, 15 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 170d1e447db1..7713328f12df 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,12 @@ Synapse 1.32.0 (2021-04-20) =========================== +**Note:** This release introduces [a regression](https://githubcom/matrix-org/synapse/issues/9853) +that can overwhelm connected Prometheus instances. This issue was not present in +Synapse v1.32.0rc1. It is recommended not to update to this release. If you have +upgraded to v1.32.0 already, please downgrade to v1.31.0. This issue will be +resolved in a subsequent release version shortly. + **Note:** This release requires Python 3.6+ and Postgres 9.6+ or SQLite 3.22+. This release removes the deprecated `GET /_synapse/admin/v1/users/` admin API. Please use the [v2 API](https://github.com/matrix-org/synapse/blob/develop/docs/admin_api/user_admin_api.rst#query-user-account) instead, which has improved capabilities. diff --git a/UPGRADE.rst b/UPGRADE.rst index 7a9b86905568..c8dce6222714 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -88,6 +88,15 @@ for example: Upgrading to v1.32.0 ==================== +Regression causing connected Prometheus instances to become overwhelmed +----------------------------------------------------------------------- + +This release introduces `a regression `_ +that can overwhelm connected Prometheus instances. This issue was not present in +Synapse v1.32.0rc1. It is recommended not to update to this release. If you have +upgraded to v1.32.0 already, please downgrade to v1.31.0. This issue will be +resolved in a subsequent release version shortly. + Dropping support for old Python, Postgres and SQLite versions ------------------------------------------------------------- From a745531c10da75079fcac152935bc2ff505eec14 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Wed, 21 Apr 2021 14:01:12 +0100 Subject: [PATCH 058/222] 1.32.1 --- CHANGES.md | 9 +++++++++ changelog.d/9854.bugfix | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/9854.bugfix diff --git a/CHANGES.md b/CHANGES.md index 7713328f12df..65819ee1e1cd 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.32.1 (2021-04-21) +=========================== + +Bugfixes +-------- + +- Fix a regression in Synapse 1.32.0 which caused Synapse to report large numbers of Prometheus time series, potentially overwhelming Prometheus instances. ([\#9854](https://github.com/matrix-org/synapse/issues/9854)) + + Synapse 1.32.0 (2021-04-20) =========================== diff --git a/changelog.d/9854.bugfix b/changelog.d/9854.bugfix deleted file mode 100644 index e39a3f991506..000000000000 --- a/changelog.d/9854.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a regression in Synapse 1.32.0 which caused Synapse to report large numbers of Prometheus time series, potentially overwhelming Prometheus instances. 
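
As background on the Prometheus regression this release fixes: in Prometheus, every distinct label value is a separate time series. An illustrative sketch using `prometheus_client` directly (the metric and label names are invented):

```python
from prometheus_client import Counter

starts = Counter("background_process_starts", "Process starts", ["name"])

# Fine: one stable series regardless of how many connections come and go.
starts.labels("replication-conn").inc()

# The regression: baking a unique id into the name mints a brand-new time
# series per connection, which piles up and overwhelms Prometheus.
for conn_id in range(3):
    starts.labels("replication-conn-%s" % conn_id).inc()
```
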
diff --git a/debian/changelog b/debian/changelog index 83be4497ec39..b8cf2cac5896 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.32.1) stable; urgency=medium + + * New synapse release 1.32.1. + + -- Synapse Packaging team Wed, 21 Apr 2021 14:00:55 +0100 + matrix-synapse-py3 (1.32.0) stable; urgency=medium [ Dan Callahan ] diff --git a/synapse/__init__.py b/synapse/__init__.py index 79232c4de1c0..a0332d602d4b 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -48,7 +48,7 @@ except ImportError: pass -__version__ = "1.32.0" +__version__ = "1.32.1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 026a66f2b37998a6e62d07267e6a114f87dc9b84 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Wed, 21 Apr 2021 14:04:44 +0100 Subject: [PATCH 059/222] Fix typo in link to regression in 1.32.0 upgrade notes --- UPGRADE.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/UPGRADE.rst b/UPGRADE.rst index c8dce6222714..0be6e27cc0b4 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -91,7 +91,7 @@ Upgrading to v1.32.0 Regression causing connected Prometheus instances to become overwhelmed ----------------------------------------------------------------------- -This release introduces `a regression `_ +This release introduces `a regression `_ that can overwhelm connected Prometheus instances. This issue was not present in Synapse v1.32.0rc1. It is recommended not to update to this release. If you have upgraded to v1.32.0 already, please downgrade to v1.31.0. This issue will be From 98a1b84631e5fbc227f211821d9bce318dc921d8 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Wed, 21 Apr 2021 14:07:51 +0100 Subject: [PATCH 060/222] Add link to fixing prometheus to 1.32.0 upgrade notes; 1.32.1 has a fix --- CHANGES.md | 2 ++ UPGRADE.rst | 6 ++++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 65819ee1e1cd..d06bcd9ae85e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,8 @@ Synapse 1.32.1 (2021-04-21) =========================== +This release fixes the regression introduced in Synapse + Bugfixes -------- diff --git a/UPGRADE.rst b/UPGRADE.rst index 0be6e27cc0b4..d4651ec2d3d1 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -94,8 +94,10 @@ Regression causing connected Prometheus instances to become overwhelmed This release introduces `a regression `_ that can overwhelm connected Prometheus instances. This issue was not present in Synapse v1.32.0rc1. It is recommended not to update to this release. If you have -upgraded to v1.32.0 already, please downgrade to v1.31.0. This issue will be -resolved in a subsequent release version shortly. +upgraded to v1.32.0 already, please upgrade to v1.31.1 which contains a fix. +If you started Synapse on v1.32.0, you may need to remove excess writeahead logs +in order for Prometheus to recover; instructions for doing so are +`here `_. 
Dropping support for old Python, Postgres and SQLite versions ------------------------------------------------------------- From acb8c81041aa66e03b22150e457cfd2c7a44d436 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Wed, 21 Apr 2021 14:24:16 +0100 Subject: [PATCH 061/222] Add regression notes to CHANGES.md; fix link in 1.32.0 changelog --- CHANGES.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index d06bcd9ae85e..cc66f2f01de9 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,7 +1,11 @@ Synapse 1.32.1 (2021-04-21) =========================== -This release fixes the regression introduced in Synapse +This release fixes [a regression](https://github.com/matrix-org/synapse/issues/9853) +in Synapse 1.32.0 that caused connected Prometheus instances to become unstable. If you +ran Synapse 1.32.0 with Prometheus metrics, first upgrade to Synapse 1.32.1 and follow +[these instructions](https://github.com/matrix-org/synapse/pull/9854#issuecomment-823472183) +to clean up any excess writeahead logs. Bugfixes -------- @@ -12,7 +16,7 @@ Bugfixes Synapse 1.32.0 (2021-04-20) =========================== -**Note:** This release introduces [a regression](https://githubcom/matrix-org/synapse/issues/9853) +**Note:** This release introduces [a regression](https://github.com/matrix-org/synapse/issues/9853) that can overwhelm connected Prometheus instances. This issue was not present in Synapse v1.32.0rc1. It is recommended not to update to this release. If you have upgraded to v1.32.0 already, please downgrade to v1.31.0. This issue will be From bdb4c20dc1ed4b5c88ac2889d1a02a32a9dd8a1f Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Wed, 21 Apr 2021 14:44:04 +0100 Subject: [PATCH 062/222] Clarify 1.32.0/1 changelog and upgrade notes --- CHANGES.md | 4 +--- UPGRADE.rst | 11 ++++++----- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index cc66f2f01de9..7188f94445dc 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -18,9 +18,7 @@ Synapse 1.32.0 (2021-04-20) **Note:** This release introduces [a regression](https://github.com/matrix-org/synapse/issues/9853) that can overwhelm connected Prometheus instances. This issue was not present in -Synapse v1.32.0rc1. It is recommended not to update to this release. If you have -upgraded to v1.32.0 already, please downgrade to v1.31.0. This issue will be -resolved in a subsequent release version shortly. +1.32.0rc1, and is fixed in 1.32.1. See the changelog for 1.32.1 above for more information. **Note:** This release requires Python 3.6+ and Postgres 9.6+ or SQLite 3.22+. diff --git a/UPGRADE.rst b/UPGRADE.rst index d4651ec2d3d1..76d2ee394fc0 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -92,11 +92,12 @@ Regression causing connected Prometheus instances to become overwhelmed ----------------------------------------------------------------------- This release introduces `a regression `_ -that can overwhelm connected Prometheus instances. This issue was not present in -Synapse v1.32.0rc1. It is recommended not to update to this release. If you have -upgraded to v1.32.0 already, please upgrade to v1.31.1 which contains a fix. -If you started Synapse on v1.32.0, you may need to remove excess writeahead logs -in order for Prometheus to recover; instructions for doing so are +that can overwhelm connected Prometheus instances. This issue is not present in +Synapse v1.32.0rc1, and is fixed in Synapse v1.32.1. + +If you have been affected, please first upgrade to a more recent Synapse version. 
+You then may need to remove excess writeahead logs in order for Prometheus to recover. +Instructions for doing so are provided `here `_. Dropping support for old Python, Postgres and SQLite versions From d9bd62f9d1a6238b3f485caee07f9fd399b27134 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 21 Apr 2021 16:39:34 +0100 Subject: [PATCH 063/222] Make LoggingContext's name optional (#9857) Fixes https://github.com/matrix-org/synapse-s3-storage-provider/issues/55 --- changelog.d/9857.bugfix | 1 + synapse/logging/context.py | 15 ++++++++++++--- 2 files changed, 13 insertions(+), 3 deletions(-) create mode 100644 changelog.d/9857.bugfix diff --git a/changelog.d/9857.bugfix b/changelog.d/9857.bugfix new file mode 100644 index 000000000000..7eed41594df7 --- /dev/null +++ b/changelog.d/9857.bugfix @@ -0,0 +1 @@ +Fix a regression in Synapse v1.32.1 which caused `LoggingContext` errors in plugins. diff --git a/synapse/logging/context.py b/synapse/logging/context.py index dbd7d3a33a6e..7fc11a9ac2f8 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -258,7 +258,8 @@ class LoggingContext: child to the parent Args: - name (str): Name for the context for debugging. + name: Name for the context for logging. If this is omitted, it is + inherited from the parent context. parent_context (LoggingContext|None): The parent of the new context """ @@ -277,12 +278,11 @@ class LoggingContext: def __init__( self, - name: str, + name: Optional[str] = None, parent_context: "Optional[LoggingContext]" = None, request: Optional[ContextRequest] = None, ) -> None: self.previous_context = current_context() - self.name = name # track the resources used by this context so far self._resource_usage = ContextResourceUsage() @@ -314,6 +314,15 @@ def __init__( # the request param overrides the request from the parent context self.request = request + # if we don't have a `name`, but do have a parent context, use its name. + if self.parent_context and name is None: + name = str(self.parent_context) + if name is None: + raise ValueError( + "LoggingContext must be given either a name or a parent context" + ) + self.name = name + def __str__(self) -> str: return self.name From 0c23aa393cf5f26a9a49267d113767ffcf82d58f Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 21 Apr 2021 18:16:58 +0100 Subject: [PATCH 064/222] Note LoggingContext signature change incompatibility in 1.32.0 (#9859) 1.32.0 also introduced an incompatibility with Synapse modules that make use of `synapse.logging.context.LoggingContext`, such as [synapse-s3-storage-provider](https://github.com/matrix-org/synapse-s3-storage-provider). This PR adds a note to the 1.32.0 changelog and upgrade notes about it. --- CHANGES.md | 17 ++++++++++++----- UPGRADE.rst | 8 ++++---- 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 7188f94445dc..a1349252cbaa 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -2,10 +2,10 @@ Synapse 1.32.1 (2021-04-21) =========================== This release fixes [a regression](https://github.com/matrix-org/synapse/issues/9853) -in Synapse 1.32.0 that caused connected Prometheus instances to become unstable. If you -ran Synapse 1.32.0 with Prometheus metrics, first upgrade to Synapse 1.32.1 and follow -[these instructions](https://github.com/matrix-org/synapse/pull/9854#issuecomment-823472183) -to clean up any excess writeahead logs. 
+in Synapse 1.32.0 that caused connected Prometheus instances to become unstable. + +However, as this release is still subject to the `LoggingContext` change in 1.32.0, +it is recommended to remain on or downgrade to 1.31.0. Bugfixes -------- @@ -18,7 +18,14 @@ Synapse 1.32.0 (2021-04-20) **Note:** This release introduces [a regression](https://github.com/matrix-org/synapse/issues/9853) that can overwhelm connected Prometheus instances. This issue was not present in -1.32.0rc1, and is fixed in 1.32.1. See the changelog for 1.32.1 above for more information. +1.32.0rc1. If affected, it is recommended to downgrade to 1.31.0 in the meantime, and +follow [these instructions](https://github.com/matrix-org/synapse/pull/9854#issuecomment-823472183) +to clean up any excess writeahead logs. + +**Note:** This release also mistakenly included a change that may affected Synapse +modules that import `synapse.logging.context.LoggingContext`, such as +[synapse-s3-storage-provider](https://github.com/matrix-org/synapse-s3-storage-provider). +This will be fixed in a later Synapse version. **Note:** This release requires Python 3.6+ and Postgres 9.6+ or SQLite 3.22+. diff --git a/UPGRADE.rst b/UPGRADE.rst index 76d2ee394fc0..6af35bc38f43 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -93,11 +93,11 @@ Regression causing connected Prometheus instances to become overwhelmed This release introduces `a regression `_ that can overwhelm connected Prometheus instances. This issue is not present in -Synapse v1.32.0rc1, and is fixed in Synapse v1.32.1. +Synapse v1.32.0rc1. -If you have been affected, please first upgrade to a more recent Synapse version. -You then may need to remove excess writeahead logs in order for Prometheus to recover. -Instructions for doing so are provided +If you have been affected, please downgrade to 1.31.0. You then may need to +remove excess writeahead logs in order for Prometheus to recover. Instructions +for doing so are provided `here `_. Dropping support for old Python, Postgres and SQLite versions From 55159c48e31129c87b55d15d203df946ca33f884 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Wed, 21 Apr 2021 18:45:39 +0100 Subject: [PATCH 065/222] 1.32.2 --- CHANGES.md | 11 +++++++++++ changelog.d/9857.bugfix | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 4 files changed, 18 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/9857.bugfix diff --git a/CHANGES.md b/CHANGES.md index a1349252cbaa..f194f4db30e4 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,14 @@ +Synapse 1.32.2 (2021-04-21) +=========================== + +This release includes fixes for the two regressions introduced in 1.32.0. + +Bugfixes +-------- + +- Fix a regression in Synapse v1.32.1 which caused `LoggingContext` errors in plugins. ([\#9857](https://github.com/matrix-org/synapse/issues/9857)) + + Synapse 1.32.1 (2021-04-21) =========================== diff --git a/changelog.d/9857.bugfix b/changelog.d/9857.bugfix deleted file mode 100644 index 7eed41594df7..000000000000 --- a/changelog.d/9857.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a regression in Synapse v1.32.1 which caused `LoggingContext` errors in plugins. diff --git a/debian/changelog b/debian/changelog index b8cf2cac5896..9ebfc3c3f1dc 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.32.2) stable; urgency=medium + + * New synapse release 1.32.2. + + -- Synapse Packaging team Wed, 21 Apr 2021 18:43:52 +0100 + matrix-synapse-py3 (1.32.1) stable; urgency=medium * New synapse release 1.32.1. 
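As background for the #9857 fix shipped in this release: the `LoggingContext` change above made the `name` argument optional, inheriting it from the parent context when omitted. A minimal, self-contained sketch of just that resolution rule (the real class also tracks resource usage, the previous context, and the request):

    from typing import Optional

    class LoggingContext:
        """Simplified sketch of the name-resolution rule added in #9857."""

        def __init__(
            self,
            name: Optional[str] = None,
            parent_context: "Optional[LoggingContext]" = None,
        ) -> None:
            self.parent_context = parent_context

            # If no name was given, fall back to the parent's name; plugins
            # such as synapse-s3-storage-provider construct contexts without
            # a name.
            if self.parent_context and name is None:
                name = str(self.parent_context)
            if name is None:
                raise ValueError(
                    "LoggingContext must be given either a name or a parent context"
                )
            self.name = name

        def __str__(self) -> str:
            return self.name

    # A child created without a name reports its parent's name:
    parent = LoggingContext("request-1")
    child = LoggingContext(parent_context=parent)
    assert str(child) == "request-1"
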
diff --git a/synapse/__init__.py b/synapse/__init__.py index a0332d602d4b..781f5ac3a268 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -48,7 +48,7 @@ except ImportError: pass -__version__ = "1.32.1" +__version__ = "1.32.2" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From ca380881b16847f61b323424aceb65548180d624 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Wed, 21 Apr 2021 18:47:31 +0100 Subject: [PATCH 066/222] Update dates in changelogs --- CHANGES.md | 2 +- debian/changelog | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index f194f4db30e4..7475f7a4022b 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,4 +1,4 @@ -Synapse 1.32.2 (2021-04-21) +Synapse 1.32.2 (2021-04-22) =========================== This release includes fixes for the two regressions introduced in 1.32.0. diff --git a/debian/changelog b/debian/changelog index 9ebfc3c3f1dc..fd33bfda5c1c 100644 --- a/debian/changelog +++ b/debian/changelog @@ -2,7 +2,7 @@ matrix-synapse-py3 (1.32.2) stable; urgency=medium * New synapse release 1.32.2. - -- Synapse Packaging team Wed, 21 Apr 2021 18:43:52 +0100 + -- Synapse Packaging team Wed, 22 Apr 2021 12:43:52 +0100 matrix-synapse-py3 (1.32.1) stable; urgency=medium From 79e6d9e4b1d6231ebdb9b4e8d1bd5e382c37f26e Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Thu, 22 Apr 2021 11:04:51 +0100 Subject: [PATCH 067/222] Note regression was in 1.32.0 and 1.32.1 --- CHANGES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 7475f7a4022b..8381b3112d4e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,12 +1,12 @@ Synapse 1.32.2 (2021-04-22) =========================== -This release includes fixes for the two regressions introduced in 1.32.0. +This release includes a fix for a regression introduced in 1.32.0 and 1.32.1. Bugfixes -------- -- Fix a regression in Synapse v1.32.1 which caused `LoggingContext` errors in plugins. ([\#9857](https://github.com/matrix-org/synapse/issues/9857)) +- Fix a regression in Synapse 1.32.1 which caused `LoggingContext` errors in plugins. ([\#9857](https://github.com/matrix-org/synapse/issues/9857)) Synapse 1.32.1 (2021-04-21) From dac44459348bd1d771a2dd6970f2a9e6532ee85f Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Thu, 22 Apr 2021 11:09:31 +0100 Subject: [PATCH 068/222] A regression can't be introduced twice --- CHANGES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 8381b3112d4e..532b30e2323f 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,12 +1,12 @@ Synapse 1.32.2 (2021-04-22) =========================== -This release includes a fix for a regression introduced in 1.32.0 and 1.32.1. +This release includes a fix for a regression introduced in 1.32.0. Bugfixes -------- -- Fix a regression in Synapse 1.32.1 which caused `LoggingContext` errors in plugins. ([\#9857](https://github.com/matrix-org/synapse/issues/9857)) +- Fix a regression in Synapse 1.32.0 and 1.32.1 which caused `LoggingContext` errors in plugins. 
([\#9857](https://github.com/matrix-org/synapse/issues/9857)) Synapse 1.32.1 (2021-04-21) From 294c67503300b6bfa7785a5cfa55e25c1e452574 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 22 Apr 2021 16:43:50 +0100 Subject: [PATCH 069/222] Remove `synapse.types.Collection` (#9856) This is no longer required, since we have dropped support for Python 3.5. --- changelog.d/9856.misc | 1 + synapse/config/oidc.py | 4 ++-- synapse/events/spamcheck.py | 3 +-- synapse/federation/sender/__init__.py | 14 ++++++++++++-- synapse/handlers/appservice.py | 4 ++-- synapse/handlers/device.py | 3 +-- synapse/handlers/presence.py | 3 ++- synapse/handlers/sso.py | 3 ++- synapse/handlers/sync.py | 13 +++++++++++-- synapse/notifier.py | 9 ++------- synapse/replication/tcp/protocol.py | 3 +-- synapse/state/__init__.py | 3 ++- synapse/state/v2.py | 3 ++- synapse/storage/_base.py | 4 ++-- synapse/storage/database.py | 2 +- synapse/storage/databases/main/devices.py | 4 ++-- synapse/storage/databases/main/event_federation.py | 3 +-- synapse/storage/databases/main/events_worker.py | 13 +++++++++++-- synapse/storage/databases/main/roommember.py | 14 ++++++++++++-- synapse/storage/databases/main/search.py | 3 +-- synapse/storage/databases/main/stream.py | 4 ++-- synapse/storage/persist_events.py | 3 +-- synapse/storage/prepare_database.py | 3 +-- synapse/types.py | 14 -------------- synapse/util/caches/stream_change_cache.py | 3 +-- synapse/util/iterutils.py | 3 +-- 26 files changed, 77 insertions(+), 62 deletions(-) create mode 100644 changelog.d/9856.misc diff --git a/changelog.d/9856.misc b/changelog.d/9856.misc new file mode 100644 index 000000000000..d67e8c386a3e --- /dev/null +++ b/changelog.d/9856.misc @@ -0,0 +1 @@ +Remove redundant `synapse.types.Collection` type definition. diff --git a/synapse/config/oidc.py b/synapse/config/oidc.py index 72402eb81d98..ea0abf5aa20d 100644 --- a/synapse/config/oidc.py +++ b/synapse/config/oidc.py @@ -14,14 +14,14 @@ # limitations under the License. 
from collections import Counter -from typing import Iterable, List, Mapping, Optional, Tuple, Type +from typing import Collection, Iterable, List, Mapping, Optional, Tuple, Type import attr from synapse.config._util import validate_config from synapse.config.sso import SsoAttributeRequirement from synapse.python_dependencies import DependencyException, check_requirements -from synapse.types import Collection, JsonDict +from synapse.types import JsonDict from synapse.util.module_loader import load_module from synapse.util.stringutils import parse_and_validate_mxc_uri diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py index c727b48c1ef5..7118d5f52d12 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py @@ -15,12 +15,11 @@ import inspect import logging -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union +from typing import TYPE_CHECKING, Any, Collection, Dict, List, Optional, Tuple, Union from synapse.rest.media.v1._base import FileInfo from synapse.rest.media.v1.media_storage import ReadableFileWrapper from synapse.spam_checker_api import RegistrationBehaviour -from synapse.types import Collection from synapse.util.async_helpers import maybe_awaitable if TYPE_CHECKING: diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index b00a55324c02..022bbf7dad44 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -14,7 +14,17 @@ import abc import logging -from typing import TYPE_CHECKING, Dict, Hashable, Iterable, List, Optional, Set, Tuple +from typing import ( + TYPE_CHECKING, + Collection, + Dict, + Hashable, + Iterable, + List, + Optional, + Set, + Tuple, +) from prometheus_client import Counter @@ -31,7 +41,7 @@ events_processed_counter, ) from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.types import Collection, JsonDict, ReadReceipt, RoomStreamToken +from synapse.types import JsonDict, ReadReceipt, RoomStreamToken from synapse.util.metrics import Measure if TYPE_CHECKING: diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index d7bc4e23edd4..177310f0beaa 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import TYPE_CHECKING, Dict, List, Optional, Union +from typing import TYPE_CHECKING, Collection, Dict, List, Optional, Union from prometheus_client import Counter @@ -33,7 +33,7 @@ wrap_as_background_process, ) from synapse.storage.databases.main.directory import RoomAliasMapping -from synapse.types import Collection, JsonDict, RoomAlias, RoomStreamToken, UserID +from synapse.types import JsonDict, RoomAlias, RoomStreamToken, UserID from synapse.util.metrics import Measure if TYPE_CHECKING: diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index c1d780098153..34d39e3b4412 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging -from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Set, Tuple +from typing import TYPE_CHECKING, Collection, Dict, Iterable, List, Optional, Set, Tuple from synapse.api import errors from synapse.api.constants import EventTypes @@ -28,7 +28,6 @@ from synapse.logging.opentracing import log_kv, set_tag, trace from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import ( - Collection, JsonDict, StreamToken, UserID, diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 598466c9bdf7..7fd28ffa541f 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -28,6 +28,7 @@ from contextlib import contextmanager from typing import ( TYPE_CHECKING, + Collection, Dict, FrozenSet, Iterable, @@ -59,7 +60,7 @@ from synapse.replication.tcp.streams import PresenceFederationStream, PresenceStream from synapse.state import StateHandler from synapse.storage.databases.main import DataStore -from synapse.types import Collection, JsonDict, UserID, get_domain_from_id +from synapse.types import JsonDict, UserID, get_domain_from_id from synapse.util.async_helpers import Linearizer from synapse.util.caches.descriptors import _CacheContext, cached from synapse.util.metrics import Measure diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py index 8d00ffdc73bc..044ff06d8402 100644 --- a/synapse/handlers/sso.py +++ b/synapse/handlers/sso.py @@ -18,6 +18,7 @@ Any, Awaitable, Callable, + Collection, Dict, Iterable, List, @@ -40,7 +41,7 @@ from synapse.http import get_request_user_agent from synapse.http.server import respond_with_html, respond_with_redirect from synapse.http.site import SynapseRequest -from synapse.types import Collection, JsonDict, UserID, contains_invalid_mxid_characters +from synapse.types import JsonDict, UserID, contains_invalid_mxid_characters from synapse.util.async_helpers import Linearizer from synapse.util.stringutils import random_string diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index dc8ee8cd1749..a9a3ee05c3f3 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -14,7 +14,17 @@ # limitations under the License. 
import itertools import logging -from typing import TYPE_CHECKING, Any, Dict, FrozenSet, List, Optional, Set, Tuple +from typing import ( + TYPE_CHECKING, + Any, + Collection, + Dict, + FrozenSet, + List, + Optional, + Set, + Tuple, +) import attr from prometheus_client import Counter @@ -28,7 +38,6 @@ from synapse.storage.roommember import MemberSummary from synapse.storage.state import StateFilter from synapse.types import ( - Collection, JsonDict, MutableStateMap, Requester, diff --git a/synapse/notifier.py b/synapse/notifier.py index d5ab77058dd3..b9531007e27c 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -17,6 +17,7 @@ from typing import ( Awaitable, Callable, + Collection, Dict, Iterable, List, @@ -42,13 +43,7 @@ from synapse.logging.utils import log_function from synapse.metrics import LaterGauge from synapse.streams.config import PaginationConfig -from synapse.types import ( - Collection, - PersistedEventPosition, - RoomStreamToken, - StreamToken, - UserID, -) +from synapse.types import PersistedEventPosition, RoomStreamToken, StreamToken, UserID from synapse.util.async_helpers import ObservableDeferred, timeout_deferred from synapse.util.metrics import Measure from synapse.visibility import filter_events_for_client diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py index 6860576e7817..6e3705364f47 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -49,7 +49,7 @@ import logging import struct from inspect import isawaitable -from typing import TYPE_CHECKING, List, Optional +from typing import TYPE_CHECKING, Collection, List, Optional from prometheus_client import Counter from zope.interface import Interface, implementer @@ -76,7 +76,6 @@ ServerCommand, parse_command_from_line, ) -from synapse.types import Collection from synapse.util import Clock from synapse.util.stringutils import random_string diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index c7ee73115436..b3bd92d37c02 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -19,6 +19,7 @@ Any, Awaitable, Callable, + Collection, DefaultDict, Dict, FrozenSet, @@ -46,7 +47,7 @@ from synapse.state import v1, v2 from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.storage.roommember import ProfileInfo -from synapse.types import Collection, StateMap +from synapse.types import StateMap from synapse.util.async_helpers import Linearizer from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.metrics import Measure, measure_func diff --git a/synapse/state/v2.py b/synapse/state/v2.py index 32671ddbde4f..008644cd9862 100644 --- a/synapse/state/v2.py +++ b/synapse/state/v2.py @@ -18,6 +18,7 @@ from typing import ( Any, Callable, + Collection, Dict, Generator, Iterable, @@ -37,7 +38,7 @@ from synapse.api.errors import AuthError from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.events import EventBase -from synapse.types import Collection, MutableStateMap, StateMap +from synapse.types import MutableStateMap, StateMap from synapse.util import Clock logger = logging.getLogger(__name__) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 56dd3a4861d9..d472676acfe1 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -16,13 +16,13 @@ import logging import random from abc import ABCMeta -from typing import TYPE_CHECKING, Any, Iterable, Optional, Union +from typing import TYPE_CHECKING, Any, 
Collection, Iterable, Optional, Union from synapse.storage.database import LoggingTransaction # noqa: F401 from synapse.storage.database import make_in_list_sql_clause # noqa: F401 from synapse.storage.database import DatabasePool from synapse.storage.types import Connection -from synapse.types import Collection, StreamToken, get_domain_from_id +from synapse.types import StreamToken, get_domain_from_id from synapse.util import json_decoder if TYPE_CHECKING: diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 9a6d2b21f9a7..9452368bf08a 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -20,6 +20,7 @@ from typing import ( Any, Callable, + Collection, Dict, Iterable, Iterator, @@ -48,7 +49,6 @@ from synapse.storage.background_updates import BackgroundUpdater from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine from synapse.storage.types import Connection, Cursor -from synapse.types import Collection # python 3 does not have a maximum int value MAX_TXN_ID = 2 ** 63 - 1 diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index b20487558060..9be713399f8b 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -15,7 +15,7 @@ # limitations under the License. import abc import logging -from typing import Any, Dict, Iterable, List, Optional, Set, Tuple +from typing import Any, Collection, Dict, Iterable, List, Optional, Set, Tuple from synapse.api.errors import Codes, StoreError from synapse.logging.opentracing import ( @@ -31,7 +31,7 @@ LoggingTransaction, make_tuple_comparison_clause, ) -from synapse.types import Collection, JsonDict, get_verify_key_from_cross_signing_key +from synapse.types import JsonDict, get_verify_key_from_cross_signing_key from synapse.util import json_decoder, json_encoder from synapse.util.caches.descriptors import cached, cachedList from synapse.util.caches.lrucache import LruCache diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index 32ce70a3962e..ff81d5cd1768 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -14,7 +14,7 @@ import itertools import logging from queue import Empty, PriorityQueue -from typing import Dict, Iterable, List, Set, Tuple +from typing import Collection, Dict, Iterable, List, Set, Tuple from synapse.api.errors import StoreError from synapse.events import EventBase @@ -25,7 +25,6 @@ from synapse.storage.databases.main.signatures import SignatureWorkerStore from synapse.storage.engines import PostgresEngine from synapse.storage.types import Cursor -from synapse.types import Collection from synapse.util.caches.descriptors import cached from synapse.util.caches.lrucache import LruCache from synapse.util.iterutils import batch_iter diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 64d70785b806..2c823e09cfa6 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -15,7 +15,16 @@ import logging import threading from collections import namedtuple -from typing import Container, Dict, Iterable, List, Optional, Tuple, overload +from typing import ( + Collection, + Container, + Dict, + Iterable, + List, + Optional, + Tuple, + overload, +) from constantly import NamedConstant, Names from typing_extensions import 
Literal @@ -45,7 +54,7 @@ from synapse.storage.engines import PostgresEngine from synapse.storage.util.id_generators import MultiWriterIdGenerator, StreamIdGenerator from synapse.storage.util.sequence import build_sequence_generator -from synapse.types import Collection, JsonDict, get_domain_from_id +from synapse.types import JsonDict, get_domain_from_id from synapse.util.caches.descriptors import cached from synapse.util.caches.lrucache import LruCache from synapse.util.iterutils import batch_iter diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index fd525dce65c3..bd8513cd43aa 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -13,7 +13,17 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import TYPE_CHECKING, Dict, FrozenSet, Iterable, List, Optional, Set, Tuple +from typing import ( + TYPE_CHECKING, + Collection, + Dict, + FrozenSet, + Iterable, + List, + Optional, + Set, + Tuple, +) from synapse.api.constants import EventTypes, Membership from synapse.events import EventBase @@ -33,7 +43,7 @@ ProfileInfo, RoomsForUser, ) -from synapse.types import Collection, PersistedEventPosition, get_domain_from_id +from synapse.types import PersistedEventPosition, get_domain_from_id from synapse.util.async_helpers import Linearizer from synapse.util.caches import intern_string from synapse.util.caches.descriptors import _CacheContext, cached, cachedList diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py index 0276f30656cf..6480d5a9f5eb 100644 --- a/synapse/storage/databases/main/search.py +++ b/synapse/storage/databases/main/search.py @@ -15,7 +15,7 @@ import logging import re from collections import namedtuple -from typing import List, Optional, Set +from typing import Collection, List, Optional, Set from synapse.api.errors import SynapseError from synapse.events import EventBase @@ -23,7 +23,6 @@ from synapse.storage.database import DatabasePool from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.storage.engines import PostgresEngine, Sqlite3Engine -from synapse.types import Collection logger = logging.getLogger(__name__) diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index db5ce4ea0104..7581c7d3ff92 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -37,7 +37,7 @@ import abc import logging from collections import namedtuple -from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple +from typing import TYPE_CHECKING, Collection, Dict, List, Optional, Set, Tuple from twisted.internet import defer @@ -53,7 +53,7 @@ from synapse.storage.databases.main.events_worker import EventsWorkerStore from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine from synapse.storage.util.id_generators import MultiWriterIdGenerator -from synapse.types import Collection, PersistedEventPosition, RoomStreamToken +from synapse.types import PersistedEventPosition, RoomStreamToken from synapse.util.caches.descriptors import cached from synapse.util.caches.stream_change_cache import StreamChangeCache diff --git a/synapse/storage/persist_events.py b/synapse/storage/persist_events.py index 87e040b014a0..33dc752d8fd0 100644 --- a/synapse/storage/persist_events.py +++ b/synapse/storage/persist_events.py @@ -17,7 +17,7 @@ 
import itertools import logging from collections import deque, namedtuple -from typing import Dict, Iterable, List, Optional, Set, Tuple +from typing import Collection, Dict, Iterable, List, Optional, Set, Tuple from prometheus_client import Counter, Histogram @@ -32,7 +32,6 @@ from synapse.storage.databases.main.events import DeltaState from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.types import ( - Collection, PersistedEventPosition, RoomStreamToken, StateMap, diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index 05a93559748f..7a2cbee42600 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -17,7 +17,7 @@ import os import re from collections import Counter -from typing import Generator, Iterable, List, Optional, TextIO, Tuple +from typing import Collection, Generator, Iterable, List, Optional, TextIO, Tuple import attr from typing_extensions import Counter as CounterType @@ -27,7 +27,6 @@ from synapse.storage.engines import BaseDatabaseEngine from synapse.storage.engines.postgres import PostgresEngine from synapse.storage.types import Cursor -from synapse.types import Collection logger = logging.getLogger(__name__) diff --git a/synapse/types.py b/synapse/types.py index 21654ae68661..e19f28d5437c 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -15,13 +15,11 @@ import abc import re import string -import sys from collections import namedtuple from typing import ( TYPE_CHECKING, Any, Dict, - Iterable, Mapping, MutableMapping, Optional, @@ -50,18 +48,6 @@ from synapse.appservice.api import ApplicationService from synapse.storage.databases.main import DataStore -# define a version of typing.Collection that works on python 3.5 -if sys.version_info[:3] >= (3, 6, 0): - from typing import Collection -else: - from typing import Container, Sized - - T_co = TypeVar("T_co", covariant=True) - - class Collection(Iterable[T_co], Container[T_co], Sized): # type: ignore - __slots__ = () - - # Define a state map type from type/state_key to T (usually an event ID or # event) T = TypeVar("T") diff --git a/synapse/util/caches/stream_change_cache.py b/synapse/util/caches/stream_change_cache.py index 0469e7d1204b..e81e468899ea 100644 --- a/synapse/util/caches/stream_change_cache.py +++ b/synapse/util/caches/stream_change_cache.py @@ -14,11 +14,10 @@ import logging import math -from typing import Dict, FrozenSet, List, Mapping, Optional, Set, Union +from typing import Collection, Dict, FrozenSet, List, Mapping, Optional, Set, Union from sortedcontainers import SortedDict -from synapse.types import Collection from synapse.util import caches logger = logging.getLogger(__name__) diff --git a/synapse/util/iterutils.py b/synapse/util/iterutils.py index 6f73b1d56de4..abfdc2983261 100644 --- a/synapse/util/iterutils.py +++ b/synapse/util/iterutils.py @@ -15,6 +15,7 @@ import heapq from itertools import islice from typing import ( + Collection, Dict, Generator, Iterable, @@ -26,8 +27,6 @@ TypeVar, ) -from synapse.types import Collection - T = TypeVar("T") From 69018acbd2d1f331d6a52335b4938c3753b16de6 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 22 Apr 2021 16:53:24 +0100 Subject: [PATCH 070/222] Clear the resync bit after resyncing device lists (#9867) Fixes #9866. 
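The resync loop arose because the "needs resync" flag was previously only cleared when the cached device list was actually rewritten; if the cache already matched the remote's devices, the flag survived and triggered another resync. A rough, self-contained model of the intended flag lifecycle (illustrative names, loosely mirroring the storage methods touched below):

    class DeviceListResyncer:
        """Illustrative model of the resync flag handled in this patch."""

        def __init__(self) -> None:
            self.needs_resync = set()   # user_ids flagged for a full resync
            self.cache = {}             # user_id -> {device_id: device}

        def mark_stale(self, user_id: str) -> None:
            # Called when we receive a device-list update we can't apply.
            self.needs_resync.add(user_id)

        def resync(self, user_id: str, remote_devices: dict) -> None:
            if self.cache.get(user_id) != remote_devices:
                self.cache[user_id] = dict(remote_devices)
            # The fix: clear the flag whether or not the cache changed,
            # otherwise an already-correct cache is resynced forever.
            self.needs_resync.discard(user_id)

    resyncer = DeviceListResyncer()
    resyncer.mark_stale("@alice:remote")
    resyncer.resync("@alice:remote", {"DEV1": {"device_id": "DEV1"}})
    assert "@alice:remote" not in resyncer.needs_resync
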
--- changelog.d/9867.bugfix | 1 + synapse/handlers/device.py | 7 +++++++ synapse/storage/databases/main/devices.py | 19 +++++++++---------- 3 files changed, 17 insertions(+), 10 deletions(-) create mode 100644 changelog.d/9867.bugfix diff --git a/changelog.d/9867.bugfix b/changelog.d/9867.bugfix new file mode 100644 index 000000000000..f236de247d8b --- /dev/null +++ b/changelog.d/9867.bugfix @@ -0,0 +1 @@ +Fix a bug which could cause Synapse to get stuck in a loop of resyncing device lists. diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 34d39e3b4412..95bdc5902a2b 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -925,6 +925,10 @@ async def user_device_resync( else: cached_devices = await self.store.get_cached_devices_for_user(user_id) if cached_devices == {d["device_id"]: d for d in devices}: + logging.info( + "Skipping device list resync for %s, as our cache matches already", + user_id, + ) devices = [] ignore_devices = True @@ -940,6 +944,9 @@ async def user_device_resync( await self.store.update_remote_device_list_cache( user_id, devices, stream_id ) + # mark the cache as valid, whether or not we actually processed any device + # list updates. + await self.store.mark_remote_user_device_cache_as_valid(user_id) device_ids = [device["device_id"] for device in devices] # Handle cross-signing keys. diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 9be713399f8b..c9346de31673 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -717,7 +717,15 @@ async def mark_remote_user_device_cache_as_stale(self, user_id: str) -> None: keyvalues={"user_id": user_id}, values={}, insertion_values={"added_ts": self._clock.time_msec()}, - desc="make_remote_user_device_cache_as_stale", + desc="mark_remote_user_device_cache_as_stale", + ) + + async def mark_remote_user_device_cache_as_valid(self, user_id: str) -> None: + # Remove the database entry that says we need to resync devices, after a resync + await self.db_pool.simple_delete( + table="device_lists_remote_resync", + keyvalues={"user_id": user_id}, + desc="mark_remote_user_device_cache_as_valid", ) async def mark_remote_user_device_list_as_unsubscribed(self, user_id: str) -> None: @@ -1289,15 +1297,6 @@ def _update_remote_device_list_cache_txn( lock=False, ) - # If we're replacing the remote user's device list cache presumably - # we've done a full resync, so we remove the entry that says we need - # to resync - self.db_pool.simple_delete_txn( - txn, - table="device_lists_remote_resync", - keyvalues={"user_id": user_id}, - ) - async def add_device_change_to_streams( self, user_id: str, device_ids: Collection[str], hosts: List[str] ): From 177dae270420ee4b4c8fa5e2c74c5081d98da320 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 22 Apr 2021 17:49:11 +0100 Subject: [PATCH 071/222] Limit length of accepted email addresses (#9855) --- changelog.d/9855.misc | 1 + synapse/push/emailpusher.py | 9 +++- synapse/rest/client/v2_alpha/account.py | 8 ++-- synapse/rest/client/v2_alpha/register.py | 8 +++- synapse/util/threepids.py | 30 ++++++++++++ tests/rest/client/v2_alpha/test_register.py | 51 +++++++++++++++++++++ 6 files changed, 100 insertions(+), 7 deletions(-) create mode 100644 changelog.d/9855.misc diff --git a/changelog.d/9855.misc b/changelog.d/9855.misc new file mode 100644 index 000000000000..6a3d700fde71 --- /dev/null +++ b/changelog.d/9855.misc @@ -0,0 +1 @@ +Limit length of accepted email 
addresses. diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index cd89b54305ab..99a18874d1cf 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -19,8 +19,9 @@ from twisted.internet.interfaces import IDelayedCall from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.push import Pusher, PusherConfig, ThrottleParams +from synapse.push import Pusher, PusherConfig, PusherConfigException, ThrottleParams from synapse.push.mailer import Mailer +from synapse.util.threepids import validate_email if TYPE_CHECKING: from synapse.server import HomeServer @@ -71,6 +72,12 @@ def __init__(self, hs: "HomeServer", pusher_config: PusherConfig, mailer: Mailer self._is_processing = False + # Make sure that the email is valid. + try: + validate_email(self.email) + except ValueError: + raise PusherConfigException("Invalid email") + def on_started(self, should_check_for_notifs: bool) -> None: """Called when this pusher has been started. diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index 3aad15132d66..085561d3e9d8 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -39,7 +39,7 @@ from synapse.push.mailer import Mailer from synapse.util.msisdn import phone_number_to_msisdn from synapse.util.stringutils import assert_valid_client_secret, random_string -from synapse.util.threepids import canonicalise_email, check_3pid_allowed +from synapse.util.threepids import check_3pid_allowed, validate_email from ._base import client_patterns, interactive_auth_handler @@ -92,7 +92,7 @@ async def on_POST(self, request): # Stored in the database "foo@bar.com" # User requests with "FOO@bar.com" would raise a Not Found error try: - email = canonicalise_email(body["email"]) + email = validate_email(body["email"]) except ValueError as e: raise SynapseError(400, str(e)) send_attempt = body["send_attempt"] @@ -247,7 +247,7 @@ async def on_POST(self, request): # We store all email addresses canonicalised in the DB. # (See add_threepid in synapse/handlers/auth.py) try: - threepid["address"] = canonicalise_email(threepid["address"]) + threepid["address"] = validate_email(threepid["address"]) except ValueError as e: raise SynapseError(400, str(e)) # if using email, we must know about the email they're authing with! @@ -375,7 +375,7 @@ async def on_POST(self, request): # Otherwise the email will be sent to "FOO@bar.com" and stored as # "foo@bar.com" in database. 
try: - email = canonicalise_email(body["email"]) + email = validate_email(body["email"]) except ValueError as e: raise SynapseError(400, str(e)) send_attempt = body["send_attempt"] diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index c5a6800b8a42..a30a5df1b195 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -49,7 +49,11 @@ from synapse.util.msisdn import phone_number_to_msisdn from synapse.util.ratelimitutils import FederationRateLimiter from synapse.util.stringutils import assert_valid_client_secret, random_string -from synapse.util.threepids import canonicalise_email, check_3pid_allowed +from synapse.util.threepids import ( + canonicalise_email, + check_3pid_allowed, + validate_email, +) from ._base import client_patterns, interactive_auth_handler @@ -111,7 +115,7 @@ async def on_POST(self, request): # (See on_POST in EmailThreepidRequestTokenRestServlet # in synapse/rest/client/v2_alpha/account.py) try: - email = canonicalise_email(body["email"]) + email = validate_email(body["email"]) except ValueError as e: raise SynapseError(400, str(e)) send_attempt = body["send_attempt"] diff --git a/synapse/util/threepids.py b/synapse/util/threepids.py index 281c5be4fb84..a1cf1960b08f 100644 --- a/synapse/util/threepids.py +++ b/synapse/util/threepids.py @@ -18,6 +18,16 @@ logger = logging.getLogger(__name__) +# it's unclear what the maximum length of an email address is. RFC3696 (as corrected +# by errata) says: +# the upper limit on address lengths should normally be considered to be 254. +# +# In practice, mail servers appear to be more tolerant and allow 400 characters +# or so. Let's allow 500, which should be plenty for everyone. +# +MAX_EMAIL_ADDRESS_LENGTH = 500 + + def check_3pid_allowed(hs, medium, address): """Checks whether a given format of 3PID is allowed to be used on this HS @@ -70,3 +80,23 @@ def canonicalise_email(address: str) -> str: raise ValueError("Unable to parse email address") return parts[0].casefold() + "@" + parts[1].lower() + + +def validate_email(address: str) -> str: + """Does some basic validation on an email address. + + Returns the canonicalised email, as returned by `canonicalise_email`. + + Raises a ValueError if the email is invalid. + """ + # First we try canonicalising in case that fails + address = canonicalise_email(address) + + # Email addresses have to be at least 3 characters. 
+ if len(address) < 3: + raise ValueError("Unable to parse email address") + + if len(address) > MAX_EMAIL_ADDRESS_LENGTH: + raise ValueError("Unable to parse email address") + + return address diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 98695b05d5c4..1cad5f00eb20 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -310,6 +310,57 @@ def test_request_token_existing_email_inhibit_error(self): self.assertIsNotNone(channel.json_body.get("sid")) + @unittest.override_config( + { + "public_baseurl": "https://test_server", + "email": { + "smtp_host": "mail_server", + "smtp_port": 2525, + "notif_from": "sender@host", + }, + } + ) + def test_reject_invalid_email(self): + """Check that bad emails are rejected""" + + # Test for email with multiple @ + channel = self.make_request( + "POST", + b"register/email/requestToken", + {"client_secret": "foobar", "email": "email@@email", "send_attempt": 1}, + ) + self.assertEquals(400, channel.code, channel.result) + # Check error to ensure that we're not erroring due to a bug in the test. + self.assertEquals( + channel.json_body, + {"errcode": "M_UNKNOWN", "error": "Unable to parse email address"}, + ) + + # Test for email with no @ + channel = self.make_request( + "POST", + b"register/email/requestToken", + {"client_secret": "foobar", "email": "email", "send_attempt": 1}, + ) + self.assertEquals(400, channel.code, channel.result) + self.assertEquals( + channel.json_body, + {"errcode": "M_UNKNOWN", "error": "Unable to parse email address"}, + ) + + # Test for super long email + email = "a@" + "a" * 1000 + channel = self.make_request( + "POST", + b"register/email/requestToken", + {"client_secret": "foobar", "email": email, "send_attempt": 1}, + ) + self.assertEquals(400, channel.code, channel.result) + self.assertEquals( + channel.json_body, + {"errcode": "M_UNKNOWN", "error": "Unable to parse email address"}, + ) + class AccountValidityTestCase(unittest.HomeserverTestCase): From c1ddbbde4fb948cf740d4c59869157943d3711c6 Mon Sep 17 00:00:00 2001 From: manuroe Date: Thu, 22 Apr 2021 18:49:42 +0200 Subject: [PATCH 072/222] Handle all new rate limits in demo scripts (#9858) --- changelog.d/9858.misc | 1 + demo/start.sh | 54 +++++++++++++++++++++++++++++++++---------- 2 files changed, 43 insertions(+), 12 deletions(-) create mode 100644 changelog.d/9858.misc diff --git a/changelog.d/9858.misc b/changelog.d/9858.misc new file mode 100644 index 000000000000..f7e286fa69bf --- /dev/null +++ b/changelog.d/9858.misc @@ -0,0 +1 @@ +Handle recently added rate limits correctly when using `--no-rate-limit` with the demo scripts. 
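Each limiter configured below takes a `per_second` refill rate and a `burst_count` ceiling, so setting both to 1000 effectively disables it for a demo server. As a rough illustration (a toy token bucket, not Synapse's actual `Ratelimiter`), a pair like this behaves as follows:

    import time

    class TokenBucket:
        """Toy rate limiter: allow up to `burst_count` actions at once,
        refilled at `per_second` tokens per second."""

        def __init__(self, per_second: float, burst_count: float) -> None:
            self.per_second = per_second
            self.burst_count = burst_count
            self.tokens = burst_count
            self.last = time.monotonic()

        def allow(self) -> bool:
            now = time.monotonic()
            # Refill, capped at the burst ceiling.
            self.tokens = min(
                self.burst_count,
                self.tokens + (now - self.last) * self.per_second,
            )
            self.last = now
            if self.tokens >= 1:
                self.tokens -= 1
                return True
            return False

    # With per_second=1000 and burst_count=1000 the bucket almost never
    # empties, which is what the demo script's overrides rely on.
    limiter = TokenBucket(per_second=1000, burst_count=1000)
    assert all(limiter.allow() for _ in range(100))
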
diff --git a/demo/start.sh b/demo/start.sh index 621a5698b86e..bc4854091b56 100755 --- a/demo/start.sh +++ b/demo/start.sh @@ -96,18 +96,48 @@ for port in 8080 8081 8082; do # Check script parameters if [ $# -eq 1 ]; then if [ $1 = "--no-rate-limit" ]; then - # messages rate limit - echo 'rc_messages_per_second: 1000' >> $DIR/etc/$port.config - echo 'rc_message_burst_count: 1000' >> $DIR/etc/$port.config - - # registration rate limit - printf 'rc_registration:\n per_second: 1000\n burst_count: 1000\n' >> $DIR/etc/$port.config - - # login rate limit - echo 'rc_login:' >> $DIR/etc/$port.config - printf ' address:\n per_second: 1000\n burst_count: 1000\n' >> $DIR/etc/$port.config - printf ' account:\n per_second: 1000\n burst_count: 1000\n' >> $DIR/etc/$port.config - printf ' failed_attempts:\n per_second: 1000\n burst_count: 1000\n' >> $DIR/etc/$port.config + + # Disable any rate limiting + ratelimiting=$(cat <<-RC + rc_message: + per_second: 1000 + burst_count: 1000 + rc_registration: + per_second: 1000 + burst_count: 1000 + rc_login: + address: + per_second: 1000 + burst_count: 1000 + account: + per_second: 1000 + burst_count: 1000 + failed_attempts: + per_second: 1000 + burst_count: 1000 + rc_admin_redaction: + per_second: 1000 + burst_count: 1000 + rc_joins: + local: + per_second: 1000 + burst_count: 1000 + remote: + per_second: 1000 + burst_count: 1000 + rc_3pid_validation: + per_second: 1000 + burst_count: 1000 + rc_invites: + per_room: + per_second: 1000 + burst_count: 1000 + per_user: + per_second: 1000 + burst_count: 1000 + RC + ) + echo "${ratelimiting}" >> $DIR/etc/$port.config fi fi From 51a20914a863ac24387c424ccee14aa877e218f8 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 23 Apr 2021 11:08:41 +0100 Subject: [PATCH 073/222] Limit the size of HTTP responses read over federation. (#9833) --- changelog.d/9833.bugfix | 1 + synapse/http/client.py | 15 ++++++- synapse/http/matrixfederationclient.py | 43 ++++++++++++++++--- tests/http/test_fedclient.py | 59 ++++++++++++++++++++++++++ 4 files changed, 110 insertions(+), 8 deletions(-) create mode 100644 changelog.d/9833.bugfix diff --git a/changelog.d/9833.bugfix b/changelog.d/9833.bugfix new file mode 100644 index 000000000000..56f9c9626b5c --- /dev/null +++ b/changelog.d/9833.bugfix @@ -0,0 +1 @@ +Limit the size of HTTP responses read over federation. diff --git a/synapse/http/client.py b/synapse/http/client.py index 1730187ffae2..5f40f16e24d6 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -33,6 +33,7 @@ from canonicaljson import encode_canonical_json from netaddr import AddrFormatError, IPAddress, IPSet from prometheus_client import Counter +from typing_extensions import Protocol from zope.interface import implementer, provider from OpenSSL import SSL @@ -754,6 +755,16 @@ def _timeout_to_request_timed_out_error(f: Failure): return f +class ByteWriteable(Protocol): + """The type of object which must be passed into read_body_with_max_size. + + Typically this is a file object. 
+ """ + + def write(self, data: bytes) -> int: + pass + + class BodyExceededMaxSize(Exception): """The maximum allowed size of the HTTP body was exceeded.""" @@ -790,7 +801,7 @@ class _ReadBodyWithMaxSizeProtocol(protocol.Protocol): transport = None # type: Optional[ITCPTransport] def __init__( - self, stream: BinaryIO, deferred: defer.Deferred, max_size: Optional[int] + self, stream: ByteWriteable, deferred: defer.Deferred, max_size: Optional[int] ): self.stream = stream self.deferred = deferred @@ -830,7 +841,7 @@ def connectionLost(self, reason: Failure = connectionDone) -> None: def read_body_with_max_size( - response: IResponse, stream: BinaryIO, max_size: Optional[int] + response: IResponse, stream: ByteWriteable, max_size: Optional[int] ) -> defer.Deferred: """ Read a HTTP response body to a file-object. Optionally enforcing a maximum file size. diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index d48721a4e2a4..bb837b7b1979 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -1,5 +1,4 @@ -# Copyright 2014-2016 OpenMarket Ltd -# Copyright 2018 New Vector Ltd +# Copyright 2014-2021 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,11 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. import cgi +import codecs import logging import random import sys +import typing import urllib.parse -from io import BytesIO +from io import BytesIO, StringIO from typing import Callable, Dict, List, Optional, Tuple, Union import attr @@ -72,6 +73,9 @@ "synapse_http_matrixfederationclient_responses", "", ["method", "code"] ) +# a federation response can be rather large (eg a big state_ids is 50M or so), so we +# need a generous limit here. +MAX_RESPONSE_SIZE = 100 * 1024 * 1024 MAX_LONG_RETRIES = 10 MAX_SHORT_RETRIES = 3 @@ -167,12 +171,27 @@ async def _handle_json_response( try: check_content_type_is_json(response.headers) - # Use the custom JSON decoder (partially re-implements treq.json_content). - d = treq.text_content(response, encoding="utf-8") - d.addCallback(json_decoder.decode) + buf = StringIO() + d = read_body_with_max_size(response, BinaryIOWrapper(buf), MAX_RESPONSE_SIZE) d = timeout_deferred(d, timeout=timeout_sec, reactor=reactor) + def parse(_len: int): + return json_decoder.decode(buf.getvalue()) + + d.addCallback(parse) + body = await make_deferred_yieldable(d) + except BodyExceededMaxSize as e: + # The response was too big. + logger.warning( + "{%s} [%s] JSON response exceeded max size %i - %s %s", + request.txn_id, + request.destination, + MAX_RESPONSE_SIZE, + request.method, + request.uri.decode("ascii"), + ) + raise RequestSendFailed(e, can_retry=False) from e except ValueError as e: # The JSON content was invalid. logger.warning( @@ -218,6 +237,18 @@ async def _handle_json_response( return body +class BinaryIOWrapper: + """A wrapper for a TextIO which converts from bytes on the fly.""" + + def __init__(self, file: typing.TextIO, encoding="utf-8", errors="strict"): + self.decoder = codecs.getincrementaldecoder(encoding)(errors) + self.file = file + + def write(self, b: Union[bytes, bytearray]) -> int: + self.file.write(self.decoder.decode(b)) + return len(b) + + class MatrixFederationHttpClient: """HTTP client used to talk to other homeservers over the federation protocol. 
Send client certificates and signs requests. diff --git a/tests/http/test_fedclient.py b/tests/http/test_fedclient.py index 9e9718550711..ed9a884d761b 100644 --- a/tests/http/test_fedclient.py +++ b/tests/http/test_fedclient.py @@ -26,6 +26,7 @@ from synapse.api.errors import RequestSendFailed from synapse.http.matrixfederationclient import ( + MAX_RESPONSE_SIZE, MatrixFederationHttpClient, MatrixFederationRequest, ) @@ -560,3 +561,61 @@ def test_json_error(self, return_value): f = self.failureResultOf(test_d) self.assertIsInstance(f.value, RequestSendFailed) + + def test_too_big(self): + """ + Test what happens if a huge response is returned from the remote endpoint. + """ + + test_d = defer.ensureDeferred(self.cl.get_json("testserv:8008", "foo/bar")) + + self.pump() + + # Nothing happened yet + self.assertNoResult(test_d) + + # Make sure treq is trying to connect + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + (host, port, factory, _timeout, _bindAddress) = clients[0] + self.assertEqual(host, "1.2.3.4") + self.assertEqual(port, 8008) + + # complete the connection and wire it up to a fake transport + protocol = factory.buildProtocol(None) + transport = StringTransport() + protocol.makeConnection(transport) + + # that should have made it send the request to the transport + self.assertRegex(transport.value(), b"^GET /foo/bar") + self.assertRegex(transport.value(), b"Host: testserv:8008") + + # Deferred is still without a result + self.assertNoResult(test_d) + + # Send it a huge HTTP response + protocol.dataReceived( + b"HTTP/1.1 200 OK\r\n" + b"Server: Fake\r\n" + b"Content-Type: application/json\r\n" + b"\r\n" + ) + + self.pump() + + # should still be waiting + self.assertNoResult(test_d) + + sent = 0 + chunk_size = 1024 * 512 + while not test_d.called: + protocol.dataReceived(b"a" * chunk_size) + sent += chunk_size + self.assertLessEqual(sent, MAX_RESPONSE_SIZE) + + self.assertEqual(sent, MAX_RESPONSE_SIZE) + + f = self.failureResultOf(test_d) + self.assertIsInstance(f.value, RequestSendFailed) + + self.assertTrue(transport.disconnecting) From 3853a7edfcee1c00ba4df04b06821397e1155257 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 23 Apr 2021 11:47:07 +0100 Subject: [PATCH 074/222] Only store data in caches, not "smart" objects (#9845) --- changelog.d/9845.misc | 1 + synapse/push/bulk_push_rule_evaluator.py | 161 +++++++++++-------- synapse/storage/databases/main/roommember.py | 161 ++++++++++--------- 3 files changed, 182 insertions(+), 141 deletions(-) create mode 100644 changelog.d/9845.misc diff --git a/changelog.d/9845.misc b/changelog.d/9845.misc new file mode 100644 index 000000000000..875dd6d13156 --- /dev/null +++ b/changelog.d/9845.misc @@ -0,0 +1 @@ +Only store the raw data in the in-memory caches, rather than objects that include references to e.g. the data stores. diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 50b470c310b1..350646f45888 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -106,6 +106,10 @@ def __init__(self, hs: "HomeServer"): self.store = hs.get_datastore() self.auth = hs.get_auth() + # Used by `RulesForRoom` to ensure only one thing mutates the cache at a + # time. Keyed off room_id. 
+ self._rules_linearizer = Linearizer(name="rules_for_room") + self.room_push_rule_cache_metrics = register_cache( "cache", "room_push_rule_cache", @@ -123,7 +127,16 @@ async def _get_rules_for_event( dict of user_id -> push_rules """ room_id = event.room_id - rules_for_room = self._get_rules_for_room(room_id) + + rules_for_room_data = self._get_rules_for_room(room_id) + rules_for_room = RulesForRoom( + hs=self.hs, + room_id=room_id, + rules_for_room_cache=self._get_rules_for_room.cache, + room_push_rule_cache_metrics=self.room_push_rule_cache_metrics, + linearizer=self._rules_linearizer, + cached_data=rules_for_room_data, + ) rules_by_user = await rules_for_room.get_rules(event, context) @@ -142,17 +155,12 @@ async def _get_rules_for_event( return rules_by_user @lru_cache() - def _get_rules_for_room(self, room_id: str) -> "RulesForRoom": - """Get the current RulesForRoom object for the given room id""" - # It's important that RulesForRoom gets added to self._get_rules_for_room.cache + def _get_rules_for_room(self, room_id: str) -> "RulesForRoomData": + """Get the current RulesForRoomData object for the given room id""" + # It's important that the RulesForRoomData object gets added to self._get_rules_for_room.cache # before any lookup methods get called on it as otherwise there may be # a race if invalidate_all gets called (which assumes its in the cache) - return RulesForRoom( - self.hs, - room_id, - self._get_rules_for_room.cache, - self.room_push_rule_cache_metrics, - ) + return RulesForRoomData() async def _get_power_levels_and_sender_level( self, event: EventBase, context: EventContext @@ -282,11 +290,49 @@ def _condition_checker( return True +@attr.s(slots=True) +class RulesForRoomData: + """The data stored in the cache by `RulesForRoom`. + + We don't store `RulesForRoom` directly in the cache as we want our caches to + *only* include data, and not references to e.g. the data stores. + """ + + # event_id -> (user_id, state) + member_map = attr.ib(type=Dict[str, Tuple[str, str]], factory=dict) + # user_id -> rules + rules_by_user = attr.ib(type=Dict[str, List[Dict[str, dict]]], factory=dict) + + # The last state group we updated the caches for. If the state_group of + # a new event comes along, we know that we can just return the cached + # result. + # On invalidation of the rules themselves (if the user changes them), + # we invalidate everything and set state_group to `object()` + state_group = attr.ib(type=Union[object, int], factory=object) + + # A sequence number to keep track of when we're allowed to update the + # cache. We bump the sequence number when we invalidate the cache. If + # the sequence number changes while we're calculating stuff we should + # not update the cache with it. + sequence = attr.ib(type=int, default=0) + + # A cache of user_ids that we *know* aren't interesting, e.g. user_ids + # owned by AS's, or remote users, etc. (I.e. users we will never need to + # calculate push for) + # These never need to be invalidated as we will never set up push for + # them. + uninteresting_user_set = attr.ib(type=Set[str], factory=set) + + class RulesForRoom: """Caches push rules for users in a room. This efficiently handles users joining/leaving the room by not invalidating the entire cache for the room. + + A new instance is constructed for each call to + `BulkPushRuleEvaluator._get_rules_for_event`, with the cached data from + previous calls passed in. 
""" def __init__( @@ -295,6 +341,8 @@ def __init__( room_id: str, rules_for_room_cache: LruCache, room_push_rule_cache_metrics: CacheMetric, + linearizer: Linearizer, + cached_data: RulesForRoomData, ): """ Args: @@ -303,38 +351,21 @@ def __init__( rules_for_room_cache: The cache object that caches these RoomsForUser objects. room_push_rule_cache_metrics: The metrics object + linearizer: The linearizer used to ensure only one thing mutates + the cache at a time. Keyed off room_id + cached_data: Cached data from previous calls to `self.get_rules`, + can be mutated. """ self.room_id = room_id self.is_mine_id = hs.is_mine_id self.store = hs.get_datastore() self.room_push_rule_cache_metrics = room_push_rule_cache_metrics - self.linearizer = Linearizer(name="rules_for_room") - - # event_id -> (user_id, state) - self.member_map = {} # type: Dict[str, Tuple[str, str]] - # user_id -> rules - self.rules_by_user = {} # type: Dict[str, List[Dict[str, dict]]] - - # The last state group we updated the caches for. If the state_group of - # a new event comes along, we know that we can just return the cached - # result. - # On invalidation of the rules themselves (if the user changes them), - # we invalidate everything and set state_group to `object()` - self.state_group = object() - - # A sequence number to keep track of when we're allowed to update the - # cache. We bump the sequence number when we invalidate the cache. If - # the sequence number changes while we're calculating stuff we should - # not update the cache with it. - self.sequence = 0 - - # A cache of user_ids that we *know* aren't interesting, e.g. user_ids - # owned by AS's, or remote users, etc. (I.e. users we will never need to - # calculate push for) - # These never need to be invalidated as we will never set up push for - # them. - self.uninteresting_user_set = set() # type: Set[str] + # Used to ensure only one thing mutates the cache at a time. Keyed off + # room_id. + self.linearizer = linearizer + + self.data = cached_data # We need to be clever on the invalidating caches callbacks, as # otherwise the invalidation callback holds a reference to the object, @@ -352,25 +383,25 @@ async def get_rules( """ state_group = context.state_group - if state_group and self.state_group == state_group: + if state_group and self.data.state_group == state_group: logger.debug("Using cached rules for %r", self.room_id) self.room_push_rule_cache_metrics.inc_hits() - return self.rules_by_user + return self.data.rules_by_user - with (await self.linearizer.queue(())): - if state_group and self.state_group == state_group: + with (await self.linearizer.queue(self.room_id)): + if state_group and self.data.state_group == state_group: logger.debug("Using cached rules for %r", self.room_id) self.room_push_rule_cache_metrics.inc_hits() - return self.rules_by_user + return self.data.rules_by_user self.room_push_rule_cache_metrics.inc_misses() ret_rules_by_user = {} missing_member_event_ids = {} - if state_group and self.state_group == context.prev_group: + if state_group and self.data.state_group == context.prev_group: # If we have a simple delta then we can reuse most of the previous # results. 
- ret_rules_by_user = self.rules_by_user + ret_rules_by_user = self.data.rules_by_user current_state_ids = context.delta_ids push_rules_delta_state_cache_metric.inc_hits() @@ -393,24 +424,24 @@ async def get_rules( if typ != EventTypes.Member: continue - if user_id in self.uninteresting_user_set: + if user_id in self.data.uninteresting_user_set: continue if not self.is_mine_id(user_id): - self.uninteresting_user_set.add(user_id) + self.data.uninteresting_user_set.add(user_id) continue if self.store.get_if_app_services_interested_in_user(user_id): - self.uninteresting_user_set.add(user_id) + self.data.uninteresting_user_set.add(user_id) continue event_id = current_state_ids[key] - res = self.member_map.get(event_id, None) + res = self.data.member_map.get(event_id, None) if res: user_id, state = res if state == Membership.JOIN: - rules = self.rules_by_user.get(user_id, None) + rules = self.data.rules_by_user.get(user_id, None) if rules: ret_rules_by_user[user_id] = rules continue @@ -430,7 +461,7 @@ async def get_rules( else: # The push rules didn't change but lets update the cache anyway self.update_cache( - self.sequence, + self.data.sequence, members={}, # There were no membership changes rules_by_user=ret_rules_by_user, state_group=state_group, @@ -461,7 +492,7 @@ async def _update_rules_with_member_event_ids( for. Used when updating the cache. event: The event we are currently computing push rules for. """ - sequence = self.sequence + sequence = self.data.sequence rows = await self.store.get_membership_from_event_ids(member_event_ids.values()) @@ -501,23 +532,11 @@ async def _update_rules_with_member_event_ids( self.update_cache(sequence, members, ret_rules_by_user, state_group) - def invalidate_all(self) -> None: - # Note: Don't hand this function directly to an invalidation callback - # as it keeps a reference to self and will stop this instance from being - # GC'd if it gets dropped from the rules_to_user cache. 
Instead use
-        # `self.invalidate_all_cb`
-        logger.debug("Invalidating RulesForRoom for %r", self.room_id)
-        self.sequence += 1
-        self.state_group = object()
-        self.member_map = {}
-        self.rules_by_user = {}
-        push_rules_invalidation_counter.inc()
-
     def update_cache(self, sequence, members, rules_by_user, state_group) -> None:
-        if sequence == self.sequence:
-            self.member_map.update(members)
-            self.rules_by_user = rules_by_user
-            self.state_group = state_group
+        if sequence == self.data.sequence:
+            self.data.member_map.update(members)
+            self.data.rules_by_user = rules_by_user
+            self.data.state_group = state_group
 
 
 @attr.attrs(slots=True, frozen=True)
@@ -535,6 +554,10 @@ class _Invalidation:
     room_id = attr.ib(type=str)
 
     def __call__(self) -> None:
-        rules = self.cache.get(self.room_id, None, update_metrics=False)
-        if rules:
-            rules.invalidate_all()
+        rules_data = self.cache.get(self.room_id, None, update_metrics=False)
+        if rules_data:
+            rules_data.sequence += 1
+            rules_data.state_group = object()
+            rules_data.member_map = {}
+            rules_data.rules_by_user = {}
+            push_rules_invalidation_counter.inc()
diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py
index bd8513cd43aa..2a8532f8c1f9 100644
--- a/synapse/storage/databases/main/roommember.py
+++ b/synapse/storage/databases/main/roommember.py
@@ -23,8 +23,11 @@
     Optional,
     Set,
     Tuple,
+    Union,
 )
 
+import attr
+
 from synapse.api.constants import EventTypes, Membership
 from synapse.events import EventBase
 from synapse.events.snapshot import EventContext
@@ -43,7 +46,7 @@
     ProfileInfo,
     RoomsForUser,
 )
-from synapse.types import PersistedEventPosition, get_domain_from_id
+from synapse.types import PersistedEventPosition, StateMap, get_domain_from_id
 from synapse.util.async_helpers import Linearizer
 from synapse.util.caches import intern_string
 from synapse.util.caches.descriptors import _CacheContext, cached, cachedList
@@ -63,6 +66,10 @@ class RoomMemberWorkerStore(EventsWorkerStore):
     def __init__(self, database: DatabasePool, db_conn, hs):
         super().__init__(database, db_conn, hs)
 
+        # Used by `_get_joined_hosts` to ensure only one thing mutates the cache
+        # at a time. Keyed by room_id.
+        self._joined_host_linearizer = Linearizer("_JoinedHostsCache")
+
        # Is the current_state_events.membership up to date? Or is the
        # background update still running?
        self._current_state_events_membership_up_to_date = False
@@ -740,19 +747,82 @@ async def get_joined_hosts(self, room_id: str, state_entry):
 
     @cached(num_args=2, max_entries=10000, iterable=True)
     async def _get_joined_hosts(
-        self, room_id, state_group, current_state_ids, state_entry
-    ):
-        # We don't use `state_group`, its there so that we can cache based
-        # on it. However, its important that its never None, since two current_state's
-        # with a state_group of None are likely to be different.
+        self,
+        room_id: str,
+        state_group: int,
+        current_state_ids: StateMap[str],
+        state_entry: "_StateCacheEntry",
+    ) -> FrozenSet[str]:
+        # We don't use `state_group`, it's there so that we can cache based on
+        # it. However, it's important that it's never None, since two
+        # current_state's with a state_group of None are likely to be different.
+        #
+        # The `state_group` must match the `state_entry.state_group` (if not None).
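An aside on the invalidation change above: the `_Invalidation` callback deliberately closes over the plain `RulesForRoomData` rather than the `RulesForRoom` object, so the heavy object can still be garbage collected, and the `sequence` check in `update_cache` discards results computed before an invalidation. A toy model of that sequence guard, assuming only `attr` is installed; all names here are ours, not Synapse's:

```python
from typing import Dict

import attr


@attr.s(slots=True)
class CacheData:
    """Plain mutable data, safe for an invalidation callback to hold."""

    sequence = attr.ib(type=int, default=0)
    state_group = attr.ib(default=None)
    rules_by_user = attr.ib(type=Dict[str, str], factory=dict)


def update_cache(
    data: CacheData, sequence: int, rules_by_user: Dict[str, str], state_group: int
) -> None:
    # Only results computed under the current sequence number are applied;
    # anything started before an invalidation is silently dropped.
    if sequence == data.sequence:
        data.rules_by_user = rules_by_user
        data.state_group = state_group


def invalidate(data: CacheData) -> None:
    data.sequence += 1
    data.state_group = object()  # fresh sentinel: equal to nothing else
    data.rules_by_user = {}


data = CacheData()
seq = data.sequence              # snapshot taken before some slow computation
invalidate(data)                 # a concurrent invalidation arrives meanwhile
update_cache(data, seq, {"@a:x": "rules"}, state_group=1)
assert data.rules_by_user == {}  # the stale result was discarded
```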
assert state_group is not None - + assert state_entry.state_group is None or state_entry.state_group == state_group + + # We use a secondary cache of previous work to allow us to build up the + # joined hosts for the given state group based on previous state groups. + # + # We cache one object per room containing the results of the last state + # group we got joined hosts for. The idea is that generally + # `get_joined_hosts` is called with the "current" state group for the + # room, and so consecutive calls will be for consecutive state groups + # which point to the previous state group. cache = await self._get_joined_hosts_cache(room_id) - return await cache.get_destinations(state_entry) + + # If the state group in the cache matches, we already have the data we need. + if state_entry.state_group == cache.state_group: + return frozenset(cache.hosts_to_joined_users) + + # Since we'll mutate the cache we need to lock. + with (await self._joined_host_linearizer.queue(room_id)): + if state_entry.state_group == cache.state_group: + # Same state group, so nothing to do. We've already checked for + # this above, but the cache may have changed while waiting on + # the lock. + pass + elif state_entry.prev_group == cache.state_group: + # The cached work is for the previous state group, so we work out + # the delta. + for (typ, state_key), event_id in state_entry.delta_ids.items(): + if typ != EventTypes.Member: + continue + + host = intern_string(get_domain_from_id(state_key)) + user_id = state_key + known_joins = cache.hosts_to_joined_users.setdefault(host, set()) + + event = await self.get_event(event_id) + if event.membership == Membership.JOIN: + known_joins.add(user_id) + else: + known_joins.discard(user_id) + + if not known_joins: + cache.hosts_to_joined_users.pop(host, None) + else: + # The cache doesn't match the state group or prev state group, + # so we calculate the result from first principles. + joined_users = await self.get_joined_users_from_state( + room_id, state_entry + ) + + cache.hosts_to_joined_users = {} + for user_id in joined_users: + host = intern_string(get_domain_from_id(user_id)) + cache.hosts_to_joined_users.setdefault(host, set()).add(user_id) + + if state_entry.state_group: + cache.state_group = state_entry.state_group + else: + cache.state_group = object() + + return frozenset(cache.hosts_to_joined_users) @cached(max_entries=10000) def _get_joined_hosts_cache(self, room_id: str) -> "_JoinedHostsCache": - return _JoinedHostsCache(self, room_id) + return _JoinedHostsCache() @cached(num_args=2) async def did_forget(self, user_id: str, room_id: str) -> bool: @@ -1062,71 +1132,18 @@ def f(txn): await self.db_pool.runInteraction("forget_membership", f) +@attr.s(slots=True) class _JoinedHostsCache: - """Cache for joined hosts in a room that is optimised to handle updates - via state deltas. - """ - - def __init__(self, store, room_id): - self.store = store - self.room_id = room_id + """The cached data used by the `_get_joined_hosts_cache`.""" - self.hosts_to_joined_users = {} + # Dict of host to the set of their users in the room at the state group. + hosts_to_joined_users = attr.ib(type=Dict[str, Set[str]], factory=dict) - self.state_group = object() - - self.linearizer = Linearizer("_JoinedHostsCache") - - self._len = 0 - - async def get_destinations(self, state_entry: "_StateCacheEntry") -> Set[str]: - """Get set of destinations for a state entry - - Args: - state_entry - - Returns: - The destinations as a set. 
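The `_get_joined_hosts` rewrite above uses the classic check/lock/re-check shape: test the cache, take the per-room lock, then test again because another task may have refreshed the cache while we waited; a fresh `object()` doubles as a state group that can never compare equal to a real one. A simplified sketch using `asyncio.Lock` in place of Synapse's `Linearizer`; all names are illustrative:

```python
import asyncio
from typing import Awaitable, Callable, Dict, Set


class JoinedHostsToy:
    """Illustrative check/lock/re-check around a shared per-room cache."""

    def __init__(self) -> None:
        self._lock = asyncio.Lock()  # stands in for Synapse's Linearizer
        self.state_group: object = object()  # sentinel, never equals an int
        self.hosts_to_users: Dict[str, Set[str]] = {}

    async def get_hosts(
        self,
        state_group: int,
        compute: Callable[[], Awaitable[Dict[str, Set[str]]]],
    ) -> Set[str]:
        # Fast path: no lock needed if the cached state group matches.
        if state_group == self.state_group:
            return set(self.hosts_to_users)
        async with self._lock:
            # Re-check under the lock: another task may have refreshed the
            # cache while we were waiting.
            if state_group != self.state_group:
                self.hosts_to_users = await compute()
                self.state_group = state_group
            return set(self.hosts_to_users)


async def main() -> None:
    cache = JoinedHostsToy()

    async def compute() -> Dict[str, Set[str]]:
        return {"example.org": {"@alice:example.org"}}

    print(await cache.get_hosts(42, compute))  # computes
    print(await cache.get_hosts(42, compute))  # cache hit, no lock taken


asyncio.run(main())
```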
- """ - if state_entry.state_group == self.state_group: - return frozenset(self.hosts_to_joined_users) - - with (await self.linearizer.queue(())): - if state_entry.state_group == self.state_group: - pass - elif state_entry.prev_group == self.state_group: - for (typ, state_key), event_id in state_entry.delta_ids.items(): - if typ != EventTypes.Member: - continue - - host = intern_string(get_domain_from_id(state_key)) - user_id = state_key - known_joins = self.hosts_to_joined_users.setdefault(host, set()) - - event = await self.store.get_event(event_id) - if event.membership == Membership.JOIN: - known_joins.add(user_id) - else: - known_joins.discard(user_id) - - if not known_joins: - self.hosts_to_joined_users.pop(host, None) - else: - joined_users = await self.store.get_joined_users_from_state( - self.room_id, state_entry - ) - - self.hosts_to_joined_users = {} - for user_id in joined_users: - host = intern_string(get_domain_from_id(user_id)) - self.hosts_to_joined_users.setdefault(host, set()).add(user_id) - - if state_entry.state_group: - self.state_group = state_entry.state_group - else: - self.state_group = object() - self._len = sum(len(v) for v in self.hosts_to_joined_users.values()) - return frozenset(self.hosts_to_joined_users) + # The state group `hosts_to_joined_users` is derived from. Will be an object + # if the instance is newly created or if the state is not based on a state + # group. (An object is used as a sentinel value to ensure that it never is + # equal to anything else). + state_group = attr.ib(type=Union[object, int], factory=object) def __len__(self): - return self._len + return sum(len(v) for v in self.hosts_to_joined_users.values()) From d924827da1db5d210eb06db2247a1403ed4c8b9a Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 23 Apr 2021 07:05:51 -0400 Subject: [PATCH 075/222] Check for space membership during a remote join of a restricted room (#9814) When receiving a /send_join request for a room with join rules set to 'restricted', check if the user is a member of the spaces defined in the 'allow' key of the join rules. This only applies to an experimental room version, as defined in MSC3083. --- changelog.d/9814.feature | 1 + synapse/api/auth.py | 1 + synapse/handlers/event_auth.py | 86 +++++++++++++++++++++++++++++++++ synapse/handlers/federation.py | 44 +++++++++++++---- synapse/handlers/room_member.py | 62 ++---------------------- synapse/server.py | 5 ++ 6 files changed, 131 insertions(+), 68 deletions(-) create mode 100644 changelog.d/9814.feature create mode 100644 synapse/handlers/event_auth.py diff --git a/changelog.d/9814.feature b/changelog.d/9814.feature new file mode 100644 index 000000000000..9404ad2fc047 --- /dev/null +++ b/changelog.d/9814.feature @@ -0,0 +1 @@ +Update experimental support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083): restricting room access via group membership. diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 872fd100cdd0..2d845d0d5c33 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -65,6 +65,7 @@ class Auth: """ FIXME: This class contains a mix of functions for authenticating users of our client-server API and authenticating events added to room graphs. + The latter should be moved to synapse.handlers.event_auth.EventAuthHandler. 
""" def __init__(self, hs): diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py new file mode 100644 index 000000000000..eff639f40760 --- /dev/null +++ b/synapse/handlers/event_auth.py @@ -0,0 +1,86 @@ +# Copyright 2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +from synapse.api.constants import EventTypes, JoinRules +from synapse.api.room_versions import RoomVersion +from synapse.types import StateMap + +if TYPE_CHECKING: + from synapse.server import HomeServer + + +class EventAuthHandler: + """ + This class contains methods for authenticating events added to room graphs. + """ + + def __init__(self, hs: "HomeServer"): + self._store = hs.get_datastore() + + async def can_join_without_invite( + self, state_ids: StateMap[str], room_version: RoomVersion, user_id: str + ) -> bool: + """ + Check whether a user can join a room without an invite. + + When joining a room with restricted joined rules (as defined in MSC3083), + the membership of spaces must be checked during join. + + Args: + state_ids: The state of the room as it currently is. + room_version: The room version of the room being joined. + user_id: The user joining the room. + + Returns: + True if the user can join the room, false otherwise. + """ + # This only applies to room versions which support the new join rule. + if not room_version.msc3083_join_rules: + return True + + # If there's no join rule, then it defaults to invite (so this doesn't apply). + join_rules_event_id = state_ids.get((EventTypes.JoinRules, ""), None) + if not join_rules_event_id: + return True + + # If the join rule is not restricted, this doesn't apply. + join_rules_event = await self._store.get_event(join_rules_event_id) + if join_rules_event.content.get("join_rule") != JoinRules.MSC3083_RESTRICTED: + return True + + # If allowed is of the wrong form, then only allow invited users. + allowed_spaces = join_rules_event.content.get("allow", []) + if not isinstance(allowed_spaces, list): + return False + + # Get the list of joined rooms and see if there's an overlap. + joined_rooms = await self._store.get_rooms_for_user(user_id) + + # Pull out the other room IDs, invalid data gets filtered. + for space in allowed_spaces: + if not isinstance(space, dict): + continue + + space_id = space.get("space") + if not isinstance(space_id, str): + continue + + # The user was joined to one of the spaces specified, they can join + # this room! + if space_id in joined_rooms: + return True + + # The user was not in any of the required spaces. 
+ return False diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index dbdd7d2db327..9d867aaf4d55 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -146,6 +146,7 @@ def __init__(self, hs: "HomeServer"): self.is_mine_id = hs.is_mine_id self.spam_checker = hs.get_spam_checker() self.event_creation_handler = hs.get_event_creation_handler() + self._event_auth_handler = hs.get_event_auth_handler() self._message_handler = hs.get_message_handler() self._server_notices_mxid = hs.config.server_notices_mxid self.config = hs.config @@ -1673,8 +1674,40 @@ async def on_send_join_request(self, origin: str, pdu: EventBase) -> JsonDict: # would introduce the danger of backwards-compatibility problems. event.internal_metadata.send_on_behalf_of = origin + # Calculate the event context. context = await self.state_handler.compute_event_context(event) - context = await self._auth_and_persist_event(origin, event, context) + + # Get the state before the new event. + prev_state_ids = await context.get_prev_state_ids() + + # Check if the user is already in the room or invited to the room. + user_id = event.state_key + prev_member_event_id = prev_state_ids.get((EventTypes.Member, user_id), None) + newly_joined = True + user_is_invited = False + if prev_member_event_id: + prev_member_event = await self.store.get_event(prev_member_event_id) + newly_joined = prev_member_event.membership != Membership.JOIN + user_is_invited = prev_member_event.membership == Membership.INVITE + + # If the member is not already in the room, and not invited, check if + # they should be allowed access via membership in a space. + if ( + newly_joined + and not user_is_invited + and not await self._event_auth_handler.can_join_without_invite( + prev_state_ids, + event.room_version, + user_id, + ) + ): + raise AuthError( + 403, + "You do not belong to any of the required spaces to join this room.", + ) + + # Persist the event. + await self._auth_and_persist_event(origin, event, context) logger.debug( "on_send_join_request: After _auth_and_persist_event: %s, sigs: %s", @@ -1682,8 +1715,6 @@ async def on_send_join_request(self, origin: str, pdu: EventBase) -> JsonDict: event.signatures, ) - prev_state_ids = await context.get_prev_state_ids() - state_ids = list(prev_state_ids.values()) auth_chain = await self.store.get_auth_chain(event.room_id, state_ids) @@ -2006,7 +2037,7 @@ async def _auth_and_persist_event( state: Optional[Iterable[EventBase]] = None, auth_events: Optional[MutableStateMap[EventBase]] = None, backfilled: bool = False, - ) -> EventContext: + ) -> None: """ Process an event by performing auth checks and then persisting to the database. @@ -2028,9 +2059,6 @@ async def _auth_and_persist_event( event is an outlier), may be the auth events claimed by the remote server. backfilled: True if the event was backfilled. - - Returns: - The event context. 
""" context = await self._check_event_auth( origin, @@ -2060,8 +2088,6 @@ async def _auth_and_persist_event( ) raise - return context - async def _auth_and_persist_events( self, origin: str, diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 2bbfac6471e5..2c5bada1d89b 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -19,7 +19,7 @@ from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple from synapse import types -from synapse.api.constants import AccountDataTypes, EventTypes, JoinRules, Membership +from synapse.api.constants import AccountDataTypes, EventTypes, Membership from synapse.api.errors import ( AuthError, Codes, @@ -28,7 +28,6 @@ SynapseError, ) from synapse.api.ratelimiting import Ratelimiter -from synapse.api.room_versions import RoomVersion from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.types import JsonDict, Requester, RoomAlias, RoomID, StateMap, UserID @@ -64,6 +63,7 @@ def __init__(self, hs: "HomeServer"): self.profile_handler = hs.get_profile_handler() self.event_creation_handler = hs.get_event_creation_handler() self.account_data_handler = hs.get_account_data_handler() + self.event_auth_handler = hs.get_event_auth_handler() self.member_linearizer = Linearizer(name="member") @@ -178,62 +178,6 @@ async def ratelimit_invite( await self._invites_per_user_limiter.ratelimit(requester, invitee_user_id) - async def _can_join_without_invite( - self, state_ids: StateMap[str], room_version: RoomVersion, user_id: str - ) -> bool: - """ - Check whether a user can join a room without an invite. - - When joining a room with restricted joined rules (as defined in MSC3083), - the membership of spaces must be checked during join. - - Args: - state_ids: The state of the room as it currently is. - room_version: The room version of the room being joined. - user_id: The user joining the room. - - Returns: - True if the user can join the room, false otherwise. - """ - # This only applies to room versions which support the new join rule. - if not room_version.msc3083_join_rules: - return True - - # If there's no join rule, then it defaults to public (so this doesn't apply). - join_rules_event_id = state_ids.get((EventTypes.JoinRules, ""), None) - if not join_rules_event_id: - return True - - # If the join rule is not restricted, this doesn't apply. - join_rules_event = await self.store.get_event(join_rules_event_id) - if join_rules_event.content.get("join_rule") != JoinRules.MSC3083_RESTRICTED: - return True - - # If allowed is of the wrong form, then only allow invited users. - allowed_spaces = join_rules_event.content.get("allow", []) - if not isinstance(allowed_spaces, list): - return False - - # Get the list of joined rooms and see if there's an overlap. - joined_rooms = await self.store.get_rooms_for_user(user_id) - - # Pull out the other room IDs, invalid data gets filtered. - for space in allowed_spaces: - if not isinstance(space, dict): - continue - - space_id = space.get("space") - if not isinstance(space_id, str): - continue - - # The user was joined to one of the spaces specified, they can join - # this room! - if space_id in joined_rooms: - return True - - # The user was not in any of the required spaces. 
- return False - async def _local_membership_update( self, requester: Requester, @@ -302,7 +246,7 @@ async def _local_membership_update( if ( newly_joined and not user_is_invited - and not await self._can_join_without_invite( + and not await self.event_auth_handler.can_join_without_invite( prev_state_ids, event.room_version, user_id ) ): diff --git a/synapse/server.py b/synapse/server.py index 59ae91b50338..67598fffe34c 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -77,6 +77,7 @@ from synapse.handlers.directory import DirectoryHandler from synapse.handlers.e2e_keys import E2eKeysHandler from synapse.handlers.e2e_room_keys import E2eRoomKeysHandler +from synapse.handlers.event_auth import EventAuthHandler from synapse.handlers.events import EventHandler, EventStreamHandler from synapse.handlers.federation import FederationHandler from synapse.handlers.groups_local import GroupsLocalHandler, GroupsLocalWorkerHandler @@ -746,6 +747,10 @@ def get_account_data_handler(self) -> AccountDataHandler: def get_space_summary_handler(self) -> SpaceSummaryHandler: return SpaceSummaryHandler(self) + @cache_in_self + def get_event_auth_handler(self) -> EventAuthHandler: + return EventAuthHandler(self) + @cache_in_self def get_external_cache(self) -> ExternalCache: return ExternalCache(self) From 9d25a0ae65ce8728d0fda1eebaf0b469316f84d7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 23 Apr 2021 12:21:55 +0100 Subject: [PATCH 076/222] Split presence out of master (#9820) --- changelog.d/9820.feature | 1 + scripts/synapse_port_db | 7 +- synapse/app/generic_worker.py | 31 +------ synapse/config/workers.py | 27 +++++- synapse/handlers/presence.py | 56 +++++++---- synapse/replication/http/_base.py | 5 +- synapse/replication/slave/storage/presence.py | 50 ---------- synapse/replication/tcp/handler.py | 18 +++- synapse/replication/tcp/streams/_base.py | 17 +++- synapse/rest/client/v1/presence.py | 7 +- synapse/server.py | 6 +- synapse/storage/databases/main/__init__.py | 47 +--------- synapse/storage/databases/main/presence.py | 92 ++++++++++++++++++- .../delta/59/12presence_stream_instance.sql | 18 ++++ ...2presence_stream_instance_seq.sql.postgres | 20 ++++ tests/app/test_frontend_proxy.py | 83 ----------------- tests/rest/client/v1/test_presence.py | 5 +- 17 files changed, 245 insertions(+), 245 deletions(-) create mode 100644 changelog.d/9820.feature delete mode 100644 synapse/replication/slave/storage/presence.py create mode 100644 synapse/storage/databases/main/schema/delta/59/12presence_stream_instance.sql create mode 100644 synapse/storage/databases/main/schema/delta/59/12presence_stream_instance_seq.sql.postgres delete mode 100644 tests/app/test_frontend_proxy.py diff --git a/changelog.d/9820.feature b/changelog.d/9820.feature new file mode 100644 index 000000000000..f56b0bb3bdeb --- /dev/null +++ b/changelog.d/9820.feature @@ -0,0 +1 @@ +Add experimental support for handling presence on a worker. 
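The patch below moves presence writing to a single configurable worker instance. As a rough guide to the configuration rules it introduces, here is a toy validation sketch; the function name, error text, and config shapes are simplified stand-ins for `WorkerConfig`, not the actual implementation:

```python
from typing import Dict, List


def check_stream_writers(
    writers: Dict[str, List[str]], instance_map: Dict[str, dict]
) -> None:
    """Validate writer assignments, loosely mirroring the config checks."""
    for stream, instances in writers.items():
        for instance in instances:
            if instance != "master" and instance not in instance_map:
                raise ValueError(
                    "Instance %r for stream %r is not in the instance_map"
                    % (instance, stream)
                )
    # Presence does not (yet) support multiple writers.
    if len(writers.get("presence", ["master"])) != 1:
        raise ValueError("Must only specify one instance to handle `presence`.")


check_stream_writers(
    {"events": ["worker1"], "presence": ["worker1"]},
    {"worker1": {"host": "localhost", "port": 8034}},
)
```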
diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index b7c1ffc956f7..f0c93d522622 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -634,8 +634,11 @@ class Porter(object): "device_inbox_sequence", ("device_inbox", "device_federation_outbox") ) await self._setup_sequence( - "account_data_sequence", ("room_account_data", "room_tags_revisions", "account_data")) - await self._setup_sequence("receipts_sequence", ("receipts_linearized", )) + "account_data_sequence", + ("room_account_data", "room_tags_revisions", "account_data"), + ) + await self._setup_sequence("receipts_sequence", ("receipts_linearized",)) + await self._setup_sequence("presence_stream_sequence", ("presence_stream",)) await self._setup_auth_chain_sequence() # Step 3. Get tables. diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 26c458dbb68b..7b2ac3ca6427 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -55,7 +55,6 @@ from synapse.replication.slave.storage.filtering import SlavedFilteringStore from synapse.replication.slave.storage.groups import SlavedGroupServerStore from synapse.replication.slave.storage.keys import SlavedKeyStore -from synapse.replication.slave.storage.presence import SlavedPresenceStore from synapse.replication.slave.storage.profile import SlavedProfileStore from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore from synapse.replication.slave.storage.pushers import SlavedPusherStore @@ -64,7 +63,7 @@ from synapse.replication.slave.storage.room import RoomStore from synapse.replication.slave.storage.transactions import SlavedTransactionStore from synapse.rest.admin import register_servlets_for_media_repo -from synapse.rest.client.v1 import events, login, room +from synapse.rest.client.v1 import events, login, presence, room from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet from synapse.rest.client.v1.profile import ( ProfileAvatarURLRestServlet, @@ -110,6 +109,7 @@ from synapse.storage.databases.main.monthly_active_users import ( MonthlyActiveUsersWorkerStore, ) +from synapse.storage.databases.main.presence import PresenceStore from synapse.storage.databases.main.search import SearchWorkerStore from synapse.storage.databases.main.stats import StatsStore from synapse.storage.databases.main.transactions import TransactionWorkerStore @@ -121,26 +121,6 @@ logger = logging.getLogger("synapse.app.generic_worker") -class PresenceStatusStubServlet(RestServlet): - """If presence is disabled this servlet can be used to stub out setting - presence status. - """ - - PATTERNS = client_patterns("/presence/(?P[^/]*)/status") - - def __init__(self, hs): - super().__init__() - self.auth = hs.get_auth() - - async def on_GET(self, request, user_id): - await self.auth.get_user_by_req(request) - return 200, {"presence": "offline"} - - async def on_PUT(self, request, user_id): - await self.auth.get_user_by_req(request) - return 200, {} - - class KeyUploadServlet(RestServlet): """An implementation of the `KeyUploadServlet` that responds to read only requests, but otherwise proxies through to the master instance. 
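The `KeyUploadServlet` above, like the presence machinery in the rest of this patch, follows a recurring dispatch shape: handle the request locally when this instance is the designated writer, otherwise forward it over replication HTTP. A minimal sketch of that split, with stub coroutines standing in for the real local write and replication client; all names are hypothetical:

```python
import asyncio
from typing import Awaitable, Callable


async def set_presence_state(
    instance_name: str,
    presence_writer: str,
    write_locally: Callable[[str], Awaitable[None]],
    proxy_to_writer: Callable[[str, str], Awaitable[None]],
    user_id: str,
) -> None:
    # Writers mutate state directly; every other instance forwards the request.
    if instance_name == presence_writer:
        await write_locally(user_id)
    else:
        await proxy_to_writer(presence_writer, user_id)


async def main() -> None:
    async def write_locally(user_id: str) -> None:
        print("writing locally for", user_id)

    async def proxy_to_writer(instance: str, user_id: str) -> None:
        print("proxying to", instance, "for", user_id)

    await set_presence_state(
        "worker2", "worker1", write_locally, proxy_to_writer, "@alice:example.org"
    )


asyncio.run(main())
```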
@@ -241,6 +221,7 @@ class GenericWorkerSlavedStore( StatsStore, UIAuthWorkerStore, EndToEndRoomKeyStore, + PresenceStore, SlavedDeviceInboxStore, SlavedDeviceStore, SlavedReceiptsStore, @@ -259,7 +240,6 @@ class GenericWorkerSlavedStore( SlavedTransactionStore, SlavedProfileStore, SlavedClientIpStore, - SlavedPresenceStore, SlavedFilteringStore, MonthlyActiveUsersWorkerStore, MediaRepositoryStore, @@ -327,10 +307,7 @@ def _listen_http(self, listener_config: ListenerConfig): user_directory.register_servlets(self, resource) - # If presence is disabled, use the stub servlet that does - # not allow sending presence - if not self.config.use_presence: - PresenceStatusStubServlet(self).register(resource) + presence.register_servlets(self, resource) groups.register_servlets(self, resource) diff --git a/synapse/config/workers.py b/synapse/config/workers.py index b2540163d1b9..462630201d2e 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -64,6 +64,14 @@ class WriterLocations: Attributes: events: The instances that write to the event and backfill streams. typing: The instance that writes to the typing stream. + to_device: The instances that write to the to_device stream. Currently + can only be a single instance. + account_data: The instances that write to the account data streams. Currently + can only be a single instance. + receipts: The instances that write to the receipts stream. Currently + can only be a single instance. + presence: The instances that write to the presence stream. Currently + can only be a single instance. """ events = attr.ib( @@ -85,6 +93,11 @@ class WriterLocations: type=List[str], converter=_instance_to_list_converter, ) + presence = attr.ib( + default=["master"], + type=List[str], + converter=_instance_to_list_converter, + ) class WorkerConfig(Config): @@ -188,7 +201,14 @@ def read_config(self, config, **kwargs): # Check that the configured writers for events and typing also appears in # `instance_map`. - for stream in ("events", "typing", "to_device", "account_data", "receipts"): + for stream in ( + "events", + "typing", + "to_device", + "account_data", + "receipts", + "presence", + ): instances = _instance_to_list_converter(getattr(self.writers, stream)) for instance in instances: if instance != "master" and instance not in self.instance_map: @@ -215,6 +235,11 @@ def read_config(self, config, **kwargs): if len(self.writers.events) == 0: raise ConfigError("Must specify at least one instance to handle `events`.") + if len(self.writers.presence) != 1: + raise ConfigError( + "Must only specify one instance to handle `presence` messages." 
+            )
+
         self.events_shard_config = RoutableShardedWorkerHandlingConfig(
             self.writers.events
         )
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py
index 7fd28ffa541f..9938be3821ea 100644
--- a/synapse/handlers/presence.py
+++ b/synapse/handlers/presence.py
@@ -122,7 +122,8 @@
 
 
 class BasePresenceHandler(abc.ABC):
-    """Parts of the PresenceHandler that are shared between workers and master"""
+    """Parts of the PresenceHandler that are shared between workers and presence
+    writer"""
 
     def __init__(self, hs: "HomeServer"):
         self.clock = hs.get_clock()
@@ -309,8 +310,16 @@ def __init__(self, hs):
         super().__init__(hs)
         self.hs = hs
 
+        self._presence_writer_instance = hs.config.worker.writers.presence[0]
+
         self._presence_enabled = hs.config.use_presence
 
+        # Route presence EDUs to the right worker
+        hs.get_federation_registry().register_instances_for_edu(
+            "m.presence",
+            hs.config.worker.writers.presence,
+        )
+
         # The number of ongoing syncs on this process, by user id.
         # Empty if _presence_enabled is false.
         self._user_to_num_current_syncs = {}  # type: Dict[str, int]
@@ -318,8 +327,8 @@ def __init__(self, hs):
         self.notifier = hs.get_notifier()
         self.instance_id = hs.get_instance_id()
 
-        # user_id -> last_sync_ms. Lists the users that have stopped syncing
-        # but we haven't notified the master of that yet
+        # user_id -> last_sync_ms. Lists the users that have stopped syncing but
+        # we haven't notified the presence writer of that yet
         self.users_going_offline = {}
 
         self._bump_active_client = ReplicationBumpPresenceActiveTime.make_client(hs)
@@ -352,22 +361,23 @@ def send_user_sync(self, user_id, is_syncing, last_sync_ms):
         )
 
     def mark_as_coming_online(self, user_id):
-        """A user has started syncing. Send a UserSync to the master, unless they
-        had recently stopped syncing.
+        """A user has started syncing. Send a UserSync to the presence writer,
+        unless they had recently stopped syncing.
 
         Args:
             user_id (str)
         """
         going_offline = self.users_going_offline.pop(user_id, None)
         if not going_offline:
-            # Safe to skip because we haven't yet told the master they were offline
+            # Safe to skip because we haven't yet told the presence writer they
+            # were offline
             self.send_user_sync(user_id, True, self.clock.time_msec())
 
     def mark_as_going_offline(self, user_id):
-        """A user has stopped syncing. We wait before notifying the master as
-        its likely they'll come back soon. This allows us to avoid sending
-        a stopped syncing immediately followed by a started syncing notification
-        to the master
+        """A user has stopped syncing. We wait before notifying the presence
+        writer as it's likely they'll come back soon. This allows us to avoid
+        sending a stopped syncing immediately followed by a started syncing
+        notification to the presence writer.
 
         Args:
             user_id (str)
@@ -375,8 +385,8 @@ def mark_as_going_offline(self, user_id):
         self.users_going_offline[user_id] = self.clock.time_msec()
 
     def send_stop_syncing(self):
-        """Check if there are any users who have stopped syncing a while ago
-        and haven't come back yet. If there are poke the master about them.
+        """Check if there are any users who have stopped syncing a while ago and
+        haven't come back yet. If there are, poke the presence writer about them.
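The going-offline bookkeeping above debounces USER_SYNC traffic: an offline notification is withheld for a grace period in case the user reconnects. A self-contained toy version; the class, constant, and the `sent` list are illustrative only:

```python
import time
from typing import Dict, List, Tuple

UPDATE_SYNCING_USERS_MS = 10_000  # illustrative grace period


class OfflineDebouncer:
    """Toy version of the going-offline debounce described above."""

    def __init__(self) -> None:
        self.users_going_offline: Dict[str, int] = {}
        self.sent: List[Tuple[str, bool]] = []  # (user_id, is_syncing) "sent"

    def mark_as_coming_online(self, user_id: str) -> None:
        going_offline = self.users_going_offline.pop(user_id, None)
        if not going_offline:
            # They weren't in the grace window, so the writer currently
            # believes they are offline: announce the sync.
            self.sent.append((user_id, True))
        # Otherwise we never told the writer they stopped, so stay silent.

    def mark_as_going_offline(self, user_id: str) -> None:
        # Don't announce anything yet; they may reconnect shortly.
        self.users_going_offline[user_id] = int(time.time() * 1000)

    def send_stop_syncing(self) -> None:
        now = int(time.time() * 1000)
        for user_id, last_sync_ms in list(self.users_going_offline.items()):
            if now - last_sync_ms > UPDATE_SYNCING_USERS_MS:
                self.users_going_offline.pop(user_id, None)
                self.sent.append((user_id, False))


d = OfflineDebouncer()
d.mark_as_going_offline("@alice:example.org")
d.mark_as_coming_online("@alice:example.org")  # came back within the window
assert d.sent == []

d.mark_as_going_offline("@bob:example.org")
d.users_going_offline["@bob:example.org"] -= UPDATE_SYNCING_USERS_MS + 1
d.send_stop_syncing()
assert d.sent == [("@bob:example.org", False)]
```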
""" now = self.clock.time_msec() for user_id, last_sync_ms in list(self.users_going_offline.items()): @@ -492,9 +502,12 @@ async def set_state(self, target_user, state, ignore_status_msg=False): if not self.hs.config.use_presence: return - # Proxy request to master + # Proxy request to instance that writes presence await self._set_state_client( - user_id=user_id, state=state, ignore_status_msg=ignore_status_msg + instance_name=self._presence_writer_instance, + user_id=user_id, + state=state, + ignore_status_msg=ignore_status_msg, ) async def bump_presence_active_time(self, user): @@ -505,9 +518,11 @@ async def bump_presence_active_time(self, user): if not self.hs.config.use_presence: return - # Proxy request to master + # Proxy request to instance that writes presence user_id = user.to_string() - await self._bump_active_client(user_id=user_id) + await self._bump_active_client( + instance_name=self._presence_writer_instance, user_id=user_id + ) class PresenceHandler(BasePresenceHandler): @@ -1909,7 +1924,7 @@ def __init__(self, hs: "HomeServer", presence_handler: BasePresenceHandler): self._queue_presence_updates = True # Whether this instance is a presence writer. - self._presence_writer = hs.config.worker.worker_app is None + self._presence_writer = self._instance_name in hs.config.worker.writers.presence # The FederationSender instance, if this process sends federation traffic directly. self._federation = None @@ -1957,7 +1972,7 @@ def send_presence_to_destinations( Will forward to the local federation sender (if there is one) and queue to send over replication (if there are other federation sender instances.). - Must only be called on the master process. + Must only be called on the presence writer process. """ # This should only be called on a presence writer. @@ -2003,10 +2018,11 @@ async def get_replication_rows( We return rows in the form of `(destination, user_id)` to keep the size of each row bounded (rather than returning the sets in a row). - On workers this will query the master process via HTTP replication. + On workers this will query the presence writer process via HTTP replication. """ if instance_name != self._instance_name: - # If not local we query over http replication from the master + # If not local we query over http replication from the presence + # writer result = await self._repl_client( instance_name=instance_name, stream_name=PresenceFederationStream.NAME, diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index ece03467b57d..5685cf212144 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -158,7 +158,10 @@ async def _handle_request(self, request, **kwargs): def make_client(cls, hs): """Create a client that makes requests. - Returns a callable that accepts the same parameters as `_serialize_payload`. + Returns a callable that accepts the same parameters as + `_serialize_payload`, and also accepts an optional `instance_name` + parameter to specify which instance to hit (the instance must be in + the `instance_map` config). 
""" clock = hs.get_clock() client = hs.get_simple_http_client() diff --git a/synapse/replication/slave/storage/presence.py b/synapse/replication/slave/storage/presence.py deleted file mode 100644 index 57327d910d56..000000000000 --- a/synapse/replication/slave/storage/presence.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from synapse.replication.tcp.streams import PresenceStream -from synapse.storage import DataStore -from synapse.storage.database import DatabasePool -from synapse.storage.databases.main.presence import PresenceStore -from synapse.util.caches.stream_change_cache import StreamChangeCache - -from ._base import BaseSlavedStore -from ._slaved_id_tracker import SlavedIdTracker - - -class SlavedPresenceStore(BaseSlavedStore): - def __init__(self, database: DatabasePool, db_conn, hs): - super().__init__(database, db_conn, hs) - self._presence_id_gen = SlavedIdTracker(db_conn, "presence_stream", "stream_id") - - self._presence_on_startup = self._get_active_presence(db_conn) # type: ignore - - self.presence_stream_cache = StreamChangeCache( - "PresenceStreamChangeCache", self._presence_id_gen.get_current_token() - ) - - _get_active_presence = DataStore._get_active_presence - take_presence_startup_info = DataStore.take_presence_startup_info - _get_presence_for_user = PresenceStore.__dict__["_get_presence_for_user"] - get_presence_for_users = PresenceStore.__dict__["get_presence_for_users"] - - def get_current_presence_token(self): - return self._presence_id_gen.get_current_token() - - def process_replication_rows(self, stream_name, instance_name, token, rows): - if stream_name == PresenceStream.NAME: - self._presence_id_gen.advance(instance_name, token) - for row in rows: - self.presence_stream_cache.entity_has_changed(row.user_id, token) - self._get_presence_for_user.invalidate((row.user_id,)) - return super().process_replication_rows(stream_name, instance_name, token, rows) diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index 2ce1b9f2224f..7ced4c543c26 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -55,6 +55,8 @@ CachesStream, EventsStream, FederationStream, + PresenceFederationStream, + PresenceStream, ReceiptsStream, Stream, TagAccountDataStream, @@ -99,6 +101,10 @@ def __init__(self, hs: "HomeServer"): self._instance_id = hs.get_instance_id() self._instance_name = hs.get_instance_name() + self._is_presence_writer = ( + hs.get_instance_name() in hs.config.worker.writers.presence + ) + self._streams = { stream.NAME: stream(hs) for stream in STREAMS_MAP.values() } # type: Dict[str, Stream] @@ -153,6 +159,14 @@ def __init__(self, hs: "HomeServer"): continue + if isinstance(stream, (PresenceStream, PresenceFederationStream)): + # Only add PresenceStream as a source on the instance in charge + # of presence. 
+ if self._is_presence_writer: + self._streams_to_replicate.append(stream) + + continue + # Only add any other streams if we're on master. if hs.config.worker_app is not None: continue @@ -350,7 +364,7 @@ def on_USER_SYNC( ) -> Optional[Awaitable[None]]: user_sync_counter.inc() - if self._is_master: + if self._is_presence_writer: return self._presence_handler.update_external_syncs_row( cmd.instance_id, cmd.user_id, cmd.is_syncing, cmd.last_sync_ms ) @@ -360,7 +374,7 @@ def on_USER_SYNC( def on_CLEAR_USER_SYNC( self, conn: IReplicationConnection, cmd: ClearUserSyncsCommand ) -> Optional[Awaitable[None]]: - if self._is_master: + if self._is_presence_writer: return self._presence_handler.update_external_syncs_clear(cmd.instance_id) else: return None diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py index 9d75a89f1caa..b03824925a91 100644 --- a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py @@ -272,15 +272,22 @@ class PresenceStream(Stream): NAME = "presence" ROW_TYPE = PresenceStreamRow - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): store = hs.get_datastore() - if hs.config.worker_app is None: - # on the master, query the presence handler + if hs.get_instance_name() in hs.config.worker.writers.presence: + # on the presence writer, query the presence handler presence_handler = hs.get_presence_handler() - update_function = presence_handler.get_all_presence_updates + + from synapse.handlers.presence import PresenceHandler + + assert isinstance(presence_handler, PresenceHandler) + + update_function = ( + presence_handler.get_all_presence_updates + ) # type: UpdateFunction else: - # Query master process + # Query presence writer process update_function = make_http_update_function(hs, self.NAME) super().__init__( diff --git a/synapse/rest/client/v1/presence.py b/synapse/rest/client/v1/presence.py index c232484f29ec..2b24fe5aa65f 100644 --- a/synapse/rest/client/v1/presence.py +++ b/synapse/rest/client/v1/presence.py @@ -35,10 +35,15 @@ def __init__(self, hs): self.clock = hs.get_clock() self.auth = hs.get_auth() + self._use_presence = hs.config.server.use_presence + async def on_GET(self, request, user_id): requester = await self.auth.get_user_by_req(request) user = UserID.from_string(user_id) + if not self._use_presence: + return 200, {"presence": "offline"} + if requester.user != user: allowed = await self.presence_handler.is_visible( observed_user=user, observer_user=requester.user @@ -80,7 +85,7 @@ async def on_PUT(self, request, user_id): except Exception: raise SynapseError(400, "Unable to parse state") - if self.hs.config.use_presence: + if self._use_presence: await self.presence_handler.set_state(user, state) return 200, {} diff --git a/synapse/server.py b/synapse/server.py index 67598fffe34c..8c147be2b3d0 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -418,10 +418,10 @@ def get_state_resolution_handler(self) -> StateResolutionHandler: @cache_in_self def get_presence_handler(self) -> BasePresenceHandler: - if self.config.worker_app: - return WorkerPresenceHandler(self) - else: + if self.get_instance_name() in self.config.worker.writers.presence: return PresenceHandler(self) + else: + return WorkerPresenceHandler(self) @cache_in_self def get_typing_writer_handler(self) -> TypingWriterHandler: diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index 5c50f5f950a4..49c7606d5138 100644 --- 
a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -17,7 +17,6 @@ import logging from typing import List, Optional, Tuple -from synapse.api.constants import PresenceState from synapse.config.homeserver import HomeServerConfig from synapse.storage.database import DatabasePool from synapse.storage.databases.main.stats import UserSortOrder @@ -51,7 +50,7 @@ from .metrics import ServerMetricsStore from .monthly_active_users import MonthlyActiveUsersStore from .openid import OpenIdStore -from .presence import PresenceStore, UserPresenceState +from .presence import PresenceStore from .profile import ProfileStore from .purge_events import PurgeEventsStore from .push_rule import PushRuleStore @@ -126,9 +125,6 @@ def __init__(self, database: DatabasePool, db_conn, hs): self._clock = hs.get_clock() self.database_engine = database.engine - self._presence_id_gen = StreamIdGenerator( - db_conn, "presence_stream", "stream_id" - ) self._public_room_id_gen = StreamIdGenerator( db_conn, "public_room_list_stream", "stream_id" ) @@ -177,21 +173,6 @@ def __init__(self, database: DatabasePool, db_conn, hs): super().__init__(database, db_conn, hs) - self._presence_on_startup = self._get_active_presence(db_conn) - - presence_cache_prefill, min_presence_val = self.db_pool.get_cache_dict( - db_conn, - "presence_stream", - entity_column="user_id", - stream_column="stream_id", - max_value=self._presence_id_gen.get_current_token(), - ) - self.presence_stream_cache = StreamChangeCache( - "PresenceStreamChangeCache", - min_presence_val, - prefilled_cache=presence_cache_prefill, - ) - device_list_max = self._device_list_id_gen.get_current_token() self._device_list_stream_cache = StreamChangeCache( "DeviceListStreamChangeCache", device_list_max @@ -238,32 +219,6 @@ def __init__(self, database: DatabasePool, db_conn, hs): def get_device_stream_token(self) -> int: return self._device_list_id_gen.get_current_token() - def take_presence_startup_info(self): - active_on_startup = self._presence_on_startup - self._presence_on_startup = None - return active_on_startup - - def _get_active_presence(self, db_conn): - """Fetch non-offline presence from the database so that we can register - the appropriate time outs. - """ - - sql = ( - "SELECT user_id, state, last_active_ts, last_federation_update_ts," - " last_user_sync_ts, status_msg, currently_active FROM presence_stream" - " WHERE state != ?" - ) - - txn = db_conn.cursor() - txn.execute(sql, (PresenceState.OFFLINE,)) - rows = self.db_pool.cursor_to_dict(txn) - txn.close() - - for row in rows: - row["currently_active"] = bool(row["currently_active"]) - - return [UserPresenceState(**row) for row in rows] - async def get_users(self) -> List[JsonDict]: """Function to retrieve a list of users in users table. diff --git a/synapse/storage/databases/main/presence.py b/synapse/storage/databases/main/presence.py index c207d917b109..db22fab23ea0 100644 --- a/synapse/storage/databases/main/presence.py +++ b/synapse/storage/databases/main/presence.py @@ -12,16 +12,69 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Dict, List, Tuple
+from typing import TYPE_CHECKING, Dict, List, Tuple
 
-from synapse.api.presence import UserPresenceState
+from synapse.api.presence import PresenceState, UserPresenceState
+from synapse.replication.tcp.streams import PresenceStream
 from synapse.storage._base import SQLBaseStore, make_in_list_sql_clause
+from synapse.storage.database import DatabasePool
+from synapse.storage.engines import PostgresEngine
+from synapse.storage.types import Connection
+from synapse.storage.util.id_generators import MultiWriterIdGenerator, StreamIdGenerator
 from synapse.util.caches.descriptors import cached, cachedList
+from synapse.util.caches.stream_change_cache import StreamChangeCache
 from synapse.util.iterutils import batch_iter
 
+if TYPE_CHECKING:
+    from synapse.server import HomeServer
+
 
 class PresenceStore(SQLBaseStore):
+    def __init__(
+        self,
+        database: DatabasePool,
+        db_conn: Connection,
+        hs: "HomeServer",
+    ):
+        super().__init__(database, db_conn, hs)
+
+        self._can_persist_presence = (
+            hs.get_instance_name() in hs.config.worker.writers.presence
+        )
+
+        if isinstance(database.engine, PostgresEngine):
+            self._presence_id_gen = MultiWriterIdGenerator(
+                db_conn=db_conn,
+                db=database,
+                stream_name="presence_stream",
+                instance_name=self._instance_name,
+                tables=[("presence_stream", "instance_name", "stream_id")],
+                sequence_name="presence_stream_sequence",
+                writers=hs.config.worker.writers.presence,
+            )
+        else:
+            self._presence_id_gen = StreamIdGenerator(
+                db_conn, "presence_stream", "stream_id"
+            )
+
+        self._presence_on_startup = self._get_active_presence(db_conn)
+
+        presence_cache_prefill, min_presence_val = self.db_pool.get_cache_dict(
+            db_conn,
+            "presence_stream",
+            entity_column="user_id",
+            stream_column="stream_id",
+            max_value=self._presence_id_gen.get_current_token(),
+        )
+        self.presence_stream_cache = StreamChangeCache(
+            "PresenceStreamChangeCache",
+            min_presence_val,
+            prefilled_cache=presence_cache_prefill,
+        )
+
     async def update_presence(self, presence_states):
+        assert self._can_persist_presence
+
         stream_ordering_manager = self._presence_id_gen.get_next_mult(
             len(presence_states)
         )
@@ -57,6 +110,7 @@ def _update_presence_txn(self, txn, stream_orderings, presence_states):
                     "last_user_sync_ts": state.last_user_sync_ts,
                     "status_msg": state.status_msg,
                     "currently_active": state.currently_active,
+                    "instance_name": self._instance_name,
                 }
                 for stream_id, state in zip(stream_orderings, presence_states)
             ],
@@ -216,3 +270,37 @@ async def get_presence_for_all_users(
 
     def get_current_presence_token(self):
         return self._presence_id_gen.get_current_token()
+
+    def _get_active_presence(self, db_conn: Connection):
+        """Fetch non-offline presence from the database so that we can register
+        the appropriate timeouts.
+        """
+
+        sql = (
+            "SELECT user_id, state, last_active_ts, last_federation_update_ts,"
+            " last_user_sync_ts, status_msg, currently_active FROM presence_stream"
+            " WHERE state != ?"
+ ) + + txn = db_conn.cursor() + txn.execute(sql, (PresenceState.OFFLINE,)) + rows = self.db_pool.cursor_to_dict(txn) + txn.close() + + for row in rows: + row["currently_active"] = bool(row["currently_active"]) + + return [UserPresenceState(**row) for row in rows] + + def take_presence_startup_info(self): + active_on_startup = self._presence_on_startup + self._presence_on_startup = None + return active_on_startup + + def process_replication_rows(self, stream_name, instance_name, token, rows): + if stream_name == PresenceStream.NAME: + self._presence_id_gen.advance(instance_name, token) + for row in rows: + self.presence_stream_cache.entity_has_changed(row.user_id, token) + self._get_presence_for_user.invalidate((row.user_id,)) + return super().process_replication_rows(stream_name, instance_name, token, rows) diff --git a/synapse/storage/databases/main/schema/delta/59/12presence_stream_instance.sql b/synapse/storage/databases/main/schema/delta/59/12presence_stream_instance.sql new file mode 100644 index 000000000000..b6ba0bda1a81 --- /dev/null +++ b/synapse/storage/databases/main/schema/delta/59/12presence_stream_instance.sql @@ -0,0 +1,18 @@ +/* Copyright 2021 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Add a column to specify which instance wrote the row. Historic rows have +-- `NULL`, which indicates that the master instance wrote them. +ALTER TABLE presence_stream ADD COLUMN instance_name TEXT; diff --git a/synapse/storage/databases/main/schema/delta/59/12presence_stream_instance_seq.sql.postgres b/synapse/storage/databases/main/schema/delta/59/12presence_stream_instance_seq.sql.postgres new file mode 100644 index 000000000000..02b182adf984 --- /dev/null +++ b/synapse/storage/databases/main/schema/delta/59/12presence_stream_instance_seq.sql.postgres @@ -0,0 +1,20 @@ +/* Copyright 2021 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +CREATE SEQUENCE IF NOT EXISTS presence_stream_sequence; + +SELECT setval('presence_stream_sequence', ( + SELECT COALESCE(MAX(stream_id), 1) FROM presence_stream +)); diff --git a/tests/app/test_frontend_proxy.py b/tests/app/test_frontend_proxy.py deleted file mode 100644 index 3d45da38ab25..000000000000 --- a/tests/app/test_frontend_proxy.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright 2018 New Vector Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from synapse.app.generic_worker import GenericWorkerServer - -from tests.server import make_request -from tests.unittest import HomeserverTestCase - - -class FrontendProxyTests(HomeserverTestCase): - def make_homeserver(self, reactor, clock): - - hs = self.setup_test_homeserver( - federation_http_client=None, homeserver_to_use=GenericWorkerServer - ) - - return hs - - def default_config(self): - c = super().default_config() - c["worker_app"] = "synapse.app.frontend_proxy" - - c["worker_listeners"] = [ - { - "type": "http", - "port": 8080, - "bind_addresses": ["0.0.0.0"], - "resources": [{"names": ["client"]}], - } - ] - - return c - - def test_listen_http_with_presence_enabled(self): - """ - When presence is on, the stub servlet will not register. - """ - # Presence is on - self.hs.config.use_presence = True - - # Listen with the config - self.hs._listen_http(self.hs.config.worker.worker_listeners[0]) - - # Grab the resource from the site that was told to listen - self.assertEqual(len(self.reactor.tcpServers), 1) - site = self.reactor.tcpServers[0][1] - - channel = make_request(self.reactor, site, "PUT", "presence/a/status") - - # 400 + unrecognised, because nothing is registered - self.assertEqual(channel.code, 400) - self.assertEqual(channel.json_body["errcode"], "M_UNRECOGNIZED") - - def test_listen_http_with_presence_disabled(self): - """ - When presence is off, the stub servlet will register. - """ - # Presence is off - self.hs.config.use_presence = False - - # Listen with the config - self.hs._listen_http(self.hs.config.worker.worker_listeners[0]) - - # Grab the resource from the site that was told to listen - self.assertEqual(len(self.reactor.tcpServers), 1) - site = self.reactor.tcpServers[0][1] - - channel = make_request(self.reactor, site, "PUT", "presence/a/status") - - # 401, because the stub servlet still checks authentication - self.assertEqual(channel.code, 401) - self.assertEqual(channel.json_body["errcode"], "M_MISSING_TOKEN") diff --git a/tests/rest/client/v1/test_presence.py b/tests/rest/client/v1/test_presence.py index 3a050659caa6..409f3949dc09 100644 --- a/tests/rest/client/v1/test_presence.py +++ b/tests/rest/client/v1/test_presence.py @@ -16,6 +16,7 @@ from twisted.internet import defer +from synapse.handlers.presence import PresenceHandler from synapse.rest.client.v1 import presence from synapse.types import UserID @@ -32,7 +33,7 @@ class PresenceTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): - presence_handler = Mock() + presence_handler = Mock(spec=PresenceHandler) presence_handler.set_state.return_value = defer.succeed(None) hs = self.setup_test_homeserver( @@ -59,12 +60,12 @@ def test_put_presence(self): self.assertEqual(channel.code, 200) self.assertEqual(self.hs.get_presence_handler().set_state.call_count, 1) + @unittest.override_config({"use_presence": False}) def test_put_presence_disabled(self): """ PUT to the status endpoint with use_presence disabled will NOT call set_state on the presence handler. 
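The switch to `Mock(spec=PresenceHandler)` above means the mock only accepts attributes that exist on the real handler, so a typo in a test fails loudly instead of silently passing. A small standalone illustration; `PresenceHandlerLike` is a stand-in class, not Synapse code:

```python
from unittest.mock import Mock


class PresenceHandlerLike:
    """Stand-in for the real handler's interface."""

    def set_state(self, user, state):
        ...


handler = Mock(spec=PresenceHandlerLike)
handler.set_state("@a:x", {"presence": "online"})
handler.set_state.assert_called_once_with("@a:x", {"presence": "online"})

try:
    handler.set_staet  # typo: a spec'd mock rejects unknown attributes
except AttributeError as exc:
    print("caught:", exc)
```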
""" - self.hs.config.use_presence = False body = {"presence": "here", "status_msg": "beep boop"} channel = self.make_request( From ceaa76970fa0092bbdc35055c6f32dc63dd59960 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 23 Apr 2021 13:37:48 +0100 Subject: [PATCH 077/222] Remove room and user invite ratelimits in default unit test config (#9871) --- changelog.d/9871.misc | 1 + tests/utils.py | 4 ++++ 2 files changed, 5 insertions(+) create mode 100644 changelog.d/9871.misc diff --git a/changelog.d/9871.misc b/changelog.d/9871.misc new file mode 100644 index 000000000000..b19acfab6298 --- /dev/null +++ b/changelog.d/9871.misc @@ -0,0 +1 @@ +Disable invite rate-limiting by default when running the unit tests. \ No newline at end of file diff --git a/tests/utils.py b/tests/utils.py index 63d52b91407a..6bd008dcfe21 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -153,6 +153,10 @@ def default_config(name, parse=False): "local": {"per_second": 10000, "burst_count": 10000}, "remote": {"per_second": 10000, "burst_count": 10000}, }, + "rc_invites": { + "per_room": {"per_second": 10000, "burst_count": 10000}, + "per_user": {"per_second": 10000, "burst_count": 10000}, + }, "rc_3pid_validation": {"per_second": 10000, "burst_count": 10000}, "saml2_enabled": False, "public_baseurl": None, From a15c003e5b0bff8bf78a675f3b719d3f25fe8bde Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 23 Apr 2021 15:46:29 +0100 Subject: [PATCH 078/222] Make DomainSpecificString an attrs class (#9875) --- changelog.d/9875.misc | 1 + synapse/handlers/oidc.py | 5 +++++ synapse/rest/synapse/client/new_user_consent.py | 9 +++++++++ synapse/types.py | 17 +++++++++-------- 4 files changed, 24 insertions(+), 8 deletions(-) create mode 100644 changelog.d/9875.misc diff --git a/changelog.d/9875.misc b/changelog.d/9875.misc new file mode 100644 index 000000000000..9345c0bf4530 --- /dev/null +++ b/changelog.d/9875.misc @@ -0,0 +1 @@ +Make `DomainSpecificString` an `attrs` class. diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py index 45514be50fef..1c4a43be0a81 100644 --- a/synapse/handlers/oidc.py +++ b/synapse/handlers/oidc.py @@ -957,6 +957,11 @@ async def grandfather_existing_users() -> Optional[str]: # and attempt to match it. attributes = await oidc_response_to_user_attributes(failures=0) + if attributes.localpart is None: + # If no localpart is returned then we will generate one, so + # there is no need to search for existing users. + return None + user_id = UserID(attributes.localpart, self._server_name).to_string() users = await self._store.get_users_by_id_case_insensitive(user_id) if users: diff --git a/synapse/rest/synapse/client/new_user_consent.py b/synapse/rest/synapse/client/new_user_consent.py index e5634f967966..488b97b32e02 100644 --- a/synapse/rest/synapse/client/new_user_consent.py +++ b/synapse/rest/synapse/client/new_user_consent.py @@ -61,6 +61,15 @@ async def _async_render_GET(self, request: Request) -> None: self._sso_handler.render_error(request, "bad_session", e.msg, code=e.code) return + # It should be impossible to get here without having first been through + # the pick-a-username step, which ensures chosen_localpart gets set. 
+ if not session.chosen_localpart: + logger.warning("Session has no user name selected") + self._sso_handler.render_error( + request, "no_user", "No user name has been selected.", code=400 + ) + return + user_id = UserID(session.chosen_localpart, self._server_name) user_profile = { "display_name": session.display_name, diff --git a/synapse/types.py b/synapse/types.py index e19f28d5437c..e52cd7ffd4b9 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -199,9 +199,8 @@ def get_localpart_from_id(string): DS = TypeVar("DS", bound="DomainSpecificString") -class DomainSpecificString( - namedtuple("DomainSpecificString", ("localpart", "domain")), metaclass=abc.ABCMeta -): +@attr.s(slots=True, frozen=True, repr=False) +class DomainSpecificString(metaclass=abc.ABCMeta): """Common base class among ID/name strings that have a local part and a domain name, prefixed with a sigil. @@ -213,11 +212,8 @@ class DomainSpecificString( SIGIL = abc.abstractproperty() # type: str # type: ignore - # Deny iteration because it will bite you if you try to create a singleton - # set by: - # users = set(user) - def __iter__(self): - raise ValueError("Attempted to iterate a %s" % (type(self).__name__,)) + localpart = attr.ib(type=str) + domain = attr.ib(type=str) # Because this class is a namedtuple of strings and booleans, it is deeply # immutable. @@ -272,30 +268,35 @@ def is_valid(cls: Type[DS], s: str) -> bool: __repr__ = to_string +@attr.s(slots=True, frozen=True, repr=False) class UserID(DomainSpecificString): """Structure representing a user ID.""" SIGIL = "@" +@attr.s(slots=True, frozen=True, repr=False) class RoomAlias(DomainSpecificString): """Structure representing a room name.""" SIGIL = "#" +@attr.s(slots=True, frozen=True, repr=False) class RoomID(DomainSpecificString): """Structure representing a room id. """ SIGIL = "!" +@attr.s(slots=True, frozen=True, repr=False) class EventID(DomainSpecificString): """Structure representing an event id. """ SIGIL = "$" +@attr.s(slots=True, frozen=True, repr=False) class GroupID(DomainSpecificString): """Structure representing a group ID.""" From e83627926fb5373b383129b99a5039e8a2e329af Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 23 Apr 2021 12:02:16 -0400 Subject: [PATCH 079/222] Add type hints to auth and auth_blocking. (#9876) --- changelog.d/9876.misc | 1 + synapse/api/auth.py | 78 ++++++++++++++++++------------------ synapse/api/auth_blocking.py | 9 +++-- synapse/event_auth.py | 4 +- 4 files changed, 48 insertions(+), 44 deletions(-) create mode 100644 changelog.d/9876.misc diff --git a/changelog.d/9876.misc b/changelog.d/9876.misc new file mode 100644 index 000000000000..28390e32e67e --- /dev/null +++ b/changelog.d/9876.misc @@ -0,0 +1 @@ +Add type hints to `synapse.api.auth` and `synapse.api.auth_blocking` modules. diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 2d845d0d5c33..efc926d0941d 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -12,14 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging -from typing import List, Optional, Tuple +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple import pymacaroons from netaddr import IPAddress from twisted.web.server import Request -import synapse.types from synapse import event_auth from synapse.api.auth_blocking import AuthBlocking from synapse.api.constants import EventTypes, HistoryVisibility, Membership @@ -36,11 +35,14 @@ from synapse.http.site import SynapseRequest from synapse.logging import opentracing as opentracing from synapse.storage.databases.main.registration import TokenLookupResult -from synapse.types import StateMap, UserID +from synapse.types import Requester, StateMap, UserID, create_requester from synapse.util.caches.lrucache import LruCache from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry from synapse.util.metrics import Measure +if TYPE_CHECKING: + from synapse.server import HomeServer + logger = logging.getLogger(__name__) @@ -68,7 +70,7 @@ class Auth: The latter should be moved to synapse.handlers.event_auth.EventAuthHandler. """ - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): self.hs = hs self.clock = hs.get_clock() self.store = hs.get_datastore() @@ -88,13 +90,13 @@ def __init__(self, hs): async def check_from_context( self, room_version: str, event, context, do_sig_check=True - ): + ) -> None: prev_state_ids = await context.get_prev_state_ids() auth_events_ids = self.compute_auth_events( event, prev_state_ids, for_verification=True ) - auth_events = await self.store.get_events(auth_events_ids) - auth_events = {(e.type, e.state_key): e for e in auth_events.values()} + auth_events_by_id = await self.store.get_events(auth_events_ids) + auth_events = {(e.type, e.state_key): e for e in auth_events_by_id.values()} room_version_obj = KNOWN_ROOM_VERSIONS[room_version] event_auth.check( @@ -151,17 +153,11 @@ async def check_user_in_room( raise AuthError(403, "User %s not in room %s" % (user_id, room_id)) - async def check_host_in_room(self, room_id, host): + async def check_host_in_room(self, room_id: str, host: str) -> bool: with Measure(self.clock, "check_host_in_room"): - latest_event_ids = await self.store.is_host_joined(room_id, host) - return latest_event_ids - - def can_federate(self, event, auth_events): - creation_event = auth_events.get((EventTypes.Create, "")) + return await self.store.is_host_joined(room_id, host) - return creation_event.content.get("m.federate", True) is True - - def get_public_keys(self, invite_event): + def get_public_keys(self, invite_event: EventBase) -> List[Dict[str, Any]]: return event_auth.get_public_keys(invite_event) async def get_user_by_req( @@ -170,7 +166,7 @@ async def get_user_by_req( allow_guest: bool = False, rights: str = "access", allow_expired: bool = False, - ) -> synapse.types.Requester: + ) -> Requester: """Get a registered user's ID. 
Args: @@ -196,7 +192,7 @@ async def get_user_by_req( access_token = self.get_access_token_from_request(request) user_id, app_service = await self._get_appservice_user_id(request) - if user_id: + if user_id and app_service: if ip_addr and self._track_appservice_user_ips: await self.store.insert_client_ip( user_id=user_id, @@ -206,9 +202,7 @@ async def get_user_by_req( device_id="dummy-device", # stubbed ) - requester = synapse.types.create_requester( - user_id, app_service=app_service - ) + requester = create_requester(user_id, app_service=app_service) request.requester = user_id opentracing.set_tag("authenticated_entity", user_id) @@ -251,7 +245,7 @@ async def get_user_by_req( errcode=Codes.GUEST_ACCESS_FORBIDDEN, ) - requester = synapse.types.create_requester( + requester = create_requester( user_info.user_id, token_id, is_guest, @@ -271,7 +265,9 @@ async def get_user_by_req( except KeyError: raise MissingClientTokenError() - async def _get_appservice_user_id(self, request): + async def _get_appservice_user_id( + self, request: Request + ) -> Tuple[Optional[str], Optional[ApplicationService]]: app_service = self.store.get_app_service_by_token( self.get_access_token_from_request(request) ) @@ -283,6 +279,9 @@ async def _get_appservice_user_id(self, request): if ip_address not in app_service.ip_range_whitelist: return None, None + # This will always be set by the time Twisted calls us. + assert request.args is not None + if b"user_id" not in request.args: return app_service.sender, app_service @@ -387,7 +386,9 @@ async def get_user_by_access_token( logger.warning("Invalid macaroon in auth: %s %s", type(e), e) raise InvalidClientTokenError("Invalid macaroon passed.") - def _parse_and_validate_macaroon(self, token, rights="access"): + def _parse_and_validate_macaroon( + self, token: str, rights: str = "access" + ) -> Tuple[str, bool]: """Takes a macaroon and tries to parse and validate it. This is cached if and only if rights == access and there isn't an expiry. @@ -432,15 +433,16 @@ def _parse_and_validate_macaroon(self, token, rights="access"): return user_id, guest - def validate_macaroon(self, macaroon, type_string, user_id): + def validate_macaroon( + self, macaroon: pymacaroons.Macaroon, type_string: str, user_id: str + ) -> None: """ validate that a Macaroon is understood by and was signed by this server. Args: - macaroon(pymacaroons.Macaroon): The macaroon to validate - type_string(str): The kind of token required (e.g. "access", - "delete_pusher") - user_id (str): The user_id required + macaroon: The macaroon to validate + type_string: The kind of token required (e.g. "access", "delete_pusher") + user_id: The user_id required """ v = pymacaroons.Verifier() @@ -465,9 +467,7 @@ def get_appservice_by_req(self, request: SynapseRequest) -> ApplicationService: if not service: logger.warning("Unrecognised appservice access token.") raise InvalidClientTokenError() - request.requester = synapse.types.create_requester( - service.sender, app_service=service - ) + request.requester = create_requester(service.sender, app_service=service) return service async def is_server_admin(self, user: UserID) -> bool: @@ -519,7 +519,7 @@ def compute_auth_events( return auth_ids - async def check_can_change_room_list(self, room_id: str, user: UserID): + async def check_can_change_room_list(self, room_id: str, user: UserID) -> bool: """Determine whether the user is allowed to edit the room's entry in the published room list. 
@@ -554,11 +554,11 @@ async def check_can_change_room_list(self, room_id: str, user: UserID): return user_level >= send_level @staticmethod - def has_access_token(request: Request): + def has_access_token(request: Request) -> bool: """Checks if the request has an access_token. Returns: - bool: False if no access_token was given, True otherwise. + False if no access_token was given, True otherwise. """ # This will always be set by the time Twisted calls us. assert request.args is not None @@ -568,13 +568,13 @@ def has_access_token(request: Request): return bool(query_params) or bool(auth_headers) @staticmethod - def get_access_token_from_request(request: Request): + def get_access_token_from_request(request: Request) -> str: """Extracts the access_token from the request. Args: request: The http request. Returns: - unicode: The access_token + The access_token Raises: MissingClientTokenError: If there isn't a single access_token in the request @@ -649,5 +649,5 @@ async def check_user_in_room_or_world_readable( % (user_id, room_id), ) - def check_auth_blocking(self, *args, **kwargs): - return self._auth_blocking.check_auth_blocking(*args, **kwargs) + async def check_auth_blocking(self, *args, **kwargs) -> None: + await self._auth_blocking.check_auth_blocking(*args, **kwargs) diff --git a/synapse/api/auth_blocking.py b/synapse/api/auth_blocking.py index a8df60cb890b..e6bced93d5fa 100644 --- a/synapse/api/auth_blocking.py +++ b/synapse/api/auth_blocking.py @@ -13,18 +13,21 @@ # limitations under the License. import logging -from typing import Optional +from typing import TYPE_CHECKING, Optional from synapse.api.constants import LimitBlockingTypes, UserTypes from synapse.api.errors import Codes, ResourceLimitError from synapse.config.server import is_threepid_reserved from synapse.types import Requester +if TYPE_CHECKING: + from synapse.server import HomeServer + logger = logging.getLogger(__name__) class AuthBlocking: - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): self.store = hs.get_datastore() self._server_notices_mxid = hs.config.server_notices_mxid @@ -43,7 +46,7 @@ async def check_auth_blocking( threepid: Optional[dict] = None, user_type: Optional[str] = None, requester: Optional[Requester] = None, - ): + ) -> None: """Checks if the user should be rejected for some external reason, such as monthly active user limiting or global disable flag diff --git a/synapse/event_auth.py b/synapse/event_auth.py index c831d9f73cee..afc2bc826745 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -14,7 +14,7 @@ # limitations under the License. 
import logging -from typing import List, Optional, Set, Tuple +from typing import Any, Dict, List, Optional, Set, Tuple from canonicaljson import encode_canonical_json from signedjson.key import decode_verify_key_bytes @@ -688,7 +688,7 @@ def _verify_third_party_invite(event: EventBase, auth_events: StateMap[EventBase return False -def get_public_keys(invite_event): +def get_public_keys(invite_event: EventBase) -> List[Dict[str, Any]]: public_keys = [] if "public_key" in invite_event.content: o = {"public_key": invite_event.content["public_key"]} From 59d24c5bef4e05fa7be0cad1f7e63f0a0097374b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 23 Apr 2021 17:06:47 +0100 Subject: [PATCH 080/222] pass a reactor into SynapseSite (#9874) --- changelog.d/9874.misc | 1 + synapse/app/generic_worker.py | 1 + synapse/app/homeserver.py | 25 ++++++++++------------- synapse/http/site.py | 37 ++++++++++++++++++++++++++--------- tests/replication/_base.py | 1 + tests/test_server.py | 1 + tests/unittest.py | 1 + 7 files changed, 43 insertions(+), 24 deletions(-) create mode 100644 changelog.d/9874.misc diff --git a/changelog.d/9874.misc b/changelog.d/9874.misc new file mode 100644 index 000000000000..ba1097e65eb4 --- /dev/null +++ b/changelog.d/9874.misc @@ -0,0 +1 @@ +Pass a reactor into `SynapseSite` to make testing easier. diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 7b2ac3ca6427..70e07d0574db 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -367,6 +367,7 @@ def _listen_http(self, listener_config: ListenerConfig): listener_config, root_resource, self.version_string, + reactor=self.get_reactor(), ), reactor=self.get_reactor(), ) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 8be8b520eb1a..140f6bcdeef0 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -126,19 +126,20 @@ def _listener_http(self, config: HomeServerConfig, listener_config: ListenerConf else: root_resource = OptionsResource() - root_resource = create_resource_tree(resources, root_resource) + site = SynapseSite( + "synapse.access.%s.%s" % ("https" if tls else "http", site_tag), + site_tag, + listener_config, + create_resource_tree(resources, root_resource), + self.version_string, + reactor=self.get_reactor(), + ) if tls: ports = listen_ssl( bind_addresses, port, - SynapseSite( - "synapse.access.https.%s" % (site_tag,), - site_tag, - listener_config, - root_resource, - self.version_string, - ), + site, self.tls_server_context_factory, reactor=self.get_reactor(), ) @@ -148,13 +149,7 @@ def _listener_http(self, config: HomeServerConfig, listener_config: ListenerConf ports = listen_tcp( bind_addresses, port, - SynapseSite( - "synapse.access.http.%s" % (site_tag,), - site_tag, - listener_config, - root_resource, - self.version_string, - ), + site, reactor=self.get_reactor(), ) logger.info("Synapse now listening on TCP port %d", port) diff --git a/synapse/http/site.py b/synapse/http/site.py index 32b5e19c0926..e911ee4809f9 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -19,8 +19,9 @@ import attr from zope.interface import implementer -from twisted.internet.interfaces import IAddress +from twisted.internet.interfaces import IAddress, IReactorTime from twisted.python.failure import Failure +from twisted.web.resource import IResource from twisted.web.server import Request, Site from synapse.config.server import ListenerConfig @@ -485,21 +486,39 @@ class 
_XForwardedForAddress: class SynapseSite(Site): """ - Subclass of a twisted http Site that does access logging with python's - standard logging + Synapse-specific twisted http Site + + This does two main things. + + First, it replaces the requestFactory in use so that we build SynapseRequests + instead of regular t.w.server.Requests. All of the constructor params are really + just parameters for SynapseRequest. + + Second, it inhibits the log() method called by Request.finish, since SynapseRequest + does its own logging. """ def __init__( self, - logger_name, - site_tag, + logger_name: str, + site_tag: str, config: ListenerConfig, - resource, + resource: IResource, server_version_string, - *args, - **kwargs, + reactor: IReactorTime, ): - Site.__init__(self, resource, *args, **kwargs) + """ + + Args: + logger_name: The name of the logger to use for access logs. + site_tag: A tag to use for this site - mostly in access logs. + config: Configuration for the HTTP listener corresponding to this site + resource: The base of the resource tree to be used for serving requests on + this site + server_version_string: A string to present for the Server header + reactor: reactor to be used to manage connection timeouts + """ + Site.__init__(self, resource, reactor=reactor) self.site_tag = site_tag diff --git a/tests/replication/_base.py b/tests/replication/_base.py index c9d04aef2924..5cf58d8b6029 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -349,6 +349,7 @@ def make_worker_hs( config=worker_hs.config.server.listeners[0], resource=resource, server_version_string="1", + reactor=self.reactor, ) if worker_hs.config.redis.redis_enabled: diff --git a/tests/test_server.py b/tests/test_server.py index 55cde7f62f48..45400be3674c 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -202,6 +202,7 @@ def _make_request(self, method, path): parse_listener_def({"type": "http", "port": 0}), self.resource, "1.0", + reactor=self.reactor, ) # render the request and return the channel diff --git a/tests/unittest.py b/tests/unittest.py index ee22a5384935..5353e75c7c85 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -247,6 +247,7 @@ def setUp(self): config=self.hs.config.server.listeners[0], resource=self.resource, server_version_string="1", + reactor=self.reactor, ) from tests.rest.client.v1.utils import RestHelper From 695b73c861aa26ab591cad3f378214b2666e806e Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 23 Apr 2021 18:22:47 +0100 Subject: [PATCH 081/222] Allow OIDC cookies to work on non-root public baseurls (#9726) Applied a (slightly modified) patch from https://github.com/matrix-org/synapse/issues/9574. As far as I understand this would allow the cookie set during the OIDC flow to work on deployments using public baseurls that do not sit at the URL path root. --- changelog.d/9726.bugfix | 1 + synapse/config/server.py | 8 ++++---- synapse/handlers/oidc.py | 22 +++++++++++++++++----- 3 files changed, 22 insertions(+), 9 deletions(-) create mode 100644 changelog.d/9726.bugfix diff --git a/changelog.d/9726.bugfix b/changelog.d/9726.bugfix new file mode 100644 index 000000000000..4ba0b24327cb --- /dev/null +++ b/changelog.d/9726.bugfix @@ -0,0 +1 @@ +Fixes the OIDC SSO flow when using a `public_baseurl` value including a non-root URL path. 
\ No newline at end of file diff --git a/synapse/config/server.py b/synapse/config/server.py index 02b86b11a5ee..21ca7b33e38d 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -235,7 +235,11 @@ def read_config(self, config, **kwargs): self.print_pidfile = config.get("print_pidfile") self.user_agent_suffix = config.get("user_agent_suffix") self.use_frozen_dicts = config.get("use_frozen_dicts", False) + self.public_baseurl = config.get("public_baseurl") + if self.public_baseurl is not None: + if self.public_baseurl[-1] != "/": + self.public_baseurl += "/" # Whether to enable user presence. presence_config = config.get("presence") or {} @@ -407,10 +411,6 @@ def read_config(self, config, **kwargs): config_path=("federation_ip_range_blacklist",), ) - if self.public_baseurl is not None: - if self.public_baseurl[-1] != "/": - self.public_baseurl += "/" - # (undocumented) option for torturing the worker-mode replication a bit, # for testing. The value defines the number of milliseconds to pause before # sending out any replication updates. diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py index 1c4a43be0a81..ee6e41c0e4d9 100644 --- a/synapse/handlers/oidc.py +++ b/synapse/handlers/oidc.py @@ -15,7 +15,7 @@ import inspect import logging from typing import TYPE_CHECKING, Dict, Generic, List, Optional, TypeVar, Union -from urllib.parse import urlencode +from urllib.parse import urlencode, urlparse import attr import pymacaroons @@ -68,8 +68,8 @@ # # Here we have the names of the cookies, and the options we use to set them. _SESSION_COOKIES = [ - (b"oidc_session", b"Path=/_synapse/client/oidc; HttpOnly; Secure; SameSite=None"), - (b"oidc_session_no_samesite", b"Path=/_synapse/client/oidc; HttpOnly"), + (b"oidc_session", b"HttpOnly; Secure; SameSite=None"), + (b"oidc_session_no_samesite", b"HttpOnly"), ] #: A token exchanged from the token endpoint, as per RFC6749 sec 5.1. and @@ -279,6 +279,13 @@ def __init__( self._config = provider self._callback_url = hs.config.oidc_callback_url # type: str + # Calculate the prefix for OIDC callback paths based on the public_baseurl. + # We'll insert this into the Path= parameter of any session cookies we set. + public_baseurl_path = urlparse(hs.config.server.public_baseurl).path + self._callback_path_prefix = ( + public_baseurl_path.encode("utf-8") + b"_synapse/client/oidc" + ) + self._oidc_attribute_requirements = provider.attribute_requirements self._scopes = provider.scopes self._user_profile_method = provider.user_profile_method @@ -779,8 +786,13 @@ async def handle_redirect_request( for cookie_name, options in _SESSION_COOKIES: request.cookies.append( - b"%s=%s; Max-Age=3600; %s" - % (cookie_name, cookie.encode("utf-8"), options) + b"%s=%s; Max-Age=3600; Path=%s; %s" + % ( + cookie_name, + cookie.encode("utf-8"), + self._callback_path_prefix, + options, + ) ) metadata = await self.load_metadata() From 84936e22648d3c9f6b76028b08c33f0267f5e3a0 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 23 Apr 2021 18:40:57 +0100 Subject: [PATCH 082/222] Kill off `_PushHTTPChannel`. (#9878) First of all, a fixup to `FakeChannel` which is needed to make it work with the default HTTP channel implementation. Secondly, it looks like we no longer need `_PushHTTPChannel`, because as of #8013, the producer that gets attached to the `HTTPChannel` is now an `IPushProducer`. 
This is good, because it means we can remove a whole load of test-specific boilerplate which causes variation between tests and production. --- changelog.d/9878.misc | 1 + tests/replication/_base.py | 134 ++++++------------------------------- tests/server.py | 6 -- 3 files changed, 20 insertions(+), 121 deletions(-) create mode 100644 changelog.d/9878.misc diff --git a/changelog.d/9878.misc b/changelog.d/9878.misc new file mode 100644 index 000000000000..927876852db3 --- /dev/null +++ b/changelog.d/9878.misc @@ -0,0 +1 @@ +Remove redundant `_PushHTTPChannel` test class. diff --git a/tests/replication/_base.py b/tests/replication/_base.py index 5cf58d8b6029..dc3519ea1397 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -12,14 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import Any, Callable, Dict, List, Optional, Tuple, Type +from typing import Any, Callable, Dict, List, Optional, Tuple -from twisted.internet.interfaces import IConsumer, IPullProducer, IReactorTime from twisted.internet.protocol import Protocol -from twisted.internet.task import LoopingCall -from twisted.web.http import HTTPChannel from twisted.web.resource import Resource -from twisted.web.server import Request, Site from synapse.app.generic_worker import GenericWorkerServer from synapse.http.server import JsonResource @@ -33,7 +29,6 @@ ServerReplicationStreamProtocol, ) from synapse.server import HomeServer -from synapse.util import Clock from tests import unittest from tests.server import FakeTransport @@ -154,7 +149,19 @@ def handle_http_replication_attempt(self) -> SynapseRequest: client_protocol = client_factory.buildProtocol(None) # Set up the server side protocol - channel = _PushHTTPChannel(self.reactor, SynapseRequest, self.site) + channel = self.site.buildProtocol(None) + + # hook into the channel's request factory so that we can keep a record + # of the requests + requests: List[SynapseRequest] = [] + real_request_factory = channel.requestFactory + + def request_factory(*args, **kwargs): + request = real_request_factory(*args, **kwargs) + requests.append(request) + return request + + channel.requestFactory = request_factory # Connect client to server and vice versa. client_to_server_transport = FakeTransport( @@ -176,7 +183,10 @@ def handle_http_replication_attempt(self) -> SynapseRequest: server_to_client_transport.loseConnection() client_to_server_transport.loseConnection() - return channel.request + # there should have been exactly one request + self.assertEqual(len(requests), 1) + + return requests[0] def assert_request_is_get_repl_stream_updates( self, request: SynapseRequest, stream_name: str @@ -387,7 +397,7 @@ def _handle_http_replication_attempt(self, hs, repl_port): client_protocol = client_factory.buildProtocol(None) # Set up the server side protocol - channel = _PushHTTPChannel(self.reactor, SynapseRequest, self._hs_to_site[hs]) + channel = self._hs_to_site[hs].buildProtocol(None) # Connect client to server and vice versa. client_to_server_transport = FakeTransport( @@ -445,112 +455,6 @@ async def on_rdata(self, stream_name, instance_name, token, rows): self.received_rdata_rows.append((stream_name, token, r)) -class _PushHTTPChannel(HTTPChannel): - """A HTTPChannel that wraps pull producers to push producers. 
- - This is a hack to get around the fact that HTTPChannel transparently wraps a - pull producer (which is what Synapse uses to reply to requests) with - `_PullToPush` to convert it to a push producer. Unfortunately `_PullToPush` - uses the standard reactor rather than letting us use our test reactor, which - makes it very hard to test. - """ - - def __init__( - self, reactor: IReactorTime, request_factory: Type[Request], site: Site - ): - super().__init__() - self.reactor = reactor - self.requestFactory = request_factory - self.site = site - - self._pull_to_push_producer = None # type: Optional[_PullToPushProducer] - - def registerProducer(self, producer, streaming): - # Convert pull producers to push producer. - if not streaming: - self._pull_to_push_producer = _PullToPushProducer( - self.reactor, producer, self - ) - producer = self._pull_to_push_producer - - super().registerProducer(producer, True) - - def unregisterProducer(self): - if self._pull_to_push_producer: - # We need to manually stop the _PullToPushProducer. - self._pull_to_push_producer.stop() - - def checkPersistence(self, request, version): - """Check whether the connection can be re-used""" - # We hijack this to always say no for ease of wiring stuff up in - # `handle_http_replication_attempt`. - request.responseHeaders.setRawHeaders(b"connection", [b"close"]) - return False - - def requestDone(self, request): - # Store the request for inspection. - self.request = request - super().requestDone(request) - - -class _PullToPushProducer: - """A push producer that wraps a pull producer.""" - - def __init__( - self, reactor: IReactorTime, producer: IPullProducer, consumer: IConsumer - ): - self._clock = Clock(reactor) - self._producer = producer - self._consumer = consumer - - # While running we use a looping call with a zero delay to call - # resumeProducing on given producer. - self._looping_call = None # type: Optional[LoopingCall] - - # We start writing next reactor tick. - self._start_loop() - - def _start_loop(self): - """Start the looping call to""" - - if not self._looping_call: - # Start a looping call which runs every tick. 
- self._looping_call = self._clock.looping_call(self._run_once, 0) - - def stop(self): - """Stops calling resumeProducing.""" - if self._looping_call: - self._looping_call.stop() - self._looping_call = None - - def pauseProducing(self): - """Implements IPushProducer""" - self.stop() - - def resumeProducing(self): - """Implements IPushProducer""" - self._start_loop() - - def stopProducing(self): - """Implements IPushProducer""" - self.stop() - self._producer.stopProducing() - - def _run_once(self): - """Calls resumeProducing on producer once.""" - - try: - self._producer.resumeProducing() - except Exception: - logger.exception("Failed to call resumeProducing") - try: - self._consumer.unregisterProducer() - except Exception: - pass - - self.stopProducing() - - class FakeRedisPubSubServer: """A fake Redis server for pub/sub.""" diff --git a/tests/server.py b/tests/server.py index b535a5d886ca..9df8cda24fb8 100644 --- a/tests/server.py +++ b/tests/server.py @@ -603,12 +603,6 @@ def flush(self, maxbytes=None): if self.disconnected: return - if not hasattr(self.other, "transport"): - # the other has no transport yet; reschedule - if self.autoflush: - self._reactor.callLater(0.0, self.flush) - return - if maxbytes is not None: to_write = self.buffer[:maxbytes] else: From 3ff225175462dde8376aa584e3a47c43b1f0e790 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 23 Apr 2021 19:20:44 +0100 Subject: [PATCH 083/222] Improved validation for received requests (#9817) * Simplify `start_listening` callpath * Correctly check the size of uploaded files --- changelog.d/9817.misc | 1 + synapse/api/constants.py | 3 + synapse/app/_base.py | 30 +++++++-- synapse/app/admin_cmd.py | 8 +-- synapse/app/generic_worker.py | 11 ++-- synapse/app/homeserver.py | 17 +++-- synapse/config/logger.py | 3 +- synapse/event_auth.py | 4 +- synapse/http/site.py | 32 +++++++-- synapse/rest/media/v1/upload_resource.py | 2 - synapse/server.py | 8 +++ tests/http/test_site.py | 83 ++++++++++++++++++++++++ tests/replication/_base.py | 1 + tests/test_server.py | 1 + tests/unittest.py | 1 + 15 files changed, 174 insertions(+), 31 deletions(-) create mode 100644 changelog.d/9817.misc create mode 100644 tests/http/test_site.py diff --git a/changelog.d/9817.misc b/changelog.d/9817.misc new file mode 100644 index 000000000000..8aa8895f05f0 --- /dev/null +++ b/changelog.d/9817.misc @@ -0,0 +1 @@ +Fix a long-standing bug which caused `max_upload_size` to not be correctly enforced. 
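The sizing rule this patch introduces is easiest to read in isolation. The sketch below condenses the `max_request_body_size` helper added to `synapse/app/_base.py` further down; `media_enabled` and `max_upload_size` here are illustrative stand-ins for the config attributes the real helper reads.

    # A federation /send transaction carries at most 50 PDUs, each limited to
    # 64 KiB of canonical JSON, plus EDUs; 200 * 64 KiB (~12.5 MiB) is therefore
    # a comfortable ceiling for any non-media request.
    MAX_PDU_SIZE = 65536

    def max_request_body_size(media_enabled: bool, max_upload_size: int) -> int:
        limit = 200 * MAX_PDU_SIZE  # 13_107_200 bytes
        if media_enabled:
            # media uploads may legitimately be larger than the federation ceiling
            limit = max(limit, max_upload_size)
        return limit
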
diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 31a59bceec20..936b6534b41f 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -17,6 +17,9 @@ """Contains constants from the specification.""" +# the max size of a (canonical-json-encoded) event +MAX_PDU_SIZE = 65536 + # the "depth" field on events is limited to 2**63 - 1 MAX_DEPTH = 2 ** 63 - 1 diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 2113c4f37002..638e01c1b2e5 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -30,9 +30,10 @@ from twisted.protocols.tls import TLSMemoryBIOFactory import synapse +from synapse.api.constants import MAX_PDU_SIZE from synapse.app import check_bind_error from synapse.app.phone_stats_home import start_phone_stats_home -from synapse.config.server import ListenerConfig +from synapse.config.homeserver import HomeServerConfig from synapse.crypto import context_factory from synapse.logging.context import PreserveLoggingContext from synapse.metrics.background_process_metrics import wrap_as_background_process @@ -288,7 +289,7 @@ def refresh_certificate(hs): logger.info("Context factories updated.") -async def start(hs: "synapse.server.HomeServer", listeners: Iterable[ListenerConfig]): +async def start(hs: "synapse.server.HomeServer"): """ Start a Synapse server or worker. @@ -300,7 +301,6 @@ async def start(hs: "synapse.server.HomeServer", listeners: Iterable[ListenerCon Args: hs: homeserver instance - listeners: Listener configuration ('listeners' in homeserver.yaml) """ # Set up the SIGHUP machinery. if hasattr(signal, "SIGHUP"): @@ -336,7 +336,7 @@ def run_sighup(*args, **kwargs): synapse.logging.opentracing.init_tracer(hs) # type: ignore[attr-defined] # noqa # It is now safe to start your Synapse. - hs.start_listening(listeners) + hs.start_listening() hs.get_datastore().db_pool.start_profiling() hs.get_pusherpool().start() @@ -530,3 +530,25 @@ def sdnotify(state): # this is a bit surprising, since we don't expect to have a NOTIFY_SOCKET # unless systemd is expecting us to notify it. logger.warning("Unable to send notification to systemd: %s", e) + + +def max_request_body_size(config: HomeServerConfig) -> int: + """Get a suitable maximum size for incoming HTTP requests""" + + # Other than media uploads, the biggest request we expect to see is a fully-loaded + # /federation/v1/send request. + # + # The main thing in such a request is up to 50 PDUs, and up to 100 EDUs. PDUs are + # limited to 65536 bytes (possibly slightly more if the sender didn't use canonical + # json encoding); there is no specced limit to EDUs (see + # https://github.com/matrix-org/matrix-doc/issues/3121). + # + # in short, we somewhat arbitrarily limit requests to 200 * 64K (about 12.5M) + # + max_request_size = 200 * MAX_PDU_SIZE + + # if we have a media repo enabled, we may need to allow larger uploads than that + if config.media.can_load_media_repo: + max_request_size = max(max_request_size, config.media.max_upload_size) + + return max_request_size diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index eb256db749da..68ae19c977ce 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -70,12 +70,6 @@ class AdminCmdSlavedStore( class AdminCmdServer(HomeServer): DATASTORE_CLASS = AdminCmdSlavedStore - def _listen_http(self, listener_config): - pass - - def start_listening(self, listeners): - pass - async def export_data_command(hs, args): """Export data for a user. 
@@ -232,7 +226,7 @@ def start(config_options): async def run(): with LoggingContext("command"): - _base.start(ss, []) + _base.start(ss) await args.func(ss, args) _base.start_worker_reactor( diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 70e07d0574db..1a15ceee8112 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -15,7 +15,7 @@ # limitations under the License. import logging import sys -from typing import Dict, Iterable, Optional +from typing import Dict, Optional from twisted.internet import address from twisted.web.resource import IResource @@ -32,7 +32,7 @@ SERVER_KEY_V2_PREFIX, ) from synapse.app import _base -from synapse.app._base import register_start +from synapse.app._base import max_request_body_size, register_start from synapse.config._base import ConfigError from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging @@ -367,6 +367,7 @@ def _listen_http(self, listener_config: ListenerConfig): listener_config, root_resource, self.version_string, + max_request_body_size=max_request_body_size(self.config), reactor=self.get_reactor(), ), reactor=self.get_reactor(), @@ -374,8 +375,8 @@ def _listen_http(self, listener_config: ListenerConfig): logger.info("Synapse worker now listening on port %d", port) - def start_listening(self, listeners: Iterable[ListenerConfig]): - for listener in listeners: + def start_listening(self): + for listener in self.config.worker_listeners: if listener.type == "http": self._listen_http(listener) elif listener.type == "manhole": @@ -468,7 +469,7 @@ def start(config_options): # streams. Will no-op if no streams can be written to by this worker. hs.get_replication_streamer() - register_start(_base.start, hs, config.worker_listeners) + register_start(_base.start, hs) _base.start_worker_reactor("synapse-generic-worker", config) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 140f6bcdeef0..8e78134bbedc 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -17,7 +17,7 @@ import logging import os import sys -from typing import Iterable, Iterator +from typing import Iterator from twisted.internet import reactor from twisted.web.resource import EncodingResourceWrapper, IResource @@ -36,7 +36,13 @@ WEB_CLIENT_PREFIX, ) from synapse.app import _base -from synapse.app._base import listen_ssl, listen_tcp, quit_with_error, register_start +from synapse.app._base import ( + listen_ssl, + listen_tcp, + max_request_body_size, + quit_with_error, + register_start, +) from synapse.config._base import ConfigError from synapse.config.emailconfig import ThreepidBehaviour from synapse.config.homeserver import HomeServerConfig @@ -132,6 +138,7 @@ def _listener_http(self, config: HomeServerConfig, listener_config: ListenerConf listener_config, create_resource_tree(resources, root_resource), self.version_string, + max_request_body_size=max_request_body_size(self.config), reactor=self.get_reactor(), ) @@ -268,14 +275,14 @@ def _configure_named_resource(self, name, compress=False): return resources - def start_listening(self, listeners: Iterable[ListenerConfig]): + def start_listening(self): if self.config.redis_enabled: # If redis is enabled we connect via the replication command handler # in the same way as the workers (since we're effectively a client # rather than a server). 
self.get_tcp_replication().start_replication(self) - for listener in listeners: + for listener in self.config.server.listeners: if listener.type == "http": self._listening_services.extend( self._listener_http(self.config, listener) @@ -407,7 +414,7 @@ async def start(): # Loading the provider metadata also ensures the provider config is valid. await oidc.load_metadata() - await _base.start(hs, config.listeners) + await _base.start(hs) hs.get_datastore().db_pool.updates.start_doing_background_updates() diff --git a/synapse/config/logger.py b/synapse/config/logger.py index b174e0df6d54..813076dfe2c9 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -31,7 +31,6 @@ ) import synapse -from synapse.app import _base as appbase from synapse.logging._structured import setup_structured_logging from synapse.logging.context import LoggingContextFilter from synapse.logging.filter import MetadataFilter @@ -318,6 +317,8 @@ def setup_logging( # Perform one-time logging configuration. _setup_stdlib_logging(config, log_config_path, logBeginner=logBeginner) # Add a SIGHUP handler to reload the logging configuration, if one is available. + from synapse.app import _base as appbase + appbase.register_sighup(_reload_logging_config, log_config_path) # Log immediately so we can grep backwards. diff --git a/synapse/event_auth.py b/synapse/event_auth.py index afc2bc826745..70c556566e69 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -21,7 +21,7 @@ from signedjson.sign import SignatureVerifyException, verify_signed_json from unpaddedbase64 import decode_base64 -from synapse.api.constants import EventTypes, JoinRules, Membership +from synapse.api.constants import MAX_PDU_SIZE, EventTypes, JoinRules, Membership from synapse.api.errors import AuthError, EventSizeError, SynapseError from synapse.api.room_versions import ( KNOWN_ROOM_VERSIONS, @@ -205,7 +205,7 @@ def too_big(field): too_big("type") if len(event.event_id) > 255: too_big("event_id") - if len(encode_canonical_json(event.get_pdu_json())) > 65536: + if len(encode_canonical_json(event.get_pdu_json())) > MAX_PDU_SIZE: too_big("event") diff --git a/synapse/http/site.py b/synapse/http/site.py index e911ee4809f9..671fd3fbcc29 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -14,7 +14,7 @@ import contextlib import logging import time -from typing import Optional, Tuple, Type, Union +from typing import Optional, Tuple, Union import attr from zope.interface import implementer @@ -50,6 +50,7 @@ class SynapseRequest(Request): * Redaction of access_token query-params in __repr__ * Logging at start and end * Metrics to record CPU, wallclock and DB time by endpoint. + * A limit to the size of request which will be accepted It also provides a method `processing`, which returns a context manager. If this method is called, the request won't be logged until the context manager is closed; @@ -60,8 +61,9 @@ class SynapseRequest(Request): logcontext: the log context for this request """ - def __init__(self, channel, *args, **kw): + def __init__(self, channel, *args, max_request_body_size=1024, **kw): Request.__init__(self, channel, *args, **kw) + self._max_request_body_size = max_request_body_size self.site = channel.site # type: SynapseSite self._channel = channel # this is used by the tests self.start_time = 0.0 @@ -98,6 +100,18 @@ def __repr__(self): self.site.site_tag, ) + def handleContentChunk(self, data): + # we should have a `content` by now. 
+ assert self.content, "handleContentChunk() called before gotLength()" + if self.content.tell() + len(data) > self._max_request_body_size: + logger.warning( + "Aborting connection from %s because the request exceeds maximum size", + self.client, + ) + self.transport.abortConnection() + return + super().handleContentChunk(data) + @property def requester(self) -> Optional[Union[Requester, str]]: return self._requester @@ -505,6 +519,7 @@ def __init__( config: ListenerConfig, resource: IResource, server_version_string, + max_request_body_size: int, reactor: IReactorTime, ): """ @@ -516,6 +531,8 @@ def __init__( resource: The base of the resource tree to be used for serving requests on this site server_version_string: A string to present for the Server header + max_request_body_size: Maximum request body length to allow before + dropping the connection reactor: reactor to be used to manage connection timeouts """ Site.__init__(self, resource, reactor=reactor) @@ -524,9 +541,14 @@ def __init__( assert config.http_options is not None proxied = config.http_options.x_forwarded - self.requestFactory = ( - XForwardedForRequest if proxied else SynapseRequest - ) # type: Type[Request] + request_class = XForwardedForRequest if proxied else SynapseRequest + + def request_factory(channel, queued) -> Request: + return request_class( + channel, max_request_body_size=max_request_body_size, queued=queued + ) + + self.requestFactory = request_factory # type: ignore self.access_logger = logging.getLogger(logger_name) self.server_version_string = server_version_string.encode("ascii") diff --git a/synapse/rest/media/v1/upload_resource.py b/synapse/rest/media/v1/upload_resource.py index 80f017a4ddfb..024a105bf209 100644 --- a/synapse/rest/media/v1/upload_resource.py +++ b/synapse/rest/media/v1/upload_resource.py @@ -51,8 +51,6 @@ async def _async_render_OPTIONS(self, request: Request) -> None: async def _async_render_POST(self, request: SynapseRequest) -> None: requester = await self.auth.get_user_by_req(request) - # TODO: The checks here are a bit late. The content will have - # already been uploaded to a tmp file at this point content_length = request.getHeader("Content-Length") if content_length is None: raise SynapseError(msg="Request must specify a Content-Length", code=400) diff --git a/synapse/server.py b/synapse/server.py index 8c147be2b3d0..06570bb1ce6d 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -287,6 +287,14 @@ def setup(self) -> None: if self.config.run_background_tasks: self.setup_background_tasks() + def start_listening(self) -> None: + """Start the HTTP, manhole, metrics, etc listeners + + Does nothing in this base class; overridden in derived classes to start the + appropriate listeners. + """ + pass + def setup_background_tasks(self) -> None: """ Some handlers have side effects on instantiation (like registering diff --git a/tests/http/test_site.py b/tests/http/test_site.py new file mode 100644 index 000000000000..8c13b4f6931e --- /dev/null +++ b/tests/http/test_site.py @@ -0,0 +1,83 @@ +# Copyright 2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from twisted.internet.address import IPv6Address +from twisted.test.proto_helpers import StringTransport + +from synapse.app.homeserver import SynapseHomeServer + +from tests.unittest import HomeserverTestCase + + +class SynapseRequestTestCase(HomeserverTestCase): + def make_homeserver(self, reactor, clock): + return self.setup_test_homeserver(homeserver_to_use=SynapseHomeServer) + + def test_large_request(self): + """overlarge HTTP requests should be rejected""" + self.hs.start_listening() + + # find the HTTP server which is configured to listen on port 0 + (port, factory, _backlog, interface) = self.reactor.tcpServers[0] + self.assertEqual(interface, "::") + self.assertEqual(port, 0) + + # as a control case, first send a regular request. + + # complete the connection and wire it up to a fake transport + client_address = IPv6Address("TCP", "::1", "2345") + protocol = factory.buildProtocol(client_address) + transport = StringTransport() + protocol.makeConnection(transport) + + protocol.dataReceived( + b"POST / HTTP/1.1\r\n" + b"Connection: close\r\n" + b"Transfer-Encoding: chunked\r\n" + b"\r\n" + b"0\r\n" + b"\r\n" + ) + + while not transport.disconnecting: + self.reactor.advance(1) + + # we should get a 404 + self.assertRegex(transport.value().decode(), r"^HTTP/1\.1 404 ") + + # now send an oversized request + protocol = factory.buildProtocol(client_address) + transport = StringTransport() + protocol.makeConnection(transport) + + protocol.dataReceived( + b"POST / HTTP/1.1\r\n" + b"Connection: close\r\n" + b"Transfer-Encoding: chunked\r\n" + b"\r\n" + ) + + # we deliberately send all the data in one big chunk, to ensure that + # twisted isn't buffering the data in the chunked transfer decoder. + # we start with the chunk size, in hex. (We won't actually send this much) + protocol.dataReceived(b"10000000\r\n") + sent = 0 + while not transport.disconnected: + self.assertLess(sent, 0x10000000, "connection did not drop") + protocol.dataReceived(b"\0" * 1024) + sent += 1024 + + # default max upload size is 50M, so it should drop on the next buffer after + # that. 
+ self.assertEqual(sent, 50 * 1024 * 1024 + 1024) diff --git a/tests/replication/_base.py b/tests/replication/_base.py index dc3519ea1397..624bd1b92722 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -359,6 +359,7 @@ def make_worker_hs( config=worker_hs.config.server.listeners[0], resource=resource, server_version_string="1", + max_request_body_size=4096, reactor=self.reactor, ) diff --git a/tests/test_server.py b/tests/test_server.py index 45400be3674c..407e172e41de 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -202,6 +202,7 @@ def _make_request(self, method, path): parse_listener_def({"type": "http", "port": 0}), self.resource, "1.0", + max_request_body_size=1234, reactor=self.reactor, ) diff --git a/tests/unittest.py b/tests/unittest.py index 5353e75c7c85..9bd02bd9c423 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -247,6 +247,7 @@ def setUp(self): config=self.hs.config.server.listeners[0], resource=self.resource, server_version_string="1", + max_request_body_size=1234, reactor=self.reactor, ) From 0ffa5fb935ac9285217d957403861d2e3327e109 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 27 Apr 2021 10:09:41 +0100 Subject: [PATCH 084/222] Use current state table for `presence.get_interested_remotes` (#9887) This should be a lot quicker than asking the state handler. --- changelog.d/9887.misc | 1 + synapse/handlers/presence.py | 9 ++------- 2 files changed, 3 insertions(+), 7 deletions(-) create mode 100644 changelog.d/9887.misc diff --git a/changelog.d/9887.misc b/changelog.d/9887.misc new file mode 100644 index 000000000000..650ebf85e6ce --- /dev/null +++ b/changelog.d/9887.misc @@ -0,0 +1 @@ +Small performance improvement around handling new local presence updates. diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 9938be3821ea..969c73c1e7a1 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -58,7 +58,6 @@ from synapse.replication.http.streams import ReplicationGetStreamUpdates from synapse.replication.tcp.commands import ClearUserSyncsCommand from synapse.replication.tcp.streams import PresenceFederationStream, PresenceStream -from synapse.state import StateHandler from synapse.storage.databases.main import DataStore from synapse.types import JsonDict, UserID, get_domain_from_id from synapse.util.async_helpers import Linearizer @@ -291,7 +290,6 @@ async def maybe_send_presence_to_interested_destinations( self.store, self.presence_router, states, - self.state, ) for destinations, states in hosts_and_states: @@ -757,7 +755,6 @@ async def _update_states(self, new_states: Iterable[UserPresenceState]) -> None: self.store, self.presence_router, list(to_federation_ping.values()), - self.state, ) for destinations, states in hosts_and_states: @@ -1384,7 +1381,6 @@ def __init__(self, hs: "HomeServer"): self.get_presence_router = hs.get_presence_router self.clock = hs.get_clock() self.store = hs.get_datastore() - self.state = hs.get_state_handler() @log_function async def get_new_events( @@ -1853,7 +1849,6 @@ async def get_interested_remotes( store: DataStore, presence_router: PresenceRouter, states: List[UserPresenceState], - state_handler: StateHandler, ) -> List[Tuple[Collection[str], List[UserPresenceState]]]: """Given a list of presence states figure out which remote servers should be sent which. @@ -1864,7 +1859,6 @@ async def get_interested_remotes( store: The homeserver's data store. presence_router: A module for augmenting the destinations for presence updates. 
states: A list of incoming user presence updates. - state_handler: Returns: A list of 2-tuples of destinations and states, where for @@ -1881,7 +1875,8 @@ async def get_interested_remotes( ) for room_id, states in room_ids_to_states.items(): - hosts = await state_handler.get_current_hosts_in_room(room_id) + user_ids = await store.get_users_in_room(room_id) + hosts = {get_domain_from_id(user_id) for user_id in user_ids} hosts_and_states.append((hosts, states)) for user_id, states in users_to_states.items(): From 1350b053da45c94722cd8acf9cfd367db787259c Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 27 Apr 2021 07:30:34 -0400 Subject: [PATCH 085/222] Pass errors back to the client when trying multiple federation destinations. (#9868) This ensures that something like an auth error (403) will be returned to the requester instead of attempting to try more servers, which will likely result in the same error, and then passing back a generic 400 error. --- changelog.d/9868.bugfix | 1 + synapse/federation/federation_client.py | 118 ++++++++++++------------ 2 files changed, 61 insertions(+), 58 deletions(-) create mode 100644 changelog.d/9868.bugfix diff --git a/changelog.d/9868.bugfix b/changelog.d/9868.bugfix new file mode 100644 index 000000000000..e2b4f97ad51f --- /dev/null +++ b/changelog.d/9868.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where errors from federation did not propagate to the client. diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index f93335edaa1c..a5b6a611952b 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -451,6 +451,28 @@ async def get_event_auth( return signed_auth + def _is_unknown_endpoint( + self, e: HttpResponseException, synapse_error: Optional[SynapseError] = None + ) -> bool: + """ + Returns true if the response was due to an endpoint being unimplemented. + + Args: + e: The error response received from the remote server. + synapse_error: The above error converted to a SynapseError. This is + automatically generated if not provided. + + """ + if synapse_error is None: + synapse_error = e.to_synapse_error() + # There is no good way to detect an "unknown" endpoint. + # + # Dendrite returns a 404 (with no body); synapse returns a 400 + # with M_UNRECOGNISED. + return e.code == 404 or ( + e.code == 400 and synapse_error.errcode == Codes.UNRECOGNIZED + ) + async def _try_destination_list( self, description: str, @@ -468,9 +490,9 @@ async def _try_destination_list( callback: Function to run for each server. Passed a single argument: the server_name to try. - If the callback raises a CodeMessageException with a 300/400 code, - attempts to perform the operation stop immediately and the exception is - reraised. + If the callback raises a CodeMessageException with a 300/400 code or + an UnsupportedRoomVersionError, attempts to perform the operation + stop immediately and the exception is reraised. Otherwise, if the callback raises an Exception the error is logged and the next server tried. 
Normally the stacktrace is logged but this is @@ -492,8 +514,7 @@ async def _try_destination_list( continue try: - res = await callback(destination) - return res + return await callback(destination) except InvalidResponseError as e: logger.warning("Failed to %s via %s: %s", description, destination, e) except UnsupportedRoomVersionError: @@ -502,17 +523,15 @@ async def _try_destination_list( synapse_error = e.to_synapse_error() failover = False + # Failover on an internal server error, or if the destination + # doesn't implemented the endpoint for some reason. if 500 <= e.code < 600: failover = True - elif failover_on_unknown_endpoint: - # there is no good way to detect an "unknown" endpoint. Dendrite - # returns a 404 (with no body); synapse returns a 400 - # with M_UNRECOGNISED. - if e.code == 404 or ( - e.code == 400 and synapse_error.errcode == Codes.UNRECOGNIZED - ): - failover = True + elif failover_on_unknown_endpoint and self._is_unknown_endpoint( + e, synapse_error + ): + failover = True if not failover: raise synapse_error from e @@ -570,9 +589,8 @@ async def make_membership_event( UnsupportedRoomVersionError: if remote responds with a room version we don't understand. - SynapseError: if the chosen remote server returns a 300/400 code. - - RuntimeError: if no servers were reachable. + SynapseError: if the chosen remote server returns a 300/400 code, or + no servers successfully handle the request. """ valid_memberships = {Membership.JOIN, Membership.LEAVE} if membership not in valid_memberships: @@ -642,9 +660,8 @@ async def send_join( ``auth_chain``. Raises: - SynapseError: if the chosen remote server returns a 300/400 code. - - RuntimeError: if no servers were reachable. + SynapseError: if the chosen remote server returns a 300/400 code, or + no servers successfully handle the request. """ async def send_request(destination) -> Dict[str, Any]: @@ -673,7 +690,7 @@ async def send_request(destination) -> Dict[str, Any]: if create_event is None: # If the state doesn't have a create event then the room is # invalid, and it would fail auth checks anyway. - raise SynapseError(400, "No create event in state") + raise InvalidResponseError("No create event in state") # the room version should be sane. create_room_version = create_event.content.get( @@ -746,16 +763,11 @@ async def _do_send_join(self, destination: str, pdu: EventBase) -> JsonDict: content=pdu.get_pdu_json(time_now), ) except HttpResponseException as e: - if e.code in [400, 404]: - err = e.to_synapse_error() - - # If we receive an error response that isn't a generic error, or an - # unrecognised endpoint error, we assume that the remote understands - # the v2 invite API and this is a legitimate error. - if err.errcode not in [Codes.UNKNOWN, Codes.UNRECOGNIZED]: - raise err - else: - raise e.to_synapse_error() + # If an error is received that is due to an unrecognised endpoint, + # fallback to the v1 endpoint. Otherwise consider it a legitmate error + # and raise. + if not self._is_unknown_endpoint(e): + raise logger.debug("Couldn't send_join with the v2 API, falling back to the v1 API") @@ -802,6 +814,11 @@ async def _do_send_invite( Returns: The event as a dict as returned by the remote server + + Raises: + SynapseError: if the remote server returns an error or if the server + only supports the v1 endpoint and a room version other than "1" + or "2" is requested. 
""" time_now = self._clock.time_msec() @@ -817,28 +834,19 @@ async def _do_send_invite( }, ) except HttpResponseException as e: - if e.code in [400, 404]: - err = e.to_synapse_error() - - # If we receive an error response that isn't a generic error, we - # assume that the remote understands the v2 invite API and this - # is a legitimate error. - if err.errcode != Codes.UNKNOWN: - raise err - - # Otherwise, we assume that the remote server doesn't understand - # the v2 invite API. That's ok provided the room uses old-style event - # IDs. + # If an error is received that is due to an unrecognised endpoint, + # fallback to the v1 endpoint if the room uses old-style event IDs. + # Otherwise consider it a legitmate error and raise. + err = e.to_synapse_error() + if self._is_unknown_endpoint(e, err): if room_version.event_format != EventFormatVersions.V1: raise SynapseError( 400, "User's homeserver does not support this room version", Codes.UNSUPPORTED_ROOM_VERSION, ) - elif e.code in (403, 429): - raise e.to_synapse_error() else: - raise + raise err # Didn't work, try v1 API. # Note the v1 API returns a tuple of `(200, content)` @@ -865,9 +873,8 @@ async def send_leave(self, destinations: Iterable[str], pdu: EventBase) -> None: pdu: event to be sent Raises: - SynapseError if the chosen remote server returns a 300/400 code. - - RuntimeError if no servers were reachable. + SynapseError: if the chosen remote server returns a 300/400 code, or + no servers successfully handle the request. """ async def send_request(destination: str) -> None: @@ -889,16 +896,11 @@ async def _do_send_leave(self, destination: str, pdu: EventBase) -> JsonDict: content=pdu.get_pdu_json(time_now), ) except HttpResponseException as e: - if e.code in [400, 404]: - err = e.to_synapse_error() - - # If we receive an error response that isn't a generic error, or an - # unrecognised endpoint error, we assume that the remote understands - # the v2 invite API and this is a legitimate error. - if err.errcode not in [Codes.UNKNOWN, Codes.UNRECOGNIZED]: - raise err - else: - raise e.to_synapse_error() + # If an error is received that is due to an unrecognised endpoint, + # fallback to the v1 endpoint. Otherwise consider it a legitmate error + # and raise. + if not self._is_unknown_endpoint(e): + raise logger.debug("Couldn't send_leave with the v2 API, falling back to the v1 API") From fe604a022a7142157da7e90a40330beb2a11af7a Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 27 Apr 2021 13:13:07 +0100 Subject: [PATCH 086/222] Remove various bits of compatibility code for Python <3.6 (#9879) I went through and removed a bunch of cruft that was lying around for compatibility with old Python versions. This PR also will now prevent Synapse from starting unless you're running Python 3.6+. 
--- changelog.d/9879.misc | 1 + mypy.ini | 1 - synapse/__init__.py | 4 +-- synapse/python_dependencies.py | 9 ++--- synapse/rest/admin/users.py | 3 +- synapse/rest/consent/consent_resource.py | 10 +----- synapse/rest/media/v1/filepath.py | 2 +- synapse/secrets.py | 44 ------------------------ synapse/server.py | 5 --- synapse/storage/_base.py | 2 +- synapse/storage/database.py | 15 ++++---- synapse/util/caches/response_cache.py | 2 +- tests/rest/admin/test_user.py | 15 ++++---- tests/storage/test__base.py | 3 +- tests/unittest.py | 2 +- tox.ini | 9 ++--- 16 files changed, 29 insertions(+), 98 deletions(-) create mode 100644 changelog.d/9879.misc delete mode 100644 synapse/secrets.py diff --git a/changelog.d/9879.misc b/changelog.d/9879.misc new file mode 100644 index 000000000000..c9ca37cf4835 --- /dev/null +++ b/changelog.d/9879.misc @@ -0,0 +1 @@ +Remove backwards-compatibility code for Python versions < 3.6. \ No newline at end of file diff --git a/mypy.ini b/mypy.ini index 32e619740980..a40f705b76f4 100644 --- a/mypy.ini +++ b/mypy.ini @@ -41,7 +41,6 @@ files = synapse/push, synapse/replication, synapse/rest, - synapse/secrets.py, synapse/server.py, synapse/server_notices, synapse/spam_checker_api, diff --git a/synapse/__init__.py b/synapse/__init__.py index 837e938f569c..fbd49a93e137 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -21,8 +21,8 @@ import sys # Check that we're not running on an unsupported Python version. -if sys.version_info < (3, 5): - print("Synapse requires Python 3.5 or above.") +if sys.version_info < (3, 6): + print("Synapse requires Python 3.6 or above.") sys.exit(1) # Twisted and canonicaljson will fail to import when this file is executed to diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 2a1c925ee8b9..2de946f46404 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -85,7 +85,7 @@ "typing-extensions>=3.7.4", # We enforce that we have a `cryptography` version that bundles an `openssl` # with the latest security patches. - "cryptography>=3.4.7;python_version>='3.6'", + "cryptography>=3.4.7", ] CONDITIONAL_REQUIREMENTS = { @@ -100,14 +100,9 @@ # that use the protocol, such as Let's Encrypt. "acme": [ "txacme>=0.9.2", - # txacme depends on eliot. 
Eliot 1.8.0 is incompatible with - # python 3.5.2, as per https://github.com/itamarst/eliot/issues/418 - "eliot<1.8.0;python_version<'3.5.3'", ], "saml2": [ - # pysaml2 6.4.0 is incompatible with Python 3.5 (see https://github.com/IdentityPython/pysaml2/issues/749) - "pysaml2>=4.5.0,<6.4.0;python_version<'3.6'", - "pysaml2>=4.5.0;python_version>='3.6'", + "pysaml2>=4.5.0", ], "oidc": ["authlib>=0.14.0"], # systemd-python is necessary for logging to the systemd journal via diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index edda7861fae5..8c9d21d3ea1b 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -14,6 +14,7 @@ import hashlib import hmac import logging +import secrets from http import HTTPStatus from typing import TYPE_CHECKING, Dict, List, Optional, Tuple @@ -375,7 +376,7 @@ def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: """ self._clear_old_nonces() - nonce = self.hs.get_secrets().token_hex(64) + nonce = secrets.token_hex(64) self.nonces[nonce] = int(self.reactor.seconds()) return 200, {"nonce": nonce} diff --git a/synapse/rest/consent/consent_resource.py b/synapse/rest/consent/consent_resource.py index c4550d3cf06b..b19cd8afc53e 100644 --- a/synapse/rest/consent/consent_resource.py +++ b/synapse/rest/consent/consent_resource.py @@ -32,14 +32,6 @@ logger = logging.getLogger(__name__) -# use hmac.compare_digest if we have it (python 2.7.7), else just use equality -if hasattr(hmac, "compare_digest"): - compare_digest = hmac.compare_digest -else: - - def compare_digest(a, b): - return a == b - class ConsentResource(DirectServeHtmlResource): """A twisted Resource to display a privacy policy and gather consent to it @@ -209,5 +201,5 @@ def _check_hash(self, userid, userhmac): .encode("ascii") ) - if not compare_digest(want_mac, userhmac): + if not hmac.compare_digest(want_mac, userhmac): raise SynapseError(HTTPStatus.FORBIDDEN, "HMAC incorrect") diff --git a/synapse/rest/media/v1/filepath.py b/synapse/rest/media/v1/filepath.py index 4088e7a059ad..09531ebf548d 100644 --- a/synapse/rest/media/v1/filepath.py +++ b/synapse/rest/media/v1/filepath.py @@ -21,7 +21,7 @@ NEW_FORMAT_ID_RE = re.compile(r"^\d\d\d\d-\d\d-\d\d") -def _wrap_in_base_path(func: "Callable[..., str]") -> "Callable[..., str]": +def _wrap_in_base_path(func: Callable[..., str]) -> Callable[..., str]: """Takes a function that returns a relative path and turns it into an absolute path based on the location of the primary media store """ diff --git a/synapse/secrets.py b/synapse/secrets.py deleted file mode 100644 index bf829251fd0a..000000000000 --- a/synapse/secrets.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright 2018 New Vector Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Injectable secrets module for Synapse. - -See https://docs.python.org/3/library/secrets.html#module-secrets for the API -used in Python 3.6, and the API emulated in Python 2.7. 
-""" -import sys - -# secrets is available since python 3.6 -if sys.version_info[0:2] >= (3, 6): - import secrets - - class Secrets: - def token_bytes(self, nbytes: int = 32) -> bytes: - return secrets.token_bytes(nbytes) - - def token_hex(self, nbytes: int = 32) -> str: - return secrets.token_hex(nbytes) - - -else: - import binascii - import os - - class Secrets: - def token_bytes(self, nbytes: int = 32) -> bytes: - return os.urandom(nbytes) - - def token_hex(self, nbytes: int = 32) -> str: - return binascii.hexlify(self.token_bytes(nbytes)).decode("ascii") diff --git a/synapse/server.py b/synapse/server.py index 06570bb1ce6d..2337d2d9b450 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -126,7 +126,6 @@ MediaRepository, MediaRepositoryResource, ) -from synapse.secrets import Secrets from synapse.server_notices.server_notices_manager import ServerNoticesManager from synapse.server_notices.server_notices_sender import ServerNoticesSender from synapse.server_notices.worker_server_notices_sender import ( @@ -641,10 +640,6 @@ def get_groups_attestation_signing(self) -> GroupAttestationSigning: def get_groups_attestation_renewer(self) -> GroupAttestionRenewer: return GroupAttestionRenewer(self) - @cache_in_self - def get_secrets(self) -> Secrets: - return Secrets() - @cache_in_self def get_stats_handler(self) -> StatsHandler: return StatsHandler(self) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index d472676acfe1..6b68d8720c92 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -114,7 +114,7 @@ def db_to_json(db_content: Union[memoryview, bytes, bytearray, str]) -> Any: db_content = db_content.tobytes() # Decode it to a Unicode string before feeding it to the JSON decoder, since - # Python 3.5 does not support deserializing bytes. + # it only supports handling strings if isinstance(db_content, (bytes, bytearray)): db_content = db_content.decode("utf8") diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 9452368bf08a..bd39c095af6b 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -171,10 +171,7 @@ def __getattr__(self, name): # The type of entry which goes on our after_callbacks and exception_callbacks lists. -# -# Python 3.5.2 doesn't support Callable with an ellipsis, so we wrap it in quotes so -# that mypy sees the type but the runtime python doesn't. -_CallbackListEntry = Tuple["Callable[..., None]", Iterable[Any], Dict[str, Any]] +_CallbackListEntry = Tuple[Callable[..., None], Iterable[Any], Dict[str, Any]] R = TypeVar("R") @@ -221,7 +218,7 @@ def __init__( self.after_callbacks = after_callbacks self.exception_callbacks = exception_callbacks - def call_after(self, callback: "Callable[..., None]", *args: Any, **kwargs: Any): + def call_after(self, callback: Callable[..., None], *args: Any, **kwargs: Any): """Call the given callback on the main twisted thread after the transaction has finished. Used to invalidate the caches on the correct thread. 
@@ -233,7 +230,7 @@ def call_after(self, callback: "Callable[..., None]", *args: Any, **kwargs: Any) self.after_callbacks.append((callback, args, kwargs)) def call_on_exception( - self, callback: "Callable[..., None]", *args: Any, **kwargs: Any + self, callback: Callable[..., None], *args: Any, **kwargs: Any ): # if self.exception_callbacks is None, that means that whatever constructed the # LoggingTransaction isn't expecting there to be any callbacks; assert that @@ -485,7 +482,7 @@ def new_transaction( desc: str, after_callbacks: List[_CallbackListEntry], exception_callbacks: List[_CallbackListEntry], - func: "Callable[..., R]", + func: Callable[..., R], *args: Any, **kwargs: Any, ) -> R: @@ -618,7 +615,7 @@ def new_transaction( async def runInteraction( self, desc: str, - func: "Callable[..., R]", + func: Callable[..., R], *args: Any, db_autocommit: bool = False, **kwargs: Any, @@ -678,7 +675,7 @@ async def runInteraction( async def runWithConnection( self, - func: "Callable[..., R]", + func: Callable[..., R], *args: Any, db_autocommit: bool = False, **kwargs: Any, diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py index 2529845c9ef0..25ea1bcc915e 100644 --- a/synapse/util/caches/response_cache.py +++ b/synapse/util/caches/response_cache.py @@ -110,7 +110,7 @@ def remove(r): return result.observe() def wrap( - self, key: T, callback: "Callable[..., Any]", *args: Any, **kwargs: Any + self, key: T, callback: Callable[..., Any], *args: Any, **kwargs: Any ) -> defer.Deferred: """Wrap together a *get* and *set* call, taking care of logcontexts diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index b3afd51522d6..d599a4c984d9 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -18,7 +18,7 @@ import urllib.parse from binascii import unhexlify from typing import List, Optional -from unittest.mock import Mock +from unittest.mock import Mock, patch import synapse.rest.admin from synapse.api.constants import UserTypes @@ -54,8 +54,6 @@ def make_homeserver(self, reactor, clock): self.datastore = Mock(return_value=Mock()) self.datastore.get_current_state_deltas = Mock(return_value=(0, [])) - self.secrets = Mock() - self.hs = self.setup_test_homeserver() self.hs.config.registration_shared_secret = "shared" @@ -84,14 +82,13 @@ def test_get_nonce(self): Calling GET on the endpoint will return a randomised nonce, using the homeserver's secrets provider. """ - secrets = Mock() - secrets.token_hex = Mock(return_value="abcd") - - self.hs.get_secrets = Mock(return_value=secrets) + with patch("secrets.token_hex") as token_hex: + # Patch secrets.token_hex for the duration of this context + token_hex.return_value = "abcd" - channel = self.make_request("GET", self.url) + channel = self.make_request("GET", self.url) - self.assertEqual(channel.json_body, {"nonce": "abcd"}) + self.assertEqual(channel.json_body, {"nonce": "abcd"}) def test_expired_nonce(self): """ diff --git a/tests/storage/test__base.py b/tests/storage/test__base.py index 6339a43f0cb3..200b9198f910 100644 --- a/tests/storage/test__base.py +++ b/tests/storage/test__base.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import secrets from tests import unittest @@ -21,7 +22,7 @@ class UpsertManyTests(unittest.HomeserverTestCase): def prepare(self, reactor, clock, hs): self.storage = hs.get_datastore() - self.table_name = "table_" + hs.get_secrets().token_hex(6) + self.table_name = "table_" + secrets.token_hex(6) self.get_success( self.storage.db_pool.runInteraction( "create", diff --git a/tests/unittest.py b/tests/unittest.py index 9bd02bd9c423..74db7c08f1ee 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -18,6 +18,7 @@ import hmac import inspect import logging +import secrets import time from typing import Callable, Dict, Iterable, Optional, Tuple, Type, TypeVar, Union from unittest.mock import Mock, patch @@ -626,7 +627,6 @@ def create_and_send_event( str: The new event's ID. """ event_creator = self.hs.get_event_creation_handler() - secrets = self.hs.get_secrets() requester = create_requester(user) event, context = self.get_success( diff --git a/tox.ini b/tox.ini index 998b04b22442..ecd609271d1d 100644 --- a/tox.ini +++ b/tox.ini @@ -21,13 +21,11 @@ deps = # installed on that). # # anyway, make sure that we have a recent enough setuptools. - setuptools>=18.5 ; python_version >= '3.6' - setuptools>=18.5,<51.0.0 ; python_version < '3.6' + setuptools>=18.5 # we also need a semi-recent version of pip, because old ones fail to # install the "enum34" dependency of cryptography. - pip>=10 ; python_version >= '3.6' - pip>=10,<21.0 ; python_version < '3.6' + pip>=10 # directories/files we run the linters on. # if you update this list, make sure to do the same in scripts-dev/lint.sh @@ -168,8 +166,7 @@ skip_install = true usedevelop = false deps = coverage - pip>=10 ; python_version >= '3.6' - pip>=10,<21.0 ; python_version < '3.6' + pip>=10 commands= coverage combine coverage report From dd2d32dcdb3238735aeeeaff18e5c754b1d50be9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 28 Apr 2021 11:07:47 +0100 Subject: [PATCH 087/222] Add type hints to presence handler (#9885) --- changelog.d/9885.misc | 1 + synapse/handlers/presence.py | 159 ++++++++++++++++++++--------------- 2 files changed, 90 insertions(+), 70 deletions(-) create mode 100644 changelog.d/9885.misc diff --git a/changelog.d/9885.misc b/changelog.d/9885.misc new file mode 100644 index 000000000000..492fccea46e3 --- /dev/null +++ b/changelog.d/9885.misc @@ -0,0 +1 @@ +Add type hints to presence handler. diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 969c73c1e7a1..e9f618bb5ab2 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -28,6 +28,7 @@ from contextlib import contextmanager from typing import ( TYPE_CHECKING, + Callable, Collection, Dict, FrozenSet, @@ -232,23 +233,23 @@ async def bump_presence_active_time(self, user: UserID): """ async def update_external_syncs_row( - self, process_id, user_id, is_syncing, sync_time_msec - ): + self, process_id: str, user_id: str, is_syncing: bool, sync_time_msec: int + ) -> None: """Update the syncing users for an external process as a delta. This is a no-op when presence is handled by a different worker. Args: - process_id (str): An identifier for the process the users are + process_id: An identifier for the process the users are syncing against. This allows synapse to process updates as user start and stop syncing against a given process. 
- user_id (str): The user who has started or stopped syncing - is_syncing (bool): Whether or not the user is now syncing - sync_time_msec(int): Time in ms when the user was last syncing + user_id: The user who has started or stopped syncing + is_syncing: Whether or not the user is now syncing + sync_time_msec: Time in ms when the user was last syncing """ pass - async def update_external_syncs_clear(self, process_id): + async def update_external_syncs_clear(self, process_id: str) -> None: """Marks all users that had been marked as syncing by a given process as offline. @@ -304,7 +305,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): class WorkerPresenceHandler(BasePresenceHandler): - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): super().__init__(hs) self.hs = hs @@ -327,7 +328,7 @@ def __init__(self, hs): # user_id -> last_sync_ms. Lists the users that have stopped syncing but # we haven't notified the presence writer of that yet - self.users_going_offline = {} + self.users_going_offline = {} # type: Dict[str, int] self._bump_active_client = ReplicationBumpPresenceActiveTime.make_client(hs) self._set_state_client = ReplicationPresenceSetState.make_client(hs) @@ -346,24 +347,21 @@ def __init__(self, hs): self._on_shutdown, ) - def _on_shutdown(self): + def _on_shutdown(self) -> None: if self._presence_enabled: self.hs.get_tcp_replication().send_command( ClearUserSyncsCommand(self.instance_id) ) - def send_user_sync(self, user_id, is_syncing, last_sync_ms): + def send_user_sync(self, user_id: str, is_syncing: bool, last_sync_ms: int) -> None: if self._presence_enabled: self.hs.get_tcp_replication().send_user_sync( self.instance_id, user_id, is_syncing, last_sync_ms ) - def mark_as_coming_online(self, user_id): + def mark_as_coming_online(self, user_id: str) -> None: """A user has started syncing. Send a UserSync to the presence writer, unless they had recently stopped syncing. - - Args: - user_id (str) """ going_offline = self.users_going_offline.pop(user_id, None) if not going_offline: @@ -371,18 +369,15 @@ def mark_as_coming_online(self, user_id): # were offline self.send_user_sync(user_id, True, self.clock.time_msec()) - def mark_as_going_offline(self, user_id): + def mark_as_going_offline(self, user_id: str) -> None: """A user has stopped syncing. We wait before notifying the presence writer as its likely they'll come back soon. This allows us to avoid sending a stopped syncing immediately followed by a started syncing notification to the presence writer - - Args: - user_id (str) """ self.users_going_offline[user_id] = self.clock.time_msec() - def send_stop_syncing(self): + def send_stop_syncing(self) -> None: """Check if there are any users who have stopped syncing a while ago and haven't come back yet. If there are poke the presence writer about them. 
""" @@ -430,7 +425,9 @@ def _user_syncing(): return _user_syncing() - async def notify_from_replication(self, states, stream_id): + async def notify_from_replication( + self, states: List[UserPresenceState], stream_id: int + ) -> None: parties = await get_interested_parties(self.store, self.presence_router, states) room_ids_to_states, users_to_states = parties @@ -478,7 +475,12 @@ def get_currently_syncing_users_for_replication(self) -> Iterable[str]: if count > 0 ] - async def set_state(self, target_user, state, ignore_status_msg=False): + async def set_state( + self, + target_user: UserID, + state: JsonDict, + ignore_status_msg: bool = False, + ) -> None: """Set the presence state of the user.""" presence = state["presence"] @@ -508,7 +510,7 @@ async def set_state(self, target_user, state, ignore_status_msg=False): ignore_status_msg=ignore_status_msg, ) - async def bump_presence_active_time(self, user): + async def bump_presence_active_time(self, user: UserID) -> None: """We've seen the user do something that indicates they're interacting with the app. """ @@ -592,8 +594,8 @@ def __init__(self, hs: "HomeServer"): # we assume that all the sync requests on that process have stopped. # Stored as a dict from process_id to set of user_id, and a dict of # process_id to millisecond timestamp last updated. - self.external_process_to_current_syncs = {} # type: Dict[int, Set[str]] - self.external_process_last_updated_ms = {} # type: Dict[int, int] + self.external_process_to_current_syncs = {} # type: Dict[str, Set[str]] + self.external_process_last_updated_ms = {} # type: Dict[str, int] self.external_sync_linearizer = Linearizer(name="external_sync_linearizer") @@ -633,7 +635,7 @@ def run_persister(): self._event_pos = self.store.get_current_events_token() self._event_processing = False - async def _on_shutdown(self): + async def _on_shutdown(self) -> None: """Gets called when shutting down. This lets us persist any updates that we haven't yet persisted, e.g. updates that only changes some internal timers. This allows changes to persist across startup without having to @@ -662,7 +664,7 @@ async def _on_shutdown(self): ) logger.info("Finished _on_shutdown") - async def _persist_unpersisted_changes(self): + async def _persist_unpersisted_changes(self) -> None: """We periodically persist the unpersisted changes, as otherwise they may stack up and slow down shutdown times. """ @@ -762,7 +764,7 @@ async def _update_states(self, new_states: Iterable[UserPresenceState]) -> None: states, destinations ) - async def _handle_timeouts(self): + async def _handle_timeouts(self) -> None: """Checks the presence of users that have timed out and updates as appropriate. """ @@ -814,7 +816,7 @@ async def _handle_timeouts(self): return await self._update_states(changes) - async def bump_presence_active_time(self, user): + async def bump_presence_active_time(self, user: UserID) -> None: """We've seen the user do something that indicates they're interacting with the app. """ @@ -911,17 +913,17 @@ def get_currently_syncing_users_for_replication(self) -> Iterable[str]: return [] async def update_external_syncs_row( - self, process_id, user_id, is_syncing, sync_time_msec - ): + self, process_id: str, user_id: str, is_syncing: bool, sync_time_msec: int + ) -> None: """Update the syncing users for an external process as a delta. Args: - process_id (str): An identifier for the process the users are + process_id: An identifier for the process the users are syncing against. 
This allows synapse to process updates as user start and stop syncing against a given process. - user_id (str): The user who has started or stopped syncing - is_syncing (bool): Whether or not the user is now syncing - sync_time_msec(int): Time in ms when the user was last syncing + user_id: The user who has started or stopped syncing + is_syncing: Whether or not the user is now syncing + sync_time_msec: Time in ms when the user was last syncing """ with (await self.external_sync_linearizer.queue(process_id)): prev_state = await self.current_state_for_user(user_id) @@ -958,7 +960,7 @@ async def update_external_syncs_row( self.external_process_last_updated_ms[process_id] = self.clock.time_msec() - async def update_external_syncs_clear(self, process_id): + async def update_external_syncs_clear(self, process_id: str) -> None: """Marks all users that had been marked as syncing by a given process as offline. @@ -979,12 +981,12 @@ async def update_external_syncs_clear(self, process_id): ) self.external_process_last_updated_ms.pop(process_id, None) - async def current_state_for_user(self, user_id): + async def current_state_for_user(self, user_id: str) -> UserPresenceState: """Get the current presence state for a user.""" res = await self.current_state_for_users([user_id]) return res[user_id] - async def _persist_and_notify(self, states): + async def _persist_and_notify(self, states: List[UserPresenceState]) -> None: """Persist states in the database, poke the notifier and send to interested remote servers """ @@ -1005,7 +1007,7 @@ async def _persist_and_notify(self, states): # stream (which is updated by `store.update_presence`). await self.maybe_send_presence_to_interested_destinations(states) - async def incoming_presence(self, origin, content): + async def incoming_presence(self, origin: str, content: JsonDict) -> None: """Called when we receive a `m.presence` EDU from a remote server.""" if not self._presence_enabled: return @@ -1055,7 +1057,9 @@ async def incoming_presence(self, origin, content): federation_presence_counter.inc(len(updates)) await self._update_states(updates) - async def set_state(self, target_user, state, ignore_status_msg=False): + async def set_state( + self, target_user: UserID, state: JsonDict, ignore_status_msg: bool = False + ) -> None: """Set the presence state of the user.""" status_msg = state.get("status_msg", None) presence = state["presence"] @@ -1089,7 +1093,7 @@ async def set_state(self, target_user, state, ignore_status_msg=False): await self._update_states([prev_state.copy_and_replace(**new_fields)]) - async def is_visible(self, observed_user, observer_user): + async def is_visible(self, observed_user: UserID, observer_user: UserID) -> bool: """Returns whether a user can see another user's presence.""" observer_room_ids = await self.store.get_rooms_for_user( observer_user.to_string() @@ -1144,7 +1148,7 @@ async def get_all_presence_updates( ) return rows - def notify_new_event(self): + def notify_new_event(self) -> None: """Called when new events have happened. Handles users and servers joining rooms and require being sent presence. 
""" @@ -1163,7 +1167,7 @@ async def _process_presence(): run_as_background_process("presence.notify_new_event", _process_presence) - async def _unsafe_process(self): + async def _unsafe_process(self) -> None: # Loop round handling deltas until we're up to date while True: with Measure(self.clock, "presence_delta"): @@ -1188,7 +1192,7 @@ async def _unsafe_process(self): max_pos ) - async def _handle_state_delta(self, deltas): + async def _handle_state_delta(self, deltas: List[JsonDict]) -> None: """Process current state deltas to find new joins that need to be handled. """ @@ -1311,7 +1315,7 @@ async def _on_user_joined_room( return [remote_host], states -def should_notify(old_state, new_state): +def should_notify(old_state: UserPresenceState, new_state: UserPresenceState) -> bool: """Decides if a presence state change should be sent to interested parties.""" if old_state == new_state: return False @@ -1347,7 +1351,9 @@ def should_notify(old_state, new_state): return False -def format_user_presence_state(state, now, include_user_id=True): +def format_user_presence_state( + state: UserPresenceState, now: int, include_user_id: bool = True +) -> JsonDict: """Convert UserPresenceState to a format that can be sent down to clients and to other servers. @@ -1385,11 +1391,11 @@ def __init__(self, hs: "HomeServer"): @log_function async def get_new_events( self, - user, - from_key, - room_ids=None, - include_offline=True, - explicit_room_id=None, + user: UserID, + from_key: Optional[int], + room_ids: Optional[List[str]] = None, + include_offline: bool = True, + explicit_room_id: Optional[str] = None, **kwargs, ) -> Tuple[List[UserPresenceState], int]: # The process for getting presence events are: @@ -1594,7 +1600,7 @@ def _filter_offline_presence_state( if update.state != PresenceState.OFFLINE ] - def get_current_key(self): + def get_current_key(self) -> int: return self.store.get_current_presence_token() @cached(num_args=2, cache_context=True) @@ -1654,15 +1660,20 @@ async def _get_interested_in( return users_interested_in -def handle_timeouts(user_states, is_mine_fn, syncing_user_ids, now): +def handle_timeouts( + user_states: List[UserPresenceState], + is_mine_fn: Callable[[str], bool], + syncing_user_ids: Set[str], + now: int, +) -> List[UserPresenceState]: """Checks the presence of users that have timed out and updates as appropriate. Args: - user_states(list): List of UserPresenceState's to check. - is_mine_fn (fn): Function that returns if a user_id is ours - syncing_user_ids (set): Set of user_ids with active syncs. - now (int): Current time in ms. + user_states: List of UserPresenceState's to check. + is_mine_fn: Function that returns if a user_id is ours + syncing_user_ids: Set of user_ids with active syncs. + now: Current time in ms. Returns: List of UserPresenceState updates @@ -1679,14 +1690,16 @@ def handle_timeouts(user_states, is_mine_fn, syncing_user_ids, now): return list(changes.values()) -def handle_timeout(state, is_mine, syncing_user_ids, now): +def handle_timeout( + state: UserPresenceState, is_mine: bool, syncing_user_ids: Set[str], now: int +) -> Optional[UserPresenceState]: """Checks the presence of the user to see if any of the timers have elapsed Args: - state (UserPresenceState) - is_mine (bool): Whether the user is ours - syncing_user_ids (set): Set of user_ids with active syncs. - now (int): Current time in ms. + state + is_mine: Whether the user is ours + syncing_user_ids: Set of user_ids with active syncs. + now: Current time in ms. 
Returns: A UserPresenceState update or None if no update. @@ -1738,23 +1751,29 @@ def handle_timeout(state, is_mine, syncing_user_ids, now): return state if changed else None -def handle_update(prev_state, new_state, is_mine, wheel_timer, now): +def handle_update( + prev_state: UserPresenceState, + new_state: UserPresenceState, + is_mine: bool, + wheel_timer: WheelTimer, + now: int, +) -> Tuple[UserPresenceState, bool, bool]: """Given a presence update: 1. Add any appropriate timers. 2. Check if we should notify anyone. Args: - prev_state (UserPresenceState) - new_state (UserPresenceState) - is_mine (bool): Whether the user is ours - wheel_timer (WheelTimer) - now (int): Time now in ms + prev_state + new_state + is_mine: Whether the user is ours + wheel_timer + now: Time now in ms Returns: 3-tuple: `(new_state, persist_and_notify, federation_ping)` where: - new_state: is the state to actually persist - - persist_and_notify (bool): whether to persist and notify people - - federation_ping (bool): whether we should send a ping over federation + - persist_and_notify: whether to persist and notify people + - federation_ping: whether we should send a ping over federation """ user_id = new_state.user_id From 4e0fd35bc918b6901fcd29371ab6d89db8ce1b5e Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Wed, 28 Apr 2021 11:04:38 +0100 Subject: [PATCH 088/222] Revert "Experimental Federation Speedup (#9702)" This reverts commit 05e8c70c059f8ebb066e029bc3aa3e0cefef1019. --- changelog.d/9702.misc | 1 - contrib/experiments/test_messaging.py | 42 +++-- synapse/federation/sender/__init__.py | 145 +++++++----------- .../sender/per_destination_queue.py | 15 +- .../storage/databases/main/transactions.py | 28 ++-- 5 files changed, 93 insertions(+), 138 deletions(-) delete mode 100644 changelog.d/9702.misc diff --git a/changelog.d/9702.misc b/changelog.d/9702.misc deleted file mode 100644 index c6e63450a971..000000000000 --- a/changelog.d/9702.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up federation transmission by using fewer database calls. Contributed by @ShadowJonathan. 
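Among other things, this revert restores the per-room fan-out in the federation sender's event loop, which leans on Synapse's logcontext-aware Twisted helpers: `run_in_background` kicks off one task per room without waiting, and `make_deferred_yieldable(defer.gatherResults(...))` then waits for all of them while keeping log contexts intact. A simplified sketch of that restored pattern (assumed argument shapes; the actual code is in the `federation/sender` diff below):

    from twisted.internet import defer
    from synapse.logging.context import make_deferred_yieldable, run_in_background

    async def fan_out(events_by_room, handle_room_events):
        # One background task per room; consumeErrors=True prevents a single
        # failing room from leaving the other Deferreds with unhandled errors.
        await make_deferred_yieldable(
            defer.gatherResults(
                [
                    run_in_background(handle_room_events, evs)
                    for evs in events_by_room.values()
                ],
                consumeErrors=True,
            )
        )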
diff --git a/contrib/experiments/test_messaging.py b/contrib/experiments/test_messaging.py index 5dd172052b9e..31b8a6822504 100644 --- a/contrib/experiments/test_messaging.py +++ b/contrib/experiments/test_messaging.py @@ -224,16 +224,14 @@ def send_message(self, room_name, sender, body): destinations = yield self.get_servers_for_context(room_name) try: - yield self.replication_layer.send_pdus( - [ - Pdu.create_new( - context=room_name, - pdu_type="sy.room.message", - content={"sender": sender, "body": body}, - origin=self.server_name, - destinations=destinations, - ) - ] + yield self.replication_layer.send_pdu( + Pdu.create_new( + context=room_name, + pdu_type="sy.room.message", + content={"sender": sender, "body": body}, + origin=self.server_name, + destinations=destinations, + ) ) except Exception as e: logger.exception(e) @@ -255,7 +253,7 @@ def join_room(self, room_name, sender, joinee): origin=self.server_name, destinations=destinations, ) - yield self.replication_layer.send_pdus([pdu]) + yield self.replication_layer.send_pdu(pdu) except Exception as e: logger.exception(e) @@ -267,18 +265,16 @@ def invite_to_room(self, room_name, sender, invitee): destinations = yield self.get_servers_for_context(room_name) try: - yield self.replication_layer.send_pdus( - [ - Pdu.create_new( - context=room_name, - is_state=True, - pdu_type="sy.room.member", - state_key=invitee, - content={"membership": "invite"}, - origin=self.server_name, - destinations=destinations, - ) - ] + yield self.replication_layer.send_pdu( + Pdu.create_new( + context=room_name, + is_state=True, + pdu_type="sy.room.member", + state_key=invitee, + content={"membership": "invite"}, + origin=self.server_name, + destinations=destinations, + ) ) except Exception as e: logger.exception(e) diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 022bbf7dad44..deb40f461096 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -14,26 +14,19 @@ import abc import logging -from typing import ( - TYPE_CHECKING, - Collection, - Dict, - Hashable, - Iterable, - List, - Optional, - Set, - Tuple, -) +from typing import TYPE_CHECKING, Dict, Hashable, Iterable, List, Optional, Set, Tuple from prometheus_client import Counter +from twisted.internet import defer + import synapse.metrics from synapse.api.presence import UserPresenceState from synapse.events import EventBase from synapse.federation.sender.per_destination_queue import PerDestinationQueue from synapse.federation.sender.transaction_manager import TransactionManager from synapse.federation.units import Edu +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.metrics import ( LaterGauge, event_processing_loop_counter, @@ -262,27 +255,15 @@ async def _process_event_queue_loop(self) -> None: if not events and next_token >= self._last_poked_id: break - async def get_destinations_for_event( - event: EventBase, - ) -> Collection[str]: - """Computes the destinations to which this event must be sent. - - This returns an empty tuple when there are no destinations to send to, - or if this event is not from this homeserver and it is not sending - it on behalf of another server. - - Will also filter out destinations which this sender is not responsible for, - if multiple federation senders exist. - """ - + async def handle_event(event: EventBase) -> None: # Only send events for this server. 
send_on_behalf_of = event.internal_metadata.get_send_on_behalf_of() is_mine = self.is_mine_id(event.sender) if not is_mine and send_on_behalf_of is None: - return () + return if not event.internal_metadata.should_proactively_send(): - return () + return destinations = None # type: Optional[Set[str]] if not event.prev_event_ids(): @@ -317,7 +298,7 @@ async def get_destinations_for_event( "Failed to calculate hosts in room for event: %s", event.event_id, ) - return () + return destinations = { d @@ -327,15 +308,17 @@ async def get_destinations_for_event( ) } - destinations.discard(self.server_name) - if send_on_behalf_of is not None: # If we are sending the event on behalf of another server # then it already has the event and there is no reason to # send the event to it. destinations.discard(send_on_behalf_of) + logger.debug("Sending %s to %r", event, destinations) + if destinations: + await self._send_pdu(event, destinations) + now = self.clock.time_msec() ts = await self.store.get_received_ts(event.event_id) @@ -343,29 +326,24 @@ async def get_destinations_for_event( "federation_sender" ).observe((now - ts) / 1000) - return destinations - return () - - async def get_federatable_events_and_destinations( - events: Iterable[EventBase], - ) -> List[Tuple[EventBase, Collection[str]]]: - with Measure(self.clock, "get_destinations_for_events"): - # Fetch federation destinations per event, - # skip if get_destinations_for_event returns an empty collection, - # return list of event->destinations pairs. - return [ - (event, dests) - for (event, dests) in [ - (event, await get_destinations_for_event(event)) - for event in events - ] - if dests - ] - - events_and_dests = await get_federatable_events_and_destinations(events) - - # Send corresponding events to each destination queue - await self._distribute_events(events_and_dests) + async def handle_room_events(events: Iterable[EventBase]) -> None: + with Measure(self.clock, "handle_room_events"): + for event in events: + await handle_event(event) + + events_by_room = {} # type: Dict[str, List[EventBase]] + for event in events: + events_by_room.setdefault(event.room_id, []).append(event) + + await make_deferred_yieldable( + defer.gatherResults( + [ + run_in_background(handle_room_events, evs) + for evs in events_by_room.values() + ], + consumeErrors=True, + ) + ) await self.store.update_federation_out_pos("events", next_token) @@ -383,7 +361,7 @@ async def get_federatable_events_and_destinations( events_processed_counter.inc(len(events)) event_processing_loop_room_count.labels("federation_sender").inc( - len({event.room_id for event in events}) + len(events_by_room) ) event_processing_loop_counter.labels("federation_sender").inc() @@ -395,53 +373,34 @@ async def get_federatable_events_and_destinations( finally: self._is_processing = False - async def _distribute_events( - self, - events_and_dests: Iterable[Tuple[EventBase, Collection[str]]], - ) -> None: - """Distribute events to the respective per_destination queues. - - Also persists last-seen per-room stream_ordering to 'destination_rooms'. - - Args: - events_and_dests: A list of tuples, which are (event: EventBase, destinations: Collection[str]). - Every event is paired with its intended destinations (in federation). 
- """ - # Tuples of room_id + destination to their max-seen stream_ordering - room_with_dest_stream_ordering = {} # type: Dict[Tuple[str, str], int] - - # List of events to send to each destination - events_by_dest = {} # type: Dict[str, List[EventBase]] + async def _send_pdu(self, pdu: EventBase, destinations: Iterable[str]) -> None: + # We loop through all destinations to see whether we already have + # a transaction in progress. If we do, stick it in the pending_pdus + # table and we'll get back to it later. - # For each event-destinations pair... - for event, destinations in events_and_dests: + destinations = set(destinations) + destinations.discard(self.server_name) + logger.debug("Sending to: %s", str(destinations)) - # (we got this from the database, it's filled) - assert event.internal_metadata.stream_ordering - - sent_pdus_destination_dist_total.inc(len(destinations)) - sent_pdus_destination_dist_count.inc() + if not destinations: + return - # ...iterate over those destinations.. - for destination in destinations: - # ...update their stream-ordering... - room_with_dest_stream_ordering[(event.room_id, destination)] = max( - event.internal_metadata.stream_ordering, - room_with_dest_stream_ordering.get((event.room_id, destination), 0), - ) + sent_pdus_destination_dist_total.inc(len(destinations)) + sent_pdus_destination_dist_count.inc() - # ...and add the event to each destination queue. - events_by_dest.setdefault(destination, []).append(event) + assert pdu.internal_metadata.stream_ordering - # Bulk-store destination_rooms stream_ids - await self.store.bulk_store_destination_rooms_entries( - room_with_dest_stream_ordering + # track the fact that we have a PDU for these destinations, + # to allow us to perform catch-up later on if the remote is unreachable + # for a while. 
+ await self.store.store_destination_rooms_entries( + destinations, + pdu.room_id, + pdu.internal_metadata.stream_ordering, ) - for destination, pdus in events_by_dest.items(): - logger.debug("Sending %d pdus to %s", len(pdus), destination) - - self._get_per_destination_queue(destination).send_pdus(pdus) + for destination in destinations: + self._get_per_destination_queue(destination).send_pdu(pdu) async def send_read_receipt(self, receipt: ReadReceipt) -> None: """Send a RR to any other servers in the room diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index 3bb66bce324e..3b053ebcfb08 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -154,22 +154,19 @@ def pending_edu_count(self) -> int: + len(self._pending_edus_keyed) ) - def send_pdus(self, pdus: Iterable[EventBase]) -> None: - """Add PDUs to the queue, and start the transmission loop if necessary + def send_pdu(self, pdu: EventBase) -> None: + """Add a PDU to the queue, and start the transmission loop if necessary Args: - pdus: pdus to send + pdu: pdu to send """ if not self._catching_up or self._last_successful_stream_ordering is None: # only enqueue the PDU if we are not catching up (False) or do not # yet know if we have anything to catch up (None) - self._pending_pdus.extend(pdus) + self._pending_pdus.append(pdu) else: - self._catchup_last_skipped = max( - pdu.internal_metadata.stream_ordering - for pdu in pdus - if pdu.internal_metadata.stream_ordering is not None - ) + assert pdu.internal_metadata.stream_ordering + self._catchup_last_skipped = pdu.internal_metadata.stream_ordering self.attempt_new_transaction() diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py index b28ca61f8064..82335e7a9dad 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py @@ -14,7 +14,7 @@ import logging from collections import namedtuple -from typing import Dict, List, Optional, Tuple +from typing import Iterable, List, Optional, Tuple from canonicaljson import encode_canonical_json @@ -295,33 +295,37 @@ def _set_destination_retry_timings_emulated( }, ) - async def bulk_store_destination_rooms_entries( - self, room_and_destination_to_ordering: Dict[Tuple[str, str], int] - ): + async def store_destination_rooms_entries( + self, + destinations: Iterable[str], + room_id: str, + stream_ordering: int, + ) -> None: """ - Updates or creates `destination_rooms` entries for a number of events. + Updates or creates `destination_rooms` entries in batch for a single event. 
Args: - room_and_destination_to_ordering: A mapping of (room, destination) -> stream_id + destinations: list of destinations + room_id: the room_id of the event + stream_ordering: the stream_ordering of the event """ await self.db_pool.simple_upsert_many( table="destinations", key_names=("destination",), - key_values={(d,) for _, d in room_and_destination_to_ordering.keys()}, + key_values=[(d,) for d in destinations], value_names=[], value_values=[], desc="store_destination_rooms_entries_dests", ) + rows = [(destination, room_id) for destination in destinations] await self.db_pool.simple_upsert_many( table="destination_rooms", - key_names=("room_id", "destination"), - key_values=list(room_and_destination_to_ordering.keys()), + key_names=("destination", "room_id"), + key_values=rows, value_names=["stream_ordering"], - value_values=[ - (stream_id,) for stream_id in room_and_destination_to_ordering.values() - ], + value_values=[(stream_ordering,)] * len(rows), desc="store_destination_rooms_entries_rooms", ) From 787de3190f70d952b0d6589e9335aa16cacc41f2 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Wed, 28 Apr 2021 11:43:33 +0100 Subject: [PATCH 089/222] 1.33.0rc1 --- CHANGES.md | 53 ++++++++++++++++++++++++++++++++++++++++ changelog.d/9162.misc | 1 - changelog.d/9726.bugfix | 1 - changelog.d/9786.misc | 1 - changelog.d/9788.bugfix | 1 - changelog.d/9796.misc | 1 - changelog.d/9800.feature | 1 - changelog.d/9801.doc | 1 - changelog.d/9802.bugfix | 1 - changelog.d/9814.feature | 1 - changelog.d/9815.misc | 1 - changelog.d/9816.misc | 1 - changelog.d/9817.misc | 1 - changelog.d/9819.feature | 1 - changelog.d/9820.feature | 1 - changelog.d/9821.misc | 1 - changelog.d/9825.misc | 1 - changelog.d/9828.feature | 1 - changelog.d/9832.feature | 1 - changelog.d/9833.bugfix | 1 - changelog.d/9838.misc | 1 - changelog.d/9845.misc | 1 - changelog.d/9850.feature | 1 - changelog.d/9855.misc | 1 - changelog.d/9856.misc | 1 - changelog.d/9858.misc | 1 - changelog.d/9867.bugfix | 1 - changelog.d/9868.bugfix | 1 - changelog.d/9871.misc | 1 - changelog.d/9874.misc | 1 - changelog.d/9875.misc | 1 - changelog.d/9876.misc | 1 - changelog.d/9878.misc | 1 - changelog.d/9879.misc | 1 - changelog.d/9887.misc | 1 - synapse/__init__.py | 2 +- 36 files changed, 54 insertions(+), 35 deletions(-) delete mode 100644 changelog.d/9162.misc delete mode 100644 changelog.d/9726.bugfix delete mode 100644 changelog.d/9786.misc delete mode 100644 changelog.d/9788.bugfix delete mode 100644 changelog.d/9796.misc delete mode 100644 changelog.d/9800.feature delete mode 100644 changelog.d/9801.doc delete mode 100644 changelog.d/9802.bugfix delete mode 100644 changelog.d/9814.feature delete mode 100644 changelog.d/9815.misc delete mode 100644 changelog.d/9816.misc delete mode 100644 changelog.d/9817.misc delete mode 100644 changelog.d/9819.feature delete mode 100644 changelog.d/9820.feature delete mode 100644 changelog.d/9821.misc delete mode 100644 changelog.d/9825.misc delete mode 100644 changelog.d/9828.feature delete mode 100644 changelog.d/9832.feature delete mode 100644 changelog.d/9833.bugfix delete mode 100644 changelog.d/9838.misc delete mode 100644 changelog.d/9845.misc delete mode 100644 changelog.d/9850.feature delete mode 100644 changelog.d/9855.misc delete mode 100644 changelog.d/9856.misc delete mode 100644 changelog.d/9858.misc delete mode 100644 changelog.d/9867.bugfix delete mode 100644 changelog.d/9868.bugfix delete mode 100644 changelog.d/9871.misc delete mode 100644 changelog.d/9874.misc delete mode 100644 
changelog.d/9875.misc delete mode 100644 changelog.d/9876.misc delete mode 100644 changelog.d/9878.misc delete mode 100644 changelog.d/9879.misc delete mode 100644 changelog.d/9887.misc diff --git a/CHANGES.md b/CHANGES.md index 532b30e2323f..a1f5376ff208 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,56 @@ +Synapse 1.33.0rc1 (2021-04-28) +============================== + +Features +-------- + +- Update experimental support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083): restricting room access via group membership. ([\#9800](https://github.com/matrix-org/synapse/issues/9800), [\#9814](https://github.com/matrix-org/synapse/issues/9814)) +- Add experimental support for handling presence on a worker. ([\#9819](https://github.com/matrix-org/synapse/issues/9819), [\#9820](https://github.com/matrix-org/synapse/issues/9820), [\#9828](https://github.com/matrix-org/synapse/issues/9828), [\#9850](https://github.com/matrix-org/synapse/issues/9850)) +- Don't return an error when a user attempts to renew their account multiple times with the same token. Instead, state when their account is set to expire. This change concerns the optional account validity feature. ([\#9832](https://github.com/matrix-org/synapse/issues/9832)) + + +Bugfixes +-------- + +- Fixes the OIDC SSO flow when using a `public_baseurl` value including a non-root URL path. ([\#9726](https://github.com/matrix-org/synapse/issues/9726)) +- Fix thumbnail generation for some sites with non-standard content types. Contributed by @rkfg. ([\#9788](https://github.com/matrix-org/synapse/issues/9788)) +- Add some sanity checks to identity server passed to 3PID bind/unbind endpoints. ([\#9802](https://github.com/matrix-org/synapse/issues/9802)) +- Limit the size of HTTP responses read over federation. ([\#9833](https://github.com/matrix-org/synapse/issues/9833)) +- Fix a bug which could cause Synapse to get stuck in a loop of resyncing device lists. ([\#9867](https://github.com/matrix-org/synapse/issues/9867)) +- Fix a long-standing bug where errors from federation did not propagate to the client. ([\#9868](https://github.com/matrix-org/synapse/issues/9868)) + + +Improved Documentation +---------------------- + +- Add a note to the docker docs mentioning that we mirror upstream's supported Docker platforms. ([\#9801](https://github.com/matrix-org/synapse/issues/9801)) + + +Internal Changes +---------------- + +- Add a dockerfile for running Synapse in worker-mode under Complement. ([\#9162](https://github.com/matrix-org/synapse/issues/9162)) +- Apply `pyupgrade` across the codebase. ([\#9786](https://github.com/matrix-org/synapse/issues/9786)) +- Move some replication processing out of `generic_worker`. ([\#9796](https://github.com/matrix-org/synapse/issues/9796)) +- Replace `HomeServer.get_config()` with inline references. ([\#9815](https://github.com/matrix-org/synapse/issues/9815)) +- Rename some handlers and config modules to not duplicate the top-level module. ([\#9816](https://github.com/matrix-org/synapse/issues/9816)) +- Fix a long-standing bug which caused `max_upload_size` to not be correctly enforced. ([\#9817](https://github.com/matrix-org/synapse/issues/9817)) +- Reduce CPU usage of the user directory by reusing existing calculated room membership. ([\#9821](https://github.com/matrix-org/synapse/issues/9821)) +- Small speed up for joining large remote rooms. ([\#9825](https://github.com/matrix-org/synapse/issues/9825)) +- Introduce flake8-bugbear to the test suite and fix some of its lint violations. 
([\#9838](https://github.com/matrix-org/synapse/issues/9838)) +- Only store the raw data in the in-memory caches, rather than objects that include references to e.g. the data stores. ([\#9845](https://github.com/matrix-org/synapse/issues/9845)) +- Limit length of accepted email addresses. ([\#9855](https://github.com/matrix-org/synapse/issues/9855)) +- Remove redundant `synapse.types.Collection` type definition. ([\#9856](https://github.com/matrix-org/synapse/issues/9856)) +- Handle recently added rate limits correctly when using `--no-rate-limit` with the demo scripts. ([\#9858](https://github.com/matrix-org/synapse/issues/9858)) +- Disable invite rate-limiting by default when running the unit tests. ([\#9871](https://github.com/matrix-org/synapse/issues/9871)) +- Pass a reactor into `SynapseSite` to make testing easier. ([\#9874](https://github.com/matrix-org/synapse/issues/9874)) +- Make `DomainSpecificString` an `attrs` class. ([\#9875](https://github.com/matrix-org/synapse/issues/9875)) +- Add type hints to `synapse.api.auth` and `synapse.api.auth_blocking` modules. ([\#9876](https://github.com/matrix-org/synapse/issues/9876)) +- Remove redundant `_PushHTTPChannel` test class. ([\#9878](https://github.com/matrix-org/synapse/issues/9878)) +- Remove backwards-compatibility code for Python versions < 3.6. ([\#9879](https://github.com/matrix-org/synapse/issues/9879)) +- Small performance improvement around handling new local presence updates. ([\#9887](https://github.com/matrix-org/synapse/issues/9887)) + + Synapse 1.32.2 (2021-04-22) =========================== diff --git a/changelog.d/9162.misc b/changelog.d/9162.misc deleted file mode 100644 index 1083da8a7af8..000000000000 --- a/changelog.d/9162.misc +++ /dev/null @@ -1 +0,0 @@ -Add a dockerfile for running Synapse in worker-mode under Complement. \ No newline at end of file diff --git a/changelog.d/9726.bugfix b/changelog.d/9726.bugfix deleted file mode 100644 index 4ba0b24327cb..000000000000 --- a/changelog.d/9726.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fixes the OIDC SSO flow when using a `public_baseurl` value including a non-root URL path. \ No newline at end of file diff --git a/changelog.d/9786.misc b/changelog.d/9786.misc deleted file mode 100644 index cf265db749e2..000000000000 --- a/changelog.d/9786.misc +++ /dev/null @@ -1 +0,0 @@ -Apply `pyupgrade` across the codebase. \ No newline at end of file diff --git a/changelog.d/9788.bugfix b/changelog.d/9788.bugfix deleted file mode 100644 index edb58fbd5b37..000000000000 --- a/changelog.d/9788.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix thumbnail generation for some sites with non-standard content types. Contributed by @rkfg. diff --git a/changelog.d/9796.misc b/changelog.d/9796.misc deleted file mode 100644 index 59bb1813c32f..000000000000 --- a/changelog.d/9796.misc +++ /dev/null @@ -1 +0,0 @@ -Move some replication processing out of `generic_worker`. diff --git a/changelog.d/9800.feature b/changelog.d/9800.feature deleted file mode 100644 index 9404ad2fc047..000000000000 --- a/changelog.d/9800.feature +++ /dev/null @@ -1 +0,0 @@ -Update experimental support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083): restricting room access via group membership. diff --git a/changelog.d/9801.doc b/changelog.d/9801.doc deleted file mode 100644 index 8b8b9d01d493..000000000000 --- a/changelog.d/9801.doc +++ /dev/null @@ -1 +0,0 @@ -Add a note to the docker docs mentioning that we mirror upstream's supported Docker platforms. 
diff --git a/changelog.d/9802.bugfix b/changelog.d/9802.bugfix deleted file mode 100644 index 0c72f7be473f..000000000000 --- a/changelog.d/9802.bugfix +++ /dev/null @@ -1 +0,0 @@ -Add some sanity checks to identity server passed to 3PID bind/unbind endpoints. diff --git a/changelog.d/9814.feature b/changelog.d/9814.feature deleted file mode 100644 index 9404ad2fc047..000000000000 --- a/changelog.d/9814.feature +++ /dev/null @@ -1 +0,0 @@ -Update experimental support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083): restricting room access via group membership. diff --git a/changelog.d/9815.misc b/changelog.d/9815.misc deleted file mode 100644 index e33d012d3d28..000000000000 --- a/changelog.d/9815.misc +++ /dev/null @@ -1 +0,0 @@ -Replace `HomeServer.get_config()` with inline references. diff --git a/changelog.d/9816.misc b/changelog.d/9816.misc deleted file mode 100644 index d0981225006e..000000000000 --- a/changelog.d/9816.misc +++ /dev/null @@ -1 +0,0 @@ -Rename some handlers and config modules to not duplicate the top-level module. diff --git a/changelog.d/9817.misc b/changelog.d/9817.misc deleted file mode 100644 index 8aa8895f05f0..000000000000 --- a/changelog.d/9817.misc +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug which caused `max_upload_size` to not be correctly enforced. diff --git a/changelog.d/9819.feature b/changelog.d/9819.feature deleted file mode 100644 index f56b0bb3bdeb..000000000000 --- a/changelog.d/9819.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental support for handling presence on a worker. diff --git a/changelog.d/9820.feature b/changelog.d/9820.feature deleted file mode 100644 index f56b0bb3bdeb..000000000000 --- a/changelog.d/9820.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental support for handling presence on a worker. diff --git a/changelog.d/9821.misc b/changelog.d/9821.misc deleted file mode 100644 index 03b2d2ed4dbb..000000000000 --- a/changelog.d/9821.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce CPU usage of the user directory by reusing existing calculated room membership. \ No newline at end of file diff --git a/changelog.d/9825.misc b/changelog.d/9825.misc deleted file mode 100644 index 42f3f1561936..000000000000 --- a/changelog.d/9825.misc +++ /dev/null @@ -1 +0,0 @@ -Small speed up for joining large remote rooms. diff --git a/changelog.d/9828.feature b/changelog.d/9828.feature deleted file mode 100644 index f56b0bb3bdeb..000000000000 --- a/changelog.d/9828.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental support for handling presence on a worker. diff --git a/changelog.d/9832.feature b/changelog.d/9832.feature deleted file mode 100644 index e76395fbe886..000000000000 --- a/changelog.d/9832.feature +++ /dev/null @@ -1 +0,0 @@ -Don't return an error when a user attempts to renew their account multiple times with the same token. Instead, state when their account is set to expire. This change concerns the optional account validity feature. \ No newline at end of file diff --git a/changelog.d/9833.bugfix b/changelog.d/9833.bugfix deleted file mode 100644 index 56f9c9626b5c..000000000000 --- a/changelog.d/9833.bugfix +++ /dev/null @@ -1 +0,0 @@ -Limit the size of HTTP responses read over federation. diff --git a/changelog.d/9838.misc b/changelog.d/9838.misc deleted file mode 100644 index b98ce563093c..000000000000 --- a/changelog.d/9838.misc +++ /dev/null @@ -1 +0,0 @@ -Introduce flake8-bugbear to the test suite and fix some of its lint violations. 
\ No newline at end of file diff --git a/changelog.d/9845.misc b/changelog.d/9845.misc deleted file mode 100644 index 875dd6d13156..000000000000 --- a/changelog.d/9845.misc +++ /dev/null @@ -1 +0,0 @@ -Only store the raw data in the in-memory caches, rather than objects that include references to e.g. the data stores. diff --git a/changelog.d/9850.feature b/changelog.d/9850.feature deleted file mode 100644 index f56b0bb3bdeb..000000000000 --- a/changelog.d/9850.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental support for handling presence on a worker. diff --git a/changelog.d/9855.misc b/changelog.d/9855.misc deleted file mode 100644 index 6a3d700fde71..000000000000 --- a/changelog.d/9855.misc +++ /dev/null @@ -1 +0,0 @@ -Limit length of accepted email addresses. diff --git a/changelog.d/9856.misc b/changelog.d/9856.misc deleted file mode 100644 index d67e8c386a3e..000000000000 --- a/changelog.d/9856.misc +++ /dev/null @@ -1 +0,0 @@ -Remove redundant `synapse.types.Collection` type definition. diff --git a/changelog.d/9858.misc b/changelog.d/9858.misc deleted file mode 100644 index f7e286fa69bf..000000000000 --- a/changelog.d/9858.misc +++ /dev/null @@ -1 +0,0 @@ -Handle recently added rate limits correctly when using `--no-rate-limit` with the demo scripts. diff --git a/changelog.d/9867.bugfix b/changelog.d/9867.bugfix deleted file mode 100644 index f236de247d8b..000000000000 --- a/changelog.d/9867.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug which could cause Synapse to get stuck in a loop of resyncing device lists. diff --git a/changelog.d/9868.bugfix b/changelog.d/9868.bugfix deleted file mode 100644 index e2b4f97ad51f..000000000000 --- a/changelog.d/9868.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where errors from federation did not propagate to the client. diff --git a/changelog.d/9871.misc b/changelog.d/9871.misc deleted file mode 100644 index b19acfab6298..000000000000 --- a/changelog.d/9871.misc +++ /dev/null @@ -1 +0,0 @@ -Disable invite rate-limiting by default when running the unit tests. \ No newline at end of file diff --git a/changelog.d/9874.misc b/changelog.d/9874.misc deleted file mode 100644 index ba1097e65eb4..000000000000 --- a/changelog.d/9874.misc +++ /dev/null @@ -1 +0,0 @@ -Pass a reactor into `SynapseSite` to make testing easier. diff --git a/changelog.d/9875.misc b/changelog.d/9875.misc deleted file mode 100644 index 9345c0bf4530..000000000000 --- a/changelog.d/9875.misc +++ /dev/null @@ -1 +0,0 @@ -Make `DomainSpecificString` an `attrs` class. diff --git a/changelog.d/9876.misc b/changelog.d/9876.misc deleted file mode 100644 index 28390e32e67e..000000000000 --- a/changelog.d/9876.misc +++ /dev/null @@ -1 +0,0 @@ -Add type hints to `synapse.api.auth` and `synapse.api.auth_blocking` modules. diff --git a/changelog.d/9878.misc b/changelog.d/9878.misc deleted file mode 100644 index 927876852db3..000000000000 --- a/changelog.d/9878.misc +++ /dev/null @@ -1 +0,0 @@ -Remove redundant `_PushHTTPChannel` test class. diff --git a/changelog.d/9879.misc b/changelog.d/9879.misc deleted file mode 100644 index c9ca37cf4835..000000000000 --- a/changelog.d/9879.misc +++ /dev/null @@ -1 +0,0 @@ -Remove backwards-compatibility code for Python versions < 3.6. \ No newline at end of file diff --git a/changelog.d/9887.misc b/changelog.d/9887.misc deleted file mode 100644 index 650ebf85e6ce..000000000000 --- a/changelog.d/9887.misc +++ /dev/null @@ -1 +0,0 @@ -Small performance improvement around handling new local presence updates. 
diff --git a/synapse/__init__.py b/synapse/__init__.py index fbd49a93e137..5bbaa62de2b2 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.32.2" +__version__ = "1.33.0rc1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 391bfe9a7b7b22c3dbee9f9e02071fd5c1730ab5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 28 Apr 2021 11:59:28 +0100 Subject: [PATCH 090/222] Reduce memory footprint of caches (#9886) --- changelog.d/9886.misc | 1 + synapse/util/caches/lrucache.py | 77 +++++++++++++++++++++++++-------- 2 files changed, 60 insertions(+), 18 deletions(-) create mode 100644 changelog.d/9886.misc diff --git a/changelog.d/9886.misc b/changelog.d/9886.misc new file mode 100644 index 000000000000..8ff869e6598d --- /dev/null +++ b/changelog.d/9886.misc @@ -0,0 +1 @@ +Reduce memory usage of the LRU caches. diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index a21d34fcb4d7..10b0ec6b75bc 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -17,8 +17,10 @@ from typing import ( Any, Callable, + Collection, Generic, Iterable, + List, Optional, Type, TypeVar, @@ -57,13 +59,56 @@ class _Node: __slots__ = ["prev_node", "next_node", "key", "value", "callbacks"] def __init__( - self, prev_node, next_node, key, value, callbacks: Optional[set] = None + self, + prev_node, + next_node, + key, + value, + callbacks: Collection[Callable[[], None]] = (), ): self.prev_node = prev_node self.next_node = next_node self.key = key self.value = value - self.callbacks = callbacks or set() + + # Set of callbacks to run when the node gets deleted. We store these as a list + # rather than a set to keep memory usage down (and since we expect few + # entries per node, the performance of checking for duplication in a + # list vs using a set is negligible). + # + # Note that we store this as an optional list to keep the memory + # footprint down. Storing `None` is free as it's a singleton, while empty + # lists are 56 bytes (and empty sets are 216 bytes, if we did the naive + # thing and used sets). + self.callbacks = None # type: Optional[List[Callable[[], None]]] + + self.add_callbacks(callbacks) + + def add_callbacks(self, callbacks: Collection[Callable[[], None]]) -> None: + """Add to stored list of callbacks, removing duplicates.""" + + if not callbacks: + return + + if not self.callbacks: + self.callbacks = [] + + for callback in callbacks: + if callback not in self.callbacks: + self.callbacks.append(callback) + + def run_and_clear_callbacks(self) -> None: + """Run all callbacks and clear the stored list of callbacks. Used when + the node is being deleted.
+ """ + + if not self.callbacks: + return + + for callback in self.callbacks: + callback() + + self.callbacks = None class LruCache(Generic[KT, VT]): @@ -177,10 +222,10 @@ def cache_len(): self.len = synchronized(cache_len) - def add_node(key, value, callbacks: Optional[set] = None): + def add_node(key, value, callbacks: Collection[Callable[[], None]] = ()): prev_node = list_root next_node = prev_node.next_node - node = _Node(prev_node, next_node, key, value, callbacks or set()) + node = _Node(prev_node, next_node, key, value, callbacks) prev_node.next_node = node next_node.prev_node = node cache[key] = node @@ -211,16 +256,15 @@ def delete_node(node): deleted_len = size_callback(node.value) cached_cache_len[0] -= deleted_len - for cb in node.callbacks: - cb() - node.callbacks.clear() + node.run_and_clear_callbacks() + return deleted_len @overload def cache_get( key: KT, default: Literal[None] = None, - callbacks: Iterable[Callable[[], None]] = ..., + callbacks: Collection[Callable[[], None]] = ..., update_metrics: bool = ..., ) -> Optional[VT]: ... @@ -229,7 +273,7 @@ def cache_get( def cache_get( key: KT, default: T, - callbacks: Iterable[Callable[[], None]] = ..., + callbacks: Collection[Callable[[], None]] = ..., update_metrics: bool = ..., ) -> Union[T, VT]: ... @@ -238,13 +282,13 @@ def cache_get( def cache_get( key: KT, default: Optional[T] = None, - callbacks: Iterable[Callable[[], None]] = (), + callbacks: Collection[Callable[[], None]] = (), update_metrics: bool = True, ): node = cache.get(key, None) if node is not None: move_node_to_front(node) - node.callbacks.update(callbacks) + node.add_callbacks(callbacks) if update_metrics and metrics: metrics.inc_hits() return node.value @@ -260,10 +304,8 @@ def cache_set(key: KT, value: VT, callbacks: Iterable[Callable[[], None]] = ()): # We sometimes store large objects, e.g. dicts, which cause # the inequality check to take a long time. So let's only do # the check if we have some callbacks to call. - if node.callbacks and value != node.value: - for cb in node.callbacks: - cb() - node.callbacks.clear() + if value != node.value: + node.run_and_clear_callbacks() # We don't bother to protect this by value != node.value as # generally size_callback will be cheap compared with equality @@ -273,7 +315,7 @@ def cache_set(key: KT, value: VT, callbacks: Iterable[Callable[[], None]] = ()): cached_cache_len[0] -= size_callback(node.value) cached_cache_len[0] += size_callback(value) - node.callbacks.update(callbacks) + node.add_callbacks(callbacks) move_node_to_front(node) node.value = value @@ -326,8 +368,7 @@ def cache_clear() -> None: list_root.next_node = list_root list_root.prev_node = list_root for node in cache.values(): - for cb in node.callbacks: - cb() + node.run_and_clear_callbacks() cache.clear() if size_callback: cached_cache_len[0] = 0 From 8ba086980dbe4272a6ad2f529ae7b955b93bb9b0 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Wed, 28 Apr 2021 12:07:49 +0100 Subject: [PATCH 091/222] Reword account validity template change to sound less like a bugfix --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index a1f5376ff208..9a41607679fd 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -6,7 +6,7 @@ Features - Update experimental support for [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083): restricting room access via group membership. 
([\#9800](https://github.com/matrix-org/synapse/issues/9800), [\#9814](https://github.com/matrix-org/synapse/issues/9814)) - Add experimental support for handling presence on a worker. ([\#9819](https://github.com/matrix-org/synapse/issues/9819), [\#9820](https://github.com/matrix-org/synapse/issues/9820), [\#9828](https://github.com/matrix-org/synapse/issues/9828), [\#9850](https://github.com/matrix-org/synapse/issues/9850)) -- Don't return an error when a user attempts to renew their account multiple times with the same token. Instead, state when their account is set to expire. This change concerns the optional account validity feature. ([\#9832](https://github.com/matrix-org/synapse/issues/9832)) +- Return a new template when a user attempts to renew their account multiple times with the same token, stating that their account is set to expire. This replaces the invalid token template that would previously be shown in this case. This change concerns the optional account validity feature. ([\#9832](https://github.com/matrix-org/synapse/issues/9832)) Bugfixes From 10a08ab88ad423bfca86983808c47f34a601ec9c Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 28 Apr 2021 07:44:52 -0400 Subject: [PATCH 092/222] Use the parent's logging context name for runWithConnection. (#9895) This fixes a regression where the logging context for runWithConnection was reported as runWithConnection instead of the connection name, e.g. "POST-XYZ". --- changelog.d/9895.bugfix | 1 + synapse/storage/database.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelog.d/9895.bugfix diff --git a/changelog.d/9895.bugfix b/changelog.d/9895.bugfix new file mode 100644 index 000000000000..1053f975bf18 --- /dev/null +++ b/changelog.d/9895.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in v1.32.0 where the associated connection was improperly logged for SQL logging statements. diff --git a/synapse/storage/database.py b/synapse/storage/database.py index bd39c095af6b..a761ad603bbb 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -715,7 +715,9 @@ def inner_func(conn, *args, **kwargs): # pool). assert not self.engine.in_transaction(conn) - with LoggingContext("runWithConnection", parent_context) as context: + with LoggingContext( + str(curr_context), parent_context=parent_context + ) as context: sched_duration_sec = monotonic_time() - start_time sql_scheduling_timer.observe(sched_duration_sec) context.add_database_scheduled(sched_duration_sec) From e4ab8676b4b5a3336ef49bb68a0e6dabbf030df4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 28 Apr 2021 14:42:50 +0100 Subject: [PATCH 093/222] Fix tight loop handling presence replication. (#9900) Only affects workers. Introduced in #9819. Fixes #9899. --- changelog.d/9900.bugfix | 1 + synapse/handlers/presence.py | 24 +++++++++++++++++++++++- tests/handlers/test_presence.py | 22 ++++++++++++++++++++++ 3 files changed, 46 insertions(+), 1 deletion(-) create mode 100644 changelog.d/9900.bugfix diff --git a/changelog.d/9900.bugfix b/changelog.d/9900.bugfix new file mode 100644 index 000000000000..a8470fca3f86 --- /dev/null +++ b/changelog.d/9900.bugfix @@ -0,0 +1 @@ +Fix tight loop handling presence replication when using workers. Introduced in v1.33.0rc1.
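For clarity, a minimal standalone sketch of the off-by-one this patch fixes. This is illustrative code, not part of the diff; the queue invariants (one entry per stream ID, last entry has `stream_id == next_id - 1`, callers must receive exactly the rows with `from_token < stream_id`) are taken from the comments in the diff below.

```python
# A minimal sketch, not Synapse code: the queue and helper are stand-ins.
queue = [(1, "a"), (2, "b"), (3, "c")]  # (stream_id, state) pairs
next_id = 4  # the ID the next entry will get; last entry has next_id - 1

def rows_after(from_token: int) -> list:
    if from_token == next_id - 1:
        # Caller is already up to date. Without this no-op, the old index
        # (from_token - next_id == -1) re-sent the last row forever,
        # producing the tight loop this patch fixes.
        return []
    # The first entry with stream_id > from_token sits at this negative
    # index, clamped in case the start of the queue has been truncated.
    start_idx = max(from_token + 1 - next_id, -len(queue))
    return queue[start_idx:]

assert rows_after(3) == []
assert rows_after(1) == [(2, "b"), (3, "c")]
```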
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 969c73c1e7a1..12df35f26ef7 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -2026,18 +2026,40 @@ async def get_replication_rows( ) return result["updates"], result["upto_token"], result["limited"] + # If the from_token is the current token then there's nothing to return + # and we can trivially no-op. + if from_token == self._next_id - 1: + return [], upto_token, False + # We can find the correct position in the queue by noting that there is # exactly one entry per stream ID, and that the last entry has an ID of # `self._next_id - 1`, so we can count backwards from the end. # + # Since we are returning all states in the range `from_token < stream_id + # <= upto_token` we look for the index with a `stream_id` of `from_token + # + 1`. + # # Since the start of the queue is periodically truncated we need to + # handle the case where `from_token` stream ID has already been dropped. - start_idx = max(from_token - self._next_id, -len(self._queue)) + start_idx = max(from_token + 1 - self._next_id, -len(self._queue)) to_send = [] # type: List[Tuple[int, Tuple[str, str]]] limited = False new_id = upto_token for _, stream_id, destinations, user_ids in self._queue[start_idx:]: + if stream_id <= from_token: + # Paranoia check that we are actually only sending states that + # have a stream_id strictly greater than from_token. We should + # never hit this. + logger.warning( + "Tried returning presence federation stream ID: %d less than from_token: %d (next_id: %d, len: %d)", + stream_id, + from_token, + self._next_id, + len(self._queue), + ) + continue + if stream_id > upto_token: break diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index 61271cd08498..ce330e79cca0 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -509,6 +509,14 @@ def test_send_and_get(self): self.assertCountEqual(rows, expected_rows) + now_token = self.queue.get_current_token(self.instance_name) + rows, upto_token, limited = self.get_success( + self.queue.get_replication_rows("master", upto_token, now_token, 10) + ) + self.assertEqual(upto_token, now_token) + self.assertFalse(limited) + self.assertCountEqual(rows, []) + def test_send_and_get_split(self): state1 = UserPresenceState.default("@user1:test") state2 = UserPresenceState.default("@user2:test") @@ -538,6 +546,20 @@ def test_send_and_get_split(self): self.assertCountEqual(rows, expected_rows) + now_token = self.queue.get_current_token(self.instance_name) + rows, upto_token, limited = self.get_success( + self.queue.get_replication_rows("master", upto_token, now_token, 10) + ) + + self.assertEqual(upto_token, now_token) + self.assertFalse(limited) + + expected_rows = [ + (2, ("dest3", "@user3:test")), + ] + + self.assertCountEqual(rows, expected_rows) + def test_clear_queue_all(self): state1 = UserPresenceState.default("@user1:test") state2 = UserPresenceState.default("@user2:test") From 0085dc5abc614579f3adbd9e6d2cbdd41facef00 Mon Sep 17 00:00:00 2001 From: ThibF Date: Thu, 29 Apr 2021 09:31:45 +0000 Subject: [PATCH 094/222] Delete room endpoint (#9889) Support deleting a room through a DELETE request and mark the previous endpoint as deprecated in the documentation.
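For illustration, a hypothetical admin-client call to the endpoint this patch introduces. The host, room ID, and access token are placeholders, not values from the patch; the response fields named in the comment come from the handler below.

```python
# Hypothetical usage sketch of the new DELETE endpoint; requires an
# admin access token. All concrete values here are placeholders.
import json
import urllib.parse
import urllib.request

room_id = urllib.parse.quote("!abc123:example.org", safe="")
req = urllib.request.Request(
    "http://localhost:8008/_synapse/admin/v1/rooms/" + room_id,
    data=json.dumps({"block": True, "purge": True}).encode("utf-8"),
    headers={"Authorization": "Bearer <admin access token>"},
    method="DELETE",
)
with urllib.request.urlopen(req) as resp:
    # The body includes fields such as kicked_users and
    # failed_to_kick_users (see shutdown_room in the diff).
    print(json.load(resp))
```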
Signed-off-by: Thibault Ferrante --- changelog.d/9889.feature | 1 + changelog.d/9889.removal | 1 + docs/admin_api/rooms.md | 11 ++- synapse/rest/admin/rooms.py | 134 +++++++++++++++++++++++----------- tests/rest/admin/test_room.py | 45 +++++++----- 5 files changed, 128 insertions(+), 64 deletions(-) create mode 100644 changelog.d/9889.feature create mode 100644 changelog.d/9889.removal diff --git a/changelog.d/9889.feature b/changelog.d/9889.feature new file mode 100644 index 000000000000..74d46f222ee1 --- /dev/null +++ b/changelog.d/9889.feature @@ -0,0 +1 @@ +Add support for `DELETE /_synapse/admin/v1/rooms/<room_id>`. \ No newline at end of file diff --git a/changelog.d/9889.removal b/changelog.d/9889.removal new file mode 100644 index 000000000000..398b9e129b48 --- /dev/null +++ b/changelog.d/9889.removal @@ -0,0 +1 @@ +Mark as deprecated `POST /_synapse/admin/v1/rooms/<room_id>/delete`. \ No newline at end of file diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md index bc737b30f59e..01d3882426b3 100644 --- a/docs/admin_api/rooms.md +++ b/docs/admin_api/rooms.md @@ -427,7 +427,7 @@ the new room. Users on other servers will be unaffected. The API is: ``` -POST /_synapse/admin/v1/rooms/<room_id>/delete +DELETE /_synapse/admin/v1/rooms/<room_id> ``` with a body of: @@ -528,6 +528,15 @@ You will have to manually handle, if you so choose, the following: * Users that would have been booted from the room (and will have been force-joined to the Content Violation room). * Removal of the Content Violation room if desired. +## Deprecated endpoint + +The previous deprecated API will be removed in a future release; it was: + +``` +POST /_synapse/admin/v1/rooms/<room_id>/delete +``` + +It behaves the same way as the current endpoint, except for the path and the HTTP method. # Make Room Admin API diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index d0cf12174360..f289ffe3d084 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -37,9 +37,11 @@ from synapse.util import json_decoder if TYPE_CHECKING: + from synapse.api.auth import Auth + from synapse.handlers.pagination import PaginationHandler + from synapse.handlers.room import RoomShutdownHandler from synapse.server import HomeServer - logger = logging.getLogger(__name__) @@ -146,50 +148,14 @@ def __init__(self, hs: "HomeServer"): async def on_POST( self, request: SynapseRequest, room_id: str ) -> Tuple[int, JsonDict]: - requester = await self.auth.get_user_by_req(request) - await assert_user_is_admin(self.auth, requester.user) - - content = parse_json_object_from_request(request) - - block = content.get("block", False) - if not isinstance(block, bool): - raise SynapseError( - HTTPStatus.BAD_REQUEST, - "Param 'block' must be a boolean, if given", - Codes.BAD_JSON, - ) - - purge = content.get("purge", True) - if not isinstance(purge, bool): - raise SynapseError( - HTTPStatus.BAD_REQUEST, - "Param 'purge' must be a boolean, if given", - Codes.BAD_JSON, - ) - - force_purge = content.get("force_purge", False) - if not isinstance(force_purge, bool): - raise SynapseError( - HTTPStatus.BAD_REQUEST, - "Param 'force_purge' must be a boolean, if given", - Codes.BAD_JSON, - ) - - ret = await self.room_shutdown_handler.shutdown_room( - room_id=room_id, - new_room_user_id=content.get("new_room_user_id"), - new_room_name=content.get("room_name"), - message=content.get("message"), - requester_user_id=requester.user.to_string(), - block=block, + return await _delete_room( + request, + room_id, + self.auth, + self.room_shutdown_handler,
self.pagination_handler, ) - # Purge room - if purge: - await self.pagination_handler.purge_room(room_id, force=force_purge) - - return (200, ret) - class ListRoomRestServlet(RestServlet): """ @@ -282,7 +248,22 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: class RoomRestServlet(RestServlet): - """Get room details. + """Manage a room. + + On GET: Get details of a room. + + On DELETE: Delete a room from the server. + + It is a combination and improvement of the shutdown-room and purge-room APIs. + + Shuts down a room by removing all local users from the room. + Blocking all future invites and joins to the room is optional. + + If desired, any local aliases will be repointed to a new room + created by `new_room_user_id` and kicked users will be auto- + joined to the new room. + + If 'purge' is true, it will remove all traces of the room from the database. TODO: Add on_POST to allow room creation without joining the room """ @@ -293,6 +274,8 @@ def __init__(self, hs: "HomeServer"): self.hs = hs self.auth = hs.get_auth() self.store = hs.get_datastore() + self.room_shutdown_handler = hs.get_room_shutdown_handler() + self.pagination_handler = hs.get_pagination_handler() async def on_GET( self, request: SynapseRequest, room_id: str @@ -308,6 +291,17 @@ async def on_GET( return (200, ret) + async def on_DELETE( + self, request: SynapseRequest, room_id: str + ) -> Tuple[int, JsonDict]: + return await _delete_room( + request, + room_id, + self.auth, + self.room_shutdown_handler, + self.pagination_handler, + ) + class RoomMembersRestServlet(RestServlet): """ @@ -694,3 +688,55 @@ async def on_GET( ) return 200, results + + +async def _delete_room( + request: SynapseRequest, + room_id: str, + auth: "Auth", + room_shutdown_handler: "RoomShutdownHandler", + pagination_handler: "PaginationHandler", +) -> Tuple[int, JsonDict]: + requester = await auth.get_user_by_req(request) + await assert_user_is_admin(auth, requester.user) + + content = parse_json_object_from_request(request) + + block = content.get("block", False) + if not isinstance(block, bool): + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "Param 'block' must be a boolean, if given", + Codes.BAD_JSON, + ) + + purge = content.get("purge", True) + if not isinstance(purge, bool): + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "Param 'purge' must be a boolean, if given", + Codes.BAD_JSON, + ) + + force_purge = content.get("force_purge", False) + if not isinstance(force_purge, bool): + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "Param 'force_purge' must be a boolean, if given", + Codes.BAD_JSON, + ) + + ret = await room_shutdown_handler.shutdown_room( + room_id=room_id, + new_room_user_id=content.get("new_room_user_id"), + new_room_name=content.get("room_name"), + message=content.get("message"), + requester_user_id=requester.user.to_string(), + block=block, + ) + + # Purge room + if purge: + await pagination_handler.purge_room(room_id, force=force_purge) + + return (200, ret) diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index 6b841881200f..ee071c2477b4 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -17,6 +17,8 @@ from typing import List, Optional from unittest.mock import Mock +from parameterized import parameterized_class + import synapse.rest.admin from synapse.api.constants import EventTypes, Membership from synapse.api.errors import Codes @@ -144,6 +146,13 @@ def _assert_peek(self, room_id, expect_code): ) +@parameterized_class( + ("method", "url_template"), + [
("POST", "/_synapse/admin/v1/rooms/%s/delete"), + ("DELETE", "/_synapse/admin/v1/rooms/%s"), + ], +) class DeleteRoomTestCase(unittest.HomeserverTestCase): servlets = [ synapse.rest.admin.register_servlets, @@ -175,7 +184,7 @@ def prepare(self, reactor, clock, hs): self.room_id = self.helper.create_room_as( self.other_user, tok=self.other_user_tok ) - self.url = "/_synapse/admin/v1/rooms/%s/delete" % self.room_id + self.url = self.url_template % self.room_id def test_requester_is_no_admin(self): """ @@ -183,7 +192,7 @@ def test_requester_is_no_admin(self): """ channel = self.make_request( - "POST", + self.method, self.url, json.dumps({}), access_token=self.other_user_tok, @@ -196,10 +205,10 @@ def test_room_does_not_exist(self): """ Check that unknown rooms/server return error 404. """ - url = "/_synapse/admin/v1/rooms/!unknown:test/delete" + url = self.url_template % "!unknown:test" channel = self.make_request( - "POST", + self.method, url, json.dumps({}), access_token=self.admin_user_tok, @@ -212,10 +221,10 @@ def test_room_is_not_valid(self): """ Check that invalid room names, return an error 400. """ - url = "/_synapse/admin/v1/rooms/invalidroom/delete" + url = self.url_template % "invalidroom" channel = self.make_request( - "POST", + self.method, url, json.dumps({}), access_token=self.admin_user_tok, @@ -234,7 +243,7 @@ def test_new_room_user_does_not_exist(self): body = json.dumps({"new_room_user_id": "@unknown:test"}) channel = self.make_request( - "POST", + self.method, self.url, content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, @@ -253,7 +262,7 @@ def test_new_room_user_is_not_local(self): body = json.dumps({"new_room_user_id": "@not:exist.bla"}) channel = self.make_request( - "POST", + self.method, self.url, content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, @@ -272,7 +281,7 @@ def test_block_is_not_bool(self): body = json.dumps({"block": "NotBool"}) channel = self.make_request( - "POST", + self.method, self.url, content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, @@ -288,7 +297,7 @@ def test_purge_is_not_bool(self): body = json.dumps({"purge": "NotBool"}) channel = self.make_request( - "POST", + self.method, self.url, content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, @@ -314,7 +323,7 @@ def test_purge_room_and_block(self): body = json.dumps({"block": True, "purge": True}) channel = self.make_request( - "POST", + self.method, self.url.encode("ascii"), content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, @@ -347,7 +356,7 @@ def test_purge_room_and_not_block(self): body = json.dumps({"block": False, "purge": True}) channel = self.make_request( - "POST", + self.method, self.url.encode("ascii"), content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, @@ -381,7 +390,7 @@ def test_block_room_and_not_purge(self): body = json.dumps({"block": False, "purge": False}) channel = self.make_request( - "POST", + self.method, self.url.encode("ascii"), content=body.encode(encoding="utf_8"), access_token=self.admin_user_tok, @@ -426,10 +435,9 @@ def test_shutdown_room_consent(self): self._is_member(room_id=self.room_id, user_id=self.other_user) # Test that the admin can still send shutdown - url = "/_synapse/admin/v1/rooms/%s/delete" % self.room_id channel = self.make_request( - "POST", - url.encode("ascii"), + self.method, + self.url, json.dumps({"new_room_user_id": self.admin_user}), access_token=self.admin_user_tok, ) @@ -473,10 +481,9 @@ def 
test_shutdown_room_block_peek(self): self._is_member(room_id=self.room_id, user_id=self.other_user) # Test that the admin can still send shutdown - url = "/_synapse/admin/v1/rooms/%s/delete" % self.room_id channel = self.make_request( - "POST", - url.encode("ascii"), + self.method, + self.url, json.dumps({"new_room_user_id": self.admin_user}), access_token=self.admin_user_tok, ) From e9444cc74d73f6367dedcfe406e3f1d9ff3d5414 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Thu, 29 Apr 2021 11:45:37 +0100 Subject: [PATCH 095/222] 1.33.0rc2 --- CHANGES.md | 9 +++++++++ changelog.d/9900.bugfix | 1 - synapse/__init__.py | 2 +- 3 files changed, 10 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/9900.bugfix diff --git a/CHANGES.md b/CHANGES.md index 9a41607679fd..629d4a180d1f 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.33.0rc2 (2021-04-29) +============================== + +Bugfixes +-------- + +- Fix tight loop handling presence replication when using workers. Introduced in v1.33.0rc1. ([\#9900](https://github.com/matrix-org/synapse/issues/9900)) + + Synapse 1.33.0rc1 (2021-04-28) ============================== diff --git a/changelog.d/9900.bugfix b/changelog.d/9900.bugfix deleted file mode 100644 index a8470fca3f86..000000000000 --- a/changelog.d/9900.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix tight loop handling presence replication when using workers. Introduced in v1.33.0rc1. diff --git a/synapse/__init__.py b/synapse/__init__.py index 5bbaa62de2b2..319c52be2c49 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.33.0rc1" +__version__ = "1.33.0rc2" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From bb4b11846f3bdd539a1671eb8f1db8ee1a0bf57a Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 29 Apr 2021 07:17:28 -0400 Subject: [PATCH 096/222] Add missing type hints to handlers and fix a Spam Checker type hint. (#9896) The user_may_create_room_alias method on spam checkers declared the room_alias parameter as a str when in reality it is passed a RoomAlias object. --- changelog.d/9896.bugfix | 1 + changelog.d/9896.misc | 1 + synapse/events/spamcheck.py | 5 ++- synapse/handlers/directory.py | 59 ++++++++++++++++------------ synapse/handlers/identity.py | 9 +++-- synapse/handlers/message.py | 24 +++++++---- synapse/handlers/room_member.py | 2 +- synapse/handlers/ui_auth/checkers.py | 35 +++++++++-------- 8 files changed, 82 insertions(+), 54 deletions(-) create mode 100644 changelog.d/9896.bugfix create mode 100644 changelog.d/9896.misc diff --git a/changelog.d/9896.bugfix b/changelog.d/9896.bugfix new file mode 100644 index 000000000000..07a8e87f9f98 --- /dev/null +++ b/changelog.d/9896.bugfix @@ -0,0 +1 @@ +Correct the type hint for the `user_may_create_room_alias` method of spam checkers. It is provided a `RoomAlias`, not a `str`. diff --git a/changelog.d/9896.misc b/changelog.d/9896.misc new file mode 100644 index 000000000000..e41c7d1f02b4 --- /dev/null +++ b/changelog.d/9896.misc @@ -0,0 +1 @@ +Add type hints to the `synapse.handlers` module. 
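To illustrate why the corrected hint matters, a hypothetical spam-checker sketch follows. The class and its policy are invented for this example; only the hook name and its `RoomAlias` parameter come from the patch.

```python
# Hypothetical spam checker: the hook receives a RoomAlias object, not
# a str, so it must be converted (e.g. via to_string()) before any
# string inspection. The "no purely numeric localpart" rule is invented
# for this sketch.
from synapse.types import RoomAlias

class BlockNumericAliases:
    async def user_may_create_room_alias(
        self, userid: str, room_alias: RoomAlias
    ) -> bool:
        # to_string() yields e.g. "#room123:example.org".
        localpart = room_alias.to_string().lstrip("#").split(":", 1)[0]
        return not localpart.isdigit()
```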
diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py index 7118d5f52d12..d5fa19509498 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py @@ -20,6 +20,7 @@ from synapse.rest.media.v1._base import FileInfo from synapse.rest.media.v1.media_storage import ReadableFileWrapper from synapse.spam_checker_api import RegistrationBehaviour +from synapse.types import RoomAlias from synapse.util.async_helpers import maybe_awaitable if TYPE_CHECKING: @@ -113,7 +114,9 @@ async def user_may_create_room(self, userid: str) -> bool: return True - async def user_may_create_room_alias(self, userid: str, room_alias: str) -> bool: + async def user_may_create_room_alias( + self, userid: str, room_alias: RoomAlias + ) -> bool: """Checks if a given user may create a room alias If this method returns false, the association request will be rejected. diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 90932316f3b7..de1b14cde39a 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -14,7 +14,7 @@ import logging import string -from typing import Iterable, List, Optional +from typing import TYPE_CHECKING, Iterable, List, Optional from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes from synapse.api.errors import ( @@ -27,15 +27,19 @@ SynapseError, ) from synapse.appservice import ApplicationService -from synapse.types import Requester, RoomAlias, UserID, get_domain_from_id +from synapse.storage.databases.main.directory import RoomAliasMapping +from synapse.types import JsonDict, Requester, RoomAlias, UserID, get_domain_from_id from ._base import BaseHandler +if TYPE_CHECKING: + from synapse.server import HomeServer + logger = logging.getLogger(__name__) class DirectoryHandler(BaseHandler): - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): super().__init__(hs) self.state = hs.get_state_handler() @@ -60,7 +64,7 @@ async def _create_association( room_id: str, servers: Optional[Iterable[str]] = None, creator: Optional[str] = None, - ): + ) -> None: # general association creation for both human users and app services for wchar in string.whitespace: @@ -104,8 +108,9 @@ async def create_association( """ user_id = requester.user.to_string() + room_alias_str = room_alias.to_string() - if len(room_alias.to_string()) > MAX_ALIAS_LENGTH: + if len(room_alias_str) > MAX_ALIAS_LENGTH: raise SynapseError( 400, "Can't create aliases longer than %s characters" % MAX_ALIAS_LENGTH, @@ -114,7 +119,7 @@ async def create_association( service = requester.app_service if service: - if not service.is_interested_in_alias(room_alias.to_string()): + if not service.is_interested_in_alias(room_alias_str): raise SynapseError( 400, "This application service has not reserved this kind of alias.", @@ -138,7 +143,7 @@ async def create_association( raise AuthError(403, "This user is not permitted to create this alias") if not self.config.is_alias_creation_allowed( - user_id, room_id, room_alias.to_string() + user_id, room_id, room_alias_str ): # Lets just return a generic message, as there may be all sorts of # reasons why we said no. 
TODO: Allow configurable error messages @@ -211,7 +216,7 @@ async def delete_association( async def delete_appservice_association( self, service: ApplicationService, room_alias: RoomAlias - ): + ) -> None: if not service.is_interested_in_alias(room_alias.to_string()): raise SynapseError( 400, @@ -220,7 +225,7 @@ async def delete_appservice_association( ) await self._delete_association(room_alias) - async def _delete_association(self, room_alias: RoomAlias): + async def _delete_association(self, room_alias: RoomAlias) -> str: if not self.hs.is_mine(room_alias): raise SynapseError(400, "Room alias must be local") @@ -228,17 +233,19 @@ async def _delete_association(self, room_alias: RoomAlias): return room_id - async def get_association(self, room_alias: RoomAlias): + async def get_association(self, room_alias: RoomAlias) -> JsonDict: room_id = None if self.hs.is_mine(room_alias): - result = await self.get_association_from_room_alias(room_alias) + result = await self.get_association_from_room_alias( + room_alias + ) # type: Optional[RoomAliasMapping] if result: room_id = result.room_id servers = result.servers else: try: - result = await self.federation.make_query( + fed_result = await self.federation.make_query( destination=room_alias.domain, query_type="directory", args={"room_alias": room_alias.to_string()}, @@ -248,13 +255,13 @@ async def get_association(self, room_alias: RoomAlias): except CodeMessageException as e: logging.warning("Error retrieving alias") if e.code == 404: - result = None + fed_result = None else: raise - if result and "room_id" in result and "servers" in result: - room_id = result["room_id"] - servers = result["servers"] + if fed_result and "room_id" in fed_result and "servers" in fed_result: + room_id = fed_result["room_id"] + servers = fed_result["servers"] if not room_id: raise SynapseError( @@ -275,7 +282,7 @@ async def get_association(self, room_alias: RoomAlias): return {"room_id": room_id, "servers": servers} - async def on_directory_query(self, args): + async def on_directory_query(self, args: JsonDict) -> JsonDict: room_alias = RoomAlias.from_string(args["room_alias"]) if not self.hs.is_mine(room_alias): raise SynapseError(400, "Room Alias is not hosted on this homeserver") @@ -293,7 +300,7 @@ async def on_directory_query(self, args): async def _update_canonical_alias( self, requester: Requester, user_id: str, room_id: str, room_alias: RoomAlias - ): + ) -> None: """ Send an updated canonical alias event if the removed alias was set as the canonical alias or listed in the alt_aliases field. @@ -344,7 +351,9 @@ async def _update_canonical_alias( ratelimit=False, ) - async def get_association_from_room_alias(self, room_alias: RoomAlias): + async def get_association_from_room_alias( + self, room_alias: RoomAlias + ) -> Optional[RoomAliasMapping]: result = await self.store.get_association_from_room_alias(room_alias) if not result: # Query AS to see if it exists @@ -372,7 +381,7 @@ def can_modify_alias(self, alias: RoomAlias, user_id: Optional[str] = None) -> b # either no interested services, or no service with an exclusive lock return True - async def _user_can_delete_alias(self, alias: RoomAlias, user_id: str): + async def _user_can_delete_alias(self, alias: RoomAlias, user_id: str) -> bool: """Determine whether a user can delete an alias. 
One of the following must be true: @@ -394,14 +403,13 @@ async def _user_can_delete_alias(self, alias: RoomAlias, user_id: str): if not room_id: return False - res = await self.auth.check_can_change_room_list( + return await self.auth.check_can_change_room_list( room_id, UserID.from_string(user_id) ) - return res async def edit_published_room_list( self, requester: Requester, room_id: str, visibility: str - ): + ) -> None: """Edit the entry of the room in the published room list. requester @@ -469,7 +477,7 @@ async def edit_published_room_list( async def edit_published_appservice_room_list( self, appservice_id: str, network_id: str, room_id: str, visibility: str - ): + ) -> None: """Add or remove a room from the appservice/network specific public room list. @@ -499,5 +507,4 @@ async def get_aliases_for_room( room_id, requester.user.to_string() ) - aliases = await self.store.get_aliases_for_room(room_id) - return aliases + return await self.store.get_aliases_for_room(room_id) diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 0b3b1fadb5df..33d16fbf9c36 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -17,7 +17,7 @@ """Utilities for interacting with Identity Servers""" import logging import urllib.parse -from typing import Awaitable, Callable, Dict, List, Optional, Tuple +from typing import TYPE_CHECKING, Awaitable, Callable, Dict, List, Optional, Tuple from synapse.api.errors import ( CodeMessageException, @@ -41,13 +41,16 @@ from ._base import BaseHandler +if TYPE_CHECKING: + from synapse.server import HomeServer + logger = logging.getLogger(__name__) id_server_scheme = "https://" class IdentityHandler(BaseHandler): - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): super().__init__(hs) # An HTTP client for contacting trusted URLs. @@ -80,7 +83,7 @@ async def ratelimit_request_token_requests( request: SynapseRequest, medium: str, address: str, - ): + ) -> None: """Used to ratelimit requests to `/requestToken` by IP and address. Args: diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index ec8eb2167463..49f8aa25ea7b 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -15,7 +15,7 @@ # limitations under the License. import logging import random -from typing import TYPE_CHECKING, Dict, List, Optional, Tuple +from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple from canonicaljson import encode_canonical_json @@ -66,7 +66,7 @@ class MessageHandler: """Contains some read only APIs to get state about a room""" - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): self.auth = hs.get_auth() self.clock = hs.get_clock() self.state = hs.get_state_handler() @@ -91,7 +91,7 @@ async def get_room_data( room_id: str, event_type: str, state_key: str, - ) -> dict: + ) -> Optional[EventBase]: """Get data from a room. Args: @@ -115,6 +115,10 @@ async def get_room_data( data = await self.state.get_current_state(room_id, event_type, state_key) elif membership == Membership.LEAVE: key = (event_type, state_key) + # If the membership is not JOIN, then the event ID should exist. 
+ assert ( + membership_event_id is not None + ), "check_user_in_room_or_world_readable returned invalid data" room_state = await self.state_store.get_state_for_events( [membership_event_id], StateFilter.from_types([key]) ) @@ -186,10 +190,12 @@ async def get_state_events( event = last_events[0] if visible_events: - room_state = await self.state_store.get_state_for_events( + room_state_events = await self.state_store.get_state_for_events( [event.event_id], state_filter=state_filter ) - room_state = room_state[event.event_id] + room_state = room_state_events[ + event.event_id + ] # type: Mapping[Any, EventBase] else: raise AuthError( 403, @@ -210,10 +216,14 @@ async def get_state_events( ) room_state = await self.store.get_events(state_ids.values()) elif membership == Membership.LEAVE: - room_state = await self.state_store.get_state_for_events( + # If the membership is not JOIN, then the event ID should exist. + assert ( + membership_event_id is not None + ), "check_user_in_room_or_world_readable returned invalid data" + room_state_events = await self.state_store.get_state_for_events( [membership_event_id], state_filter=state_filter ) - room_state = room_state[membership_event_id] + room_state = room_state_events[membership_event_id] now = self.clock.time_msec() events = await self._event_serializer.serialize_events( diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 2c5bada1d89b..20700fc5a8a4 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -1044,7 +1044,7 @@ async def _is_server_notice_room(self, room_id: str) -> bool: class RoomMemberMasterHandler(RoomMemberHandler): - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): super().__init__(hs) self.distributor = hs.get_distributor() diff --git a/synapse/handlers/ui_auth/checkers.py b/synapse/handlers/ui_auth/checkers.py index 0eeb7c03f2d5..5414ce77d83c 100644 --- a/synapse/handlers/ui_auth/checkers.py +++ b/synapse/handlers/ui_auth/checkers.py @@ -13,7 +13,7 @@ # limitations under the License. 
import logging -from typing import Any +from typing import TYPE_CHECKING, Any from twisted.web.client import PartialDownloadError @@ -22,13 +22,16 @@ from synapse.config.emailconfig import ThreepidBehaviour from synapse.util import json_decoder +if TYPE_CHECKING: + from synapse.server import HomeServer + logger = logging.getLogger(__name__) class UserInteractiveAuthChecker: """Abstract base class for an interactive auth checker""" - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): pass def is_enabled(self) -> bool: @@ -57,10 +60,10 @@ async def check_auth(self, authdict: dict, clientip: str) -> Any: class DummyAuthChecker(UserInteractiveAuthChecker): AUTH_TYPE = LoginType.DUMMY - def is_enabled(self): + def is_enabled(self) -> bool: return True - async def check_auth(self, authdict, clientip): + async def check_auth(self, authdict: dict, clientip: str) -> Any: return True @@ -70,24 +73,24 @@ class TermsAuthChecker(UserInteractiveAuthChecker): def is_enabled(self): return True - async def check_auth(self, authdict, clientip): + async def check_auth(self, authdict: dict, clientip: str) -> Any: return True class RecaptchaAuthChecker(UserInteractiveAuthChecker): AUTH_TYPE = LoginType.RECAPTCHA - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): super().__init__(hs) self._enabled = bool(hs.config.recaptcha_private_key) self._http_client = hs.get_proxied_http_client() self._url = hs.config.recaptcha_siteverify_api self._secret = hs.config.recaptcha_private_key - def is_enabled(self): + def is_enabled(self) -> bool: return self._enabled - async def check_auth(self, authdict, clientip): + async def check_auth(self, authdict: dict, clientip: str) -> Any: try: user_response = authdict["response"] except KeyError: @@ -132,11 +135,11 @@ async def check_auth(self, authdict, clientip): class _BaseThreepidAuthChecker: - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): self.hs = hs self.store = hs.get_datastore() - async def _check_threepid(self, medium, authdict): + async def _check_threepid(self, medium: str, authdict: dict) -> dict: if "threepid_creds" not in authdict: raise LoginError(400, "Missing threepid_creds", Codes.MISSING_PARAM) @@ -206,31 +209,31 @@ async def _check_threepid(self, medium, authdict): class EmailIdentityAuthChecker(UserInteractiveAuthChecker, _BaseThreepidAuthChecker): AUTH_TYPE = LoginType.EMAIL_IDENTITY - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): UserInteractiveAuthChecker.__init__(self, hs) _BaseThreepidAuthChecker.__init__(self, hs) - def is_enabled(self): + def is_enabled(self) -> bool: return self.hs.config.threepid_behaviour_email in ( ThreepidBehaviour.REMOTE, ThreepidBehaviour.LOCAL, ) - async def check_auth(self, authdict, clientip): + async def check_auth(self, authdict: dict, clientip: str) -> Any: return await self._check_threepid("email", authdict) class MsisdnAuthChecker(UserInteractiveAuthChecker, _BaseThreepidAuthChecker): AUTH_TYPE = LoginType.MSISDN - def __init__(self, hs): + def __init__(self, hs: "HomeServer"): UserInteractiveAuthChecker.__init__(self, hs) _BaseThreepidAuthChecker.__init__(self, hs) - def is_enabled(self): + def is_enabled(self) -> bool: return bool(self.hs.config.account_threepid_delegate_msisdn) - async def check_auth(self, authdict, clientip): + async def check_auth(self, authdict: dict, clientip: str) -> Any: return await self._check_threepid("msisdn", authdict) From d11f2dfee519a4136def4169cef0ef218ebf1e19 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Thu, 29 Apr 
2021 14:31:14 +0100 Subject: [PATCH 097/222] typo in changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 629d4a180d1f..bdeb614b9e9b 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,7 +4,7 @@ Synapse 1.33.0rc2 (2021-04-29) Bugfixes -------- -- Fix tight loop handling presence replication when using workers. Introduced in v1.33.0rc1. ([\#9900](https://github.com/matrix-org/synapse/issues/9900)) +- Fix tight loop when handling presence replication when using workers. Introduced in v1.33.0rc1. ([\#9900](https://github.com/matrix-org/synapse/issues/9900)) Synapse 1.33.0rc1 (2021-04-28) From 56c4b47df36a99d2fc57a64013a4f482e25ee097 Mon Sep 17 00:00:00 2001 From: Dan Callahan Date: Fri, 30 Apr 2021 15:36:05 +0100 Subject: [PATCH 098/222] Build Debian packages for Ubuntu 21.04 Hirsute (#9909) Signed-off-by: Dan Callahan --- changelog.d/9909.feature | 1 + scripts-dev/build_debian_packages | 7 ++++--- 2 files changed, 5 insertions(+), 3 deletions(-) create mode 100644 changelog.d/9909.feature diff --git a/changelog.d/9909.feature b/changelog.d/9909.feature new file mode 100644 index 000000000000..41aba5cca6b3 --- /dev/null +++ b/changelog.d/9909.feature @@ -0,0 +1 @@ +Build Debian packages for Ubuntu 21.04 (Hirsute Hippo). diff --git a/scripts-dev/build_debian_packages b/scripts-dev/build_debian_packages index 3bb6e2c7ea80..07d018db9985 100755 --- a/scripts-dev/build_debian_packages +++ b/scripts-dev/build_debian_packages @@ -21,9 +21,10 @@ DISTS = ( "debian:buster", "debian:bullseye", "debian:sid", - "ubuntu:bionic", - "ubuntu:focal", - "ubuntu:groovy", + "ubuntu:bionic", # 18.04 LTS (our EOL forced by Py36 on 2021-12-23) + "ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14) + "ubuntu:groovy", # 20.10 (EOL 2021-07-07) + "ubuntu:hirsute", # 21.04 (EOL 2022-01-05) ) DESC = '''\ From b85821aca2c3cfc1c732dfcc0c1d6758a263487a Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 4 May 2021 13:28:59 +0100 Subject: [PATCH 099/222] Add port parameter to the sample config for psycopg2 args (#9911) Adds the `port` option with the default value to the sample config file. --- changelog.d/9911.doc | 1 + docs/sample_config.yaml | 1 + synapse/config/database.py | 1 + 3 files changed, 3 insertions(+) create mode 100644 changelog.d/9911.doc diff --git a/changelog.d/9911.doc b/changelog.d/9911.doc new file mode 100644 index 000000000000..f7fd9f1ba9f2 --- /dev/null +++ b/changelog.d/9911.doc @@ -0,0 +1 @@ +Add `port` argument to the Postgres database sample config section. 
\ No newline at end of file diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index e0350279ad39..d013725cdc11 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -810,6 +810,7 @@ caches: # password: secretpassword # database: synapse # host: localhost +# port: 5432 # cp_min: 5 # cp_max: 10 # diff --git a/synapse/config/database.py b/synapse/config/database.py index 79a02706b4f6..c76ef1e1de8d 100644 --- a/synapse/config/database.py +++ b/synapse/config/database.py @@ -58,6 +58,7 @@ # password: secretpassword # database: synapse # host: localhost +# port: 5432 # cp_min: 5 # cp_max: 10 # From e3bc4617fcd5c858ff02cf2d443b898db87ae8a5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 4 May 2021 15:14:22 +0100 Subject: [PATCH 100/222] Time external cache response time (#9904) --- changelog.d/9904.misc | 1 + synapse/replication/tcp/external_cache.py | 36 ++++++++++++++++------- 2 files changed, 27 insertions(+), 10 deletions(-) create mode 100644 changelog.d/9904.misc diff --git a/changelog.d/9904.misc b/changelog.d/9904.misc new file mode 100644 index 000000000000..3db1e625aed6 --- /dev/null +++ b/changelog.d/9904.misc @@ -0,0 +1 @@ +Time response time for external cache requests. diff --git a/synapse/replication/tcp/external_cache.py b/synapse/replication/tcp/external_cache.py index 1a3b051e3cb2..b402f82810fa 100644 --- a/synapse/replication/tcp/external_cache.py +++ b/synapse/replication/tcp/external_cache.py @@ -15,7 +15,7 @@ import logging from typing import TYPE_CHECKING, Any, Optional -from prometheus_client import Counter +from prometheus_client import Counter, Histogram from synapse.logging.context import make_deferred_yieldable from synapse.util import json_decoder, json_encoder @@ -35,6 +35,20 @@ labelnames=["cache_name", "hit"], ) +response_timer = Histogram( + "synapse_external_cache_response_time_seconds", + "Time taken to get a response from Redis for a cache get/set request", + labelnames=["method"], + buckets=( + 0.001, + 0.002, + 0.005, + 0.01, + 0.02, + 0.05, + ), +) + logger = logging.getLogger(__name__) @@ -72,13 +86,14 @@ async def set(self, cache_name: str, key: str, value: Any, expiry_ms: int) -> No logger.debug("Caching %s %s: %r", cache_name, key, encoded_value) - return await make_deferred_yieldable( - self._redis_connection.set( - self._get_redis_key(cache_name, key), - encoded_value, - pexpire=expiry_ms, + with response_timer.labels("set").time(): + return await make_deferred_yieldable( + self._redis_connection.set( + self._get_redis_key(cache_name, key), + encoded_value, + pexpire=expiry_ms, + ) ) - ) async def get(self, cache_name: str, key: str) -> Optional[Any]: """Look up a key/value in the named cache.""" @@ -86,9 +101,10 @@ async def get(self, cache_name: str, key: str) -> Optional[Any]: if self._redis_connection is None: return None - result = await make_deferred_yieldable( - self._redis_connection.get(self._get_redis_key(cache_name, key)) - ) + with response_timer.labels("get").time(): + result = await make_deferred_yieldable( + self._redis_connection.get(self._get_redis_key(cache_name, key)) + ) logger.debug("Got cache result %s %s: %r", cache_name, key, result) From 0644ac0989d06fc0ba5d08a5412d65d75f7d8db2 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 5 May 2021 14:15:54 +0100 Subject: [PATCH 101/222] 1.33.0 --- CHANGES.md | 9 +++++++++ changelog.d/9909.feature | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 
changelog.d/9909.feature diff --git a/CHANGES.md b/CHANGES.md index bdeb614b9e9b..206859cff7ee 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.33.0 (2021-05-05) +=========================== + +Features +-------- + +- Build Debian packages for Ubuntu 21.04 (Hirsute Hippo). ([\#9909](https://github.com/matrix-org/synapse/issues/9909)) + + Synapse 1.33.0rc2 (2021-04-29) ============================== diff --git a/changelog.d/9909.feature b/changelog.d/9909.feature deleted file mode 100644 index 41aba5cca6b3..000000000000 --- a/changelog.d/9909.feature +++ /dev/null @@ -1 +0,0 @@ -Build Debian packages for Ubuntu 21.04 (Hirsute Hippo). diff --git a/debian/changelog b/debian/changelog index fd33bfda5c1c..b54d3f2bc640 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.33.0) stable; urgency=medium + + * New synapse release 1.33.0. + + -- Synapse Packaging team Wed, 05 May 2021 14:15:27 +0100 + matrix-synapse-py3 (1.32.2) stable; urgency=medium * New synapse release 1.32.2. diff --git a/synapse/__init__.py b/synapse/__init__.py index 319c52be2c49..5eac40730a77 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.33.0rc2" +__version__ = "1.33.0" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From e9eb3549d32a6f93d07de8dbd5e1ebe54c8d8278 Mon Sep 17 00:00:00 2001 From: "DeepBlueV7.X" Date: Wed, 5 May 2021 13:37:56 +0000 Subject: [PATCH 102/222] Leave out optional keys from /sync (#9919) This leaves out all optional keys from /sync. This should be fine for all clients already tested against Conduit, but it may break some clients; as such, we should check that at least most of them don't break horribly, and maybe back out some of the individual changes. (We can probably always leave out groups, for example, while the others may cause more issues.) Signed-off-by: Nicolas Werner --- changelog.d/9919.feature | 1 + synapse/rest/client/v2_alpha/sync.py | 62 +++++++++++++------ tests/rest/client/v2_alpha/test_sync.py | 30 +--------- .../test_resource_limits_server_notices.py | 8 ++- 4 files changed, 51 insertions(+), 50 deletions(-) create mode 100644 changelog.d/9919.feature diff --git a/changelog.d/9919.feature b/changelog.d/9919.feature new file mode 100644 index 000000000000..07747505d243 --- /dev/null +++ b/changelog.d/9919.feature @@ -0,0 +1 @@ +Omit empty fields from the `/sync` response. Contributed by @deepbluev7.
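A minimal standalone sketch of the pattern the diff below adopts: build the body in a `defaultdict` and populate each section only when it is non-empty, so optional keys are simply absent instead of being serialized as empty objects. The field names here are illustrative stand-ins, not the full set of /sync sections.

```python
from collections import defaultdict
from typing import List

def build_sync_body(
    next_batch: str,
    account_data: List[dict],
    device_lists_changed: List[str],
) -> dict:
    response: dict = defaultdict(dict)
    response["next_batch"] = next_batch
    if account_data:
        response["account_data"] = {"events": account_data}
    # defaultdict(dict) means "device_lists" only springs into existence
    # when one of its sub-keys is actually written.
    if device_lists_changed:
        response["device_lists"]["changed"] = device_lists_changed
    return dict(response)

body = build_sync_body("s72595_4483", [], ["@alice:example.org"])
assert "account_data" not in body
assert body["device_lists"] == {"changed": ["@alice:example.org"]}
```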
diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index 95ee3f1b84f3..5f8565333068 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -14,6 +14,7 @@ import itertools import logging +from collections import defaultdict from typing import TYPE_CHECKING, Tuple from synapse.api.constants import PresenceState @@ -229,24 +230,49 @@ async def encode_response(self, time_now, sync_result, access_token_id, filter): ) logger.debug("building sync response dict") - return { - "account_data": {"events": sync_result.account_data}, - "to_device": {"events": sync_result.to_device}, - "device_lists": { - "changed": list(sync_result.device_lists.changed), - "left": list(sync_result.device_lists.left), - }, - "presence": SyncRestServlet.encode_presence(sync_result.presence, time_now), - "rooms": {"join": joined, "invite": invited, "leave": archived}, - "groups": { - "join": sync_result.groups.join, - "invite": sync_result.groups.invite, - "leave": sync_result.groups.leave, - }, - "device_one_time_keys_count": sync_result.device_one_time_keys_count, - "org.matrix.msc2732.device_unused_fallback_key_types": sync_result.device_unused_fallback_key_types, - "next_batch": await sync_result.next_batch.to_string(self.store), - } + + response: dict = defaultdict(dict) + response["next_batch"] = await sync_result.next_batch.to_string(self.store) + + if sync_result.account_data: + response["account_data"] = {"events": sync_result.account_data} + if sync_result.presence: + response["presence"] = SyncRestServlet.encode_presence( + sync_result.presence, time_now + ) + + if sync_result.to_device: + response["to_device"] = {"events": sync_result.to_device} + + if sync_result.device_lists.changed: + response["device_lists"]["changed"] = list(sync_result.device_lists.changed) + if sync_result.device_lists.left: + response["device_lists"]["left"] = list(sync_result.device_lists.left) + + if sync_result.device_one_time_keys_count: + response[ + "device_one_time_keys_count" + ] = sync_result.device_one_time_keys_count + if sync_result.device_unused_fallback_key_types: + response[ + "org.matrix.msc2732.device_unused_fallback_key_types" + ] = sync_result.device_unused_fallback_key_types + + if joined: + response["rooms"]["join"] = joined + if invited: + response["rooms"]["invite"] = invited + if archived: + response["rooms"]["leave"] = archived + + if sync_result.groups.join: + response["groups"]["join"] = sync_result.groups.join + if sync_result.groups.invite: + response["groups"]["invite"] = sync_result.groups.invite + if sync_result.groups.leave: + response["groups"]["leave"] = sync_result.groups.leave + + return response @staticmethod def encode_presence(events, time_now): diff --git a/tests/rest/client/v2_alpha/test_sync.py b/tests/rest/client/v2_alpha/test_sync.py index dbcbdf159a1a..74be5176d054 100644 --- a/tests/rest/client/v2_alpha/test_sync.py +++ b/tests/rest/client/v2_alpha/test_sync.py @@ -37,35 +37,7 @@ def test_sync_argless(self): channel = self.make_request("GET", "/sync") self.assertEqual(channel.code, 200) - self.assertTrue( - { - "next_batch", - "rooms", - "presence", - "account_data", - "to_device", - "device_lists", - }.issubset(set(channel.json_body.keys())) - ) - - def test_sync_presence_disabled(self): - """ - When presence is disabled, the key does not appear in /sync. 
- """ - self.hs.config.use_presence = False - - channel = self.make_request("GET", "/sync") - - self.assertEqual(channel.code, 200) - self.assertTrue( - { - "next_batch", - "rooms", - "account_data", - "to_device", - "device_lists", - }.issubset(set(channel.json_body.keys())) - ) + self.assertIn("next_batch", channel.json_body) class SyncFilterTestCase(unittest.HomeserverTestCase): diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index d46521ccdc0f..3245aa91ca6e 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -306,8 +306,9 @@ def test_no_invite_without_notice(self): channel = self.make_request("GET", "/sync?timeout=0", access_token=tok) - invites = channel.json_body["rooms"]["invite"] - self.assertEqual(len(invites), 0, invites) + self.assertNotIn( + "rooms", channel.json_body, "Got invites without server notice" + ) def test_invite_with_notice(self): """Tests that, if the MAU limit is hit, the server notices user invites each user @@ -364,7 +365,8 @@ def _trigger_notice_and_join(self): # We could also pick another user and sync with it, which would return an # invite to a system notices room, but it doesn't matter which user we're # using so we use the last one because it saves us an extra sync. - invites = channel.json_body["rooms"]["invite"] + if "rooms" in channel.json_body: + invites = channel.json_body["rooms"]["invite"] # Make sure we have an invite to process. self.assertEqual(len(invites), 1, invites) From d5305000f1c5799ffb6fcd64ad27e7bfd8ba2113 Mon Sep 17 00:00:00 2001 From: Christopher May-Townsend Date: Wed, 5 May 2021 16:33:04 +0100 Subject: [PATCH 103/222] Docker healthcheck timings - add startup delay and changed interval (#9913) * Add healthcheck startup delay by 5secs and reduced interval check to 15s to reduce waiting time for docker aware edge routers bringing an instance online --- changelog.d/9913.docker | 1 + docker/Dockerfile | 2 +- docker/README.md | 17 ++++++++++++++--- 3 files changed, 16 insertions(+), 4 deletions(-) create mode 100644 changelog.d/9913.docker diff --git a/changelog.d/9913.docker b/changelog.d/9913.docker new file mode 100644 index 000000000000..93835e14cba0 --- /dev/null +++ b/changelog.d/9913.docker @@ -0,0 +1 @@ +Added startup_delay to docker healthcheck to reduce waiting time for coming online, updated readme for extra options, contributed by @Maquis196. diff --git a/docker/Dockerfile b/docker/Dockerfile index 4f5cd06d7294..2bdc607e66e6 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -88,5 +88,5 @@ EXPOSE 8008/tcp 8009/tcp 8448/tcp ENTRYPOINT ["/start.py"] -HEALTHCHECK --interval=1m --timeout=5s \ +HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \ CMD curl -fSs http://localhost:8008/health || exit 1 diff --git a/docker/README.md b/docker/README.md index a7d1e670fe91..c8d3c4b3daf3 100644 --- a/docker/README.md +++ b/docker/README.md @@ -191,6 +191,16 @@ whilst running the above `docker run` commands. ``` --no-healthcheck ``` + +## Disabling the healthcheck in docker-compose file + +If you wish to disable the healthcheck via docker-compose, append the following to your service configuration. 
+ +``` + healthcheck: + disable: true +``` + ## Setting custom healthcheck on docker run If you wish to point the healthcheck at a different port with docker command, add the following @@ -202,14 +212,15 @@ If you wish to point the healthcheck at a different port with docker command, ad ## Setting the healthcheck in docker-compose file You can add the following to set a custom healthcheck in a docker compose file. -You will need version >2.1 for this to work. +You will need docker-compose version >2.1 for this to work. ``` healthcheck: test: ["CMD", "curl", "-fSs", "http://localhost:8008/health"] - interval: 1m - timeout: 10s + interval: 15s + timeout: 5s retries: 3 + start_period: 5s ``` ## Using jemalloc From d0aee697ac0587c005bc1048f5036979331f1101 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 5 May 2021 16:49:34 +0100 Subject: [PATCH 104/222] Use get_current_users_in_room from store and not StateHandler (#9910) --- changelog.d/9910.bugfix | 1 + changelog.d/9910.feature | 1 + synapse/handlers/directory.py | 4 ++-- synapse/handlers/events.py | 2 +- synapse/handlers/message.py | 2 +- synapse/handlers/presence.py | 2 +- synapse/handlers/room.py | 2 +- synapse/handlers/sync.py | 6 +++--- synapse/state/__init__.py | 10 +++++++--- synapse/storage/_base.py | 1 + synapse/storage/databases/main/roommember.py | 8 ++++++-- synapse/storage/databases/main/user_directory.py | 4 +--- 12 files changed, 26 insertions(+), 17 deletions(-) create mode 100644 changelog.d/9910.bugfix create mode 100644 changelog.d/9910.feature diff --git a/changelog.d/9910.bugfix b/changelog.d/9910.bugfix new file mode 100644 index 000000000000..06d523fd46a8 --- /dev/null +++ b/changelog.d/9910.bugfix @@ -0,0 +1 @@ +Fix bug where user directory could get out of sync if room visibility and membership changed in quick succession. diff --git a/changelog.d/9910.feature b/changelog.d/9910.feature new file mode 100644 index 000000000000..54165cce18af --- /dev/null +++ b/changelog.d/9910.feature @@ -0,0 +1 @@ +Improve performance after joining a large room when presence is enabled. diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index de1b14cde39a..4064a2b85913 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -78,7 +78,7 @@ async def _create_association( # TODO(erikj): Add transactions. # TODO(erikj): Check if there is a current association. if not servers: - users = await self.state.get_current_users_in_room(room_id) + users = await self.store.get_users_in_room(room_id) servers = {get_domain_from_id(u) for u in users} if not servers: @@ -270,7 +270,7 @@ async def get_association(self, room_alias: RoomAlias) -> JsonDict: Codes.NOT_FOUND, ) - users = await self.state.get_current_users_in_room(room_id) + users = await self.store.get_users_in_room(room_id) extra_servers = {get_domain_from_id(u) for u in users} servers = set(extra_servers) | set(servers) diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index d82144d7fa8d..f134f1e234b6 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -103,7 +103,7 @@ async def get_stream( # Send down presence. if event.state_key == auth_user_id: # Send down presence for everyone in the room. 
- users = await self.state.get_current_users_in_room( + users = await self.store.get_users_in_room( event.room_id ) # type: Iterable[str] else: diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 49f8aa25ea7b..393f17c3a346 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -258,7 +258,7 @@ async def get_joined_members(self, requester: Requester, room_id: str) -> dict: "Getting joined members after leaving is not implemented" ) - users_with_profile = await self.state.get_current_users_in_room(room_id) + users_with_profile = await self.store.get_users_in_room_with_profiles(room_id) # If this is an AS, double check that they are allowed to see the members. # This can either be because the AS user is in the room or because there diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index ebbc2343343d..8e085dfbec22 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -1293,7 +1293,7 @@ async def _on_user_joined_room( remote_host = get_domain_from_id(user_id) - users = await self.state.get_current_users_in_room(room_id) + users = await self.store.get_users_in_room(room_id) user_ids = list(filter(self.is_mine_id, users)) states_d = await self.current_state_for_users(user_ids) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 5a888b794168..fb4823a5cc6e 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1327,7 +1327,7 @@ async def shutdown_room( new_room_id = None logger.info("Shutting down room %r", room_id) - users = await self.state.get_current_users_in_room(room_id) + users = await self.store.get_users_in_room(room_id) kicked_users = [] failed_to_kick_users = [] for user_id in users: diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index a9a3ee05c3f3..0fcc1532da8d 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1190,7 +1190,7 @@ async def _generate_sync_entry_for_device_list( # Step 1b, check for newly joined rooms for room_id in newly_joined_rooms: - joined_users = await self.state.get_current_users_in_room(room_id) + joined_users = await self.store.get_users_in_room(room_id) newly_joined_or_invited_users.update(joined_users) # TODO: Check that these users are actually new, i.e. either they @@ -1206,7 +1206,7 @@ async def _generate_sync_entry_for_device_list( # Now find users that we no longer track for room_id in newly_left_rooms: - left_users = await self.state.get_current_users_in_room(room_id) + left_users = await self.store.get_users_in_room(room_id) newly_left_users.update(left_users) # Remove any users that we still share a room with. @@ -1361,7 +1361,7 @@ async def _generate_sync_entry_for_presence( extra_users_ids = set(newly_joined_or_invited_users) for room_id in newly_joined_rooms: - users = await self.state.get_current_users_in_room(room_id) + users = await self.store.get_users_in_room(room_id) extra_users_ids.update(users) extra_users_ids.discard(user.to_string()) diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index b3bd92d37c02..a1770f620e59 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -213,19 +213,23 @@ async def get_current_state_ids( return ret.state async def get_current_users_in_room( - self, room_id: str, latest_event_ids: Optional[List[str]] = None + self, room_id: str, latest_event_ids: List[str] ) -> Dict[str, ProfileInfo]: """ Get the users who are currently in a room. 
+ Note: This is much slower than using the equivalent method + `DataStore.get_users_in_room` or `DataStore.get_users_in_room_with_profiles`, + so this should only be used when wanting the users at a particular point + in the room. + Args: room_id: The ID of the room. latest_event_ids: Precomputed list of latest event IDs. Will be computed if None. Returns: Dictionary of user IDs to their profileinfo. """ - if not latest_event_ids: - latest_event_ids = await self.store.get_latest_event_ids_in_room(room_id) + assert latest_event_ids is not None logger.debug("calling resolve_state_groups from get_current_users_in_room") diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 6b68d8720c92..3d98d3f5f822 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -69,6 +69,7 @@ def _invalidate_state_caches( self._attempt_to_invalidate_cache("is_host_joined", (room_id, host)) self._attempt_to_invalidate_cache("get_users_in_room", (room_id,)) + self._attempt_to_invalidate_cache("get_users_in_room_with_profiles", (room_id,)) self._attempt_to_invalidate_cache("get_room_summary", (room_id,)) self._attempt_to_invalidate_cache("get_current_state_ids", (room_id,)) diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 2a8532f8c1f9..5fc3bb5a7d7b 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -205,8 +205,12 @@ async def get_users_in_room_with_profiles( def _get_users_in_room_with_profiles(txn) -> Dict[str, ProfileInfo]: sql = """ - SELECT user_id, display_name, avatar_url FROM room_memberships - WHERE room_id = ? AND membership = ? + SELECT state_key, display_name, avatar_url FROM room_memberships as m + INNER JOIN current_state_events as c + ON m.event_id = c.event_id + AND m.room_id = c.room_id + AND m.user_id = c.state_key + WHERE c.type = 'm.room.member' AND c.room_id = ? AND m.membership = ? """ txn.execute(sql, (room_id, Membership.JOIN)) diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index 7a082fdd217f..a6bfb4902a1c 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -142,8 +142,6 @@ async def _populate_user_directory_process_rooms(self, progress, batch_size): batch_size (int): Maximum number of state events to process per cycle. """ - state = self.hs.get_state_handler() - # If we don't have progress filed, delete everything. if not progress: await self.delete_all_from_user_dir() @@ -197,7 +195,7 @@ def _get_next_batch(txn): room_id ) - users_with_profile = await state.get_current_users_in_room(room_id) + users_with_profile = await self.get_users_in_room_with_profiles(room_id) user_ids = set(users_with_profile) # Update each user in the user directory. 
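The `_get_users_in_room_with_profiles` change above is the heart of the user-directory bugfix: `room_memberships` keeps one row per membership *event*, so filtering on `membership = 'join'` alone can return users whose join has since been superseded. Joining through `current_state_events` keeps only memberships that are still part of the room's current state. A self-contained sqlite3 sketch of the same join (toy schema and data, not Synapse's real migrations):

```
import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript(
    """
    CREATE TABLE room_memberships (
        event_id TEXT, room_id TEXT, user_id TEXT,
        membership TEXT, display_name TEXT, avatar_url TEXT
    );
    CREATE TABLE current_state_events (
        event_id TEXT, room_id TEXT, type TEXT, state_key TEXT
    );
    -- @alice joined and later left: both rows remain in room_memberships,
    -- but only the leave event is referenced by the current state.
    INSERT INTO room_memberships VALUES
        ('$1', '!r', '@alice:hs', 'join',  'Alice', NULL),
        ('$2', '!r', '@alice:hs', 'leave', 'Alice', NULL),
        ('$3', '!r', '@bob:hs',   'join',  'Bob',   NULL);
    INSERT INTO current_state_events VALUES
        ('$2', '!r', 'm.room.member', '@alice:hs'),
        ('$3', '!r', 'm.room.member', '@bob:hs');
    """
)

rows = conn.execute(
    """
    SELECT state_key, display_name, avatar_url FROM room_memberships AS m
    INNER JOIN current_state_events AS c
        ON m.event_id = c.event_id
        AND m.room_id = c.room_id
        AND m.user_id = c.state_key
    WHERE c.type = 'm.room.member' AND c.room_id = ? AND m.membership = ?
    """,
    ("!r", "join"),
).fetchall()

print(rows)  # [('@bob:hs', 'Bob', None)]: @alice's stale join row is excluded
```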
From de8f0a03a3cc3a2327dfd3058c99e48067965079 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 5 May 2021 16:53:22 +0100 Subject: [PATCH 105/222] Don't set the external cache if its been done recently (#9905) --- changelog.d/9905.feature | 1 + synapse/handlers/federation.py | 4 +++- synapse/handlers/message.py | 34 ++++++++++++++++++++++++++++++---- 3 files changed, 34 insertions(+), 5 deletions(-) create mode 100644 changelog.d/9905.feature diff --git a/changelog.d/9905.feature b/changelog.d/9905.feature new file mode 100644 index 000000000000..96a0e7f09fbc --- /dev/null +++ b/changelog.d/9905.feature @@ -0,0 +1 @@ +Improve performance of sending events for worker-based deployments using Redis. diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 9d867aaf4d55..e8330a2b50db 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2446,7 +2446,9 @@ async def _check_event_auth( # If we are going to send this event over federation we precaclculate # the joined hosts. if event.internal_metadata.get_send_on_behalf_of(): - await self.event_creation_handler.cache_joined_hosts_for_event(event) + await self.event_creation_handler.cache_joined_hosts_for_event( + event, context + ) return context diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 393f17c3a346..8729332d4b86 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -51,6 +51,7 @@ from synapse.types import Requester, RoomAlias, StreamToken, UserID, create_requester from synapse.util import json_decoder, json_encoder from synapse.util.async_helpers import Linearizer +from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.metrics import measure_func from synapse.visibility import filter_events_for_client @@ -457,6 +458,19 @@ def __init__(self, hs: "HomeServer"): self._external_cache = hs.get_external_cache() + # Stores the state groups we've recently added to the joined hosts + # external cache. Note that the timeout must be significantly less than + # the TTL on the external cache. + self._external_cache_joined_hosts_updates = ( + None + ) # type: Optional[ExpiringCache] + if self._external_cache.is_enabled(): + self._external_cache_joined_hosts_updates = ExpiringCache( + "_external_cache_joined_hosts_updates", + self.clock, + expiry_ms=30 * 60 * 1000, + ) + async def create_event( self, requester: Requester, @@ -967,7 +981,7 @@ async def handle_new_client_event( await self.action_generator.handle_push_actions_for_event(event, context) - await self.cache_joined_hosts_for_event(event) + await self.cache_joined_hosts_for_event(event, context) try: # If we're a worker we need to hit out to the master. @@ -1008,7 +1022,9 @@ async def handle_new_client_event( await self.store.remove_push_actions_from_staging(event.event_id) raise - async def cache_joined_hosts_for_event(self, event: EventBase) -> None: + async def cache_joined_hosts_for_event( + self, event: EventBase, context: EventContext + ) -> None: """Precalculate the joined hosts at the event, when using Redis, so that external federation senders don't have to recalculate it themselves. """ @@ -1016,6 +1032,9 @@ async def cache_joined_hosts_for_event(self, event: EventBase) -> None: if not self._external_cache.is_enabled(): return + # If external cache is enabled we should always have this. 
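+        # (The assert below also narrows the Optional type for mypy.)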
+ assert self._external_cache_joined_hosts_updates is not None + # We actually store two mappings, event ID -> prev state group, # state group -> joined hosts, which is much more space efficient # than event ID -> joined hosts. @@ -1023,16 +1042,21 @@ async def cache_joined_hosts_for_event(self, event: EventBase) -> None: # Note: We have to cache event ID -> prev state group, as we don't # store that in the DB. # - # Note: We always set the state group -> joined hosts cache, even if - # we already set it, so that the expiry time is reset. + # Note: We set the state group -> joined hosts cache if it hasn't been + # set for a while, so that the expiry time is reset. state_entry = await self.state.resolve_state_groups_for_events( event.room_id, event_ids=event.prev_event_ids() ) if state_entry.state_group: + if state_entry.state_group in self._external_cache_joined_hosts_updates: + return + joined_hosts = await self.store.get_joined_hosts(event.room_id, state_entry) + # Note that the expiry times must be larger than the expiry time in + # _external_cache_joined_hosts_updates. await self._external_cache.set( "event_to_prev_state_group", event.event_id, @@ -1046,6 +1070,8 @@ async def cache_joined_hosts_for_event(self, event: EventBase) -> None: expiry_ms=60 * 60 * 1000, ) + self._external_cache_joined_hosts_updates[state_entry.state_group] = None + async def _validate_canonical_alias( self, directory_handler, room_alias_str: str, expected_room_id: str ) -> None: From 1fb9a2d0bf2506ca6e5343cb340a441585ca1c07 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 5 May 2021 16:53:45 +0100 Subject: [PATCH 106/222] Limit how often GC happens by time. (#9902) Synapse can be quite memory intensive, and unless care is taken to tune the GC thresholds it can end up thrashing, causing noticable performance problems for large servers. We fix this by limiting how often we GC a given generation, regardless of current counts/thresholds. This does not help with the reverse problem where the thresholds are set too high, but that should only happen in situations where they've been manually configured. Adds a `gc_min_seconds_between` config option to override the defaults. Fixes #9890. --- changelog.d/9902.feature | 1 + docs/sample_config.yaml | 10 ++++++++++ synapse/app/generic_worker.py | 3 +++ synapse/app/homeserver.py | 3 +++ synapse/config/server.py | 31 ++++++++++++++++++++++++++++++- synapse/metrics/__init__.py | 18 ++++++++++++++++-- 6 files changed, 63 insertions(+), 3 deletions(-) create mode 100644 changelog.d/9902.feature diff --git a/changelog.d/9902.feature b/changelog.d/9902.feature new file mode 100644 index 000000000000..4d9f324d4e29 --- /dev/null +++ b/changelog.d/9902.feature @@ -0,0 +1 @@ +Add limits to how often Synapse will GC, ensuring that large servers do not end up GC thrashing if `gc_thresholds` has not been correctly set. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index d013725cdc11..f469d6e54f66 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -152,6 +152,16 @@ presence: # #gc_thresholds: [700, 10, 10] +# The minimum time in seconds between each GC for a generation, regardless of +# the GC thresholds. This ensures that we don't do GC too frequently. +# +# A value of `[1s, 10s, 30s]` indicates that a second must pass between consecutive +# generation 0 GCs, etc. +# +# Defaults to `[1s, 10s, 30s]`. +# +#gc_min_interval: [0.5s, 30s, 1m] + # Set the limit on the returned events in the timeline in the get # and sync operations. 
The default value is 100. -1 means no upper limit. # diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 1a15ceee8112..a3fe9a3f381b 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -455,6 +455,9 @@ def start(config_options): synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts + if config.server.gc_seconds: + synapse.metrics.MIN_TIME_BETWEEN_GCS = config.server.gc_seconds + hs = GenericWorkerServer( config.server_name, config=config, diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 8e78134bbedc..6a823da10d55 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -342,6 +342,9 @@ def setup(config_options): events.USE_FROZEN_DICTS = config.use_frozen_dicts + if config.server.gc_seconds: + synapse.metrics.MIN_TIME_BETWEEN_GCS = config.server.gc_seconds + hs = SynapseHomeServer( config.server_name, config=config, diff --git a/synapse/config/server.py b/synapse/config/server.py index 21ca7b33e38d..c290a35a9285 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -19,7 +19,7 @@ import os.path import re from textwrap import indent -from typing import Any, Dict, Iterable, List, Optional, Set +from typing import Any, Dict, Iterable, List, Optional, Set, Tuple import attr import yaml @@ -572,6 +572,7 @@ def read_config(self, config, **kwargs): _warn_if_webclient_configured(self.listeners) self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None)) + self.gc_seconds = self.read_gc_intervals(config.get("gc_min_interval", None)) @attr.s class LimitRemoteRoomsConfig: @@ -917,6 +918,16 @@ def generate_config_section( # #gc_thresholds: [700, 10, 10] + # The minimum time in seconds between each GC for a generation, regardless of + # the GC thresholds. This ensures that we don't do GC too frequently. + # + # A value of `[1s, 10s, 30s]` indicates that a second must pass between consecutive + # generation 0 GCs, etc. + # + # Defaults to `[1s, 10s, 30s]`. + # + #gc_min_interval: [0.5s, 30s, 1m] + # Set the limit on the returned events in the timeline in the get # and sync operations. The default value is 100. -1 means no upper limit. # @@ -1305,6 +1316,24 @@ def add_arguments(parser): help="Turn on the twisted telnet manhole service on the given port.", ) + def read_gc_intervals(self, durations) -> Optional[Tuple[float, float, float]]: + """Reads the three durations for the GC min interval option, returning seconds.""" + if durations is None: + return None + + try: + if len(durations) != 3: + raise ValueError() + return ( + self.parse_duration(durations[0]) / 1000, + self.parse_duration(durations[1]) / 1000, + self.parse_duration(durations[2]) / 1000, + ) + except Exception: + raise ConfigError( + "Value of `gc_min_interval` must be a list of three durations if set" + ) + def is_threepid_reserved(reserved_threepids, threepid): """Check the threepid against the reserved threepid config diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index 31b7b3c256a7..e671da26d51d 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -535,6 +535,13 @@ def collect(self): REGISTRY.register(ReactorLastSeenMetric()) +# The minimum time in seconds between GCs for each generation, regardless of the current GC +# thresholds and counts. +MIN_TIME_BETWEEN_GCS = (1.0, 10.0, 30.0) + +# The time (in seconds since the epoch) of the last time we did a GC for each generation. 
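+# (Each entry is updated by the wrapped tick function below after it collects
+# that generation.)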
+_last_gc = [0.0, 0.0, 0.0] + def runUntilCurrentTimer(reactor, func): @functools.wraps(func) @@ -575,11 +582,16 @@ def f(*args, **kwargs): return ret # Check if we need to do a manual GC (since its been disabled), and do - # one if necessary. + # one if necessary. Note we go in reverse order as e.g. a gen 1 GC may + # promote an object into gen 2, and we don't want to handle the same + # object multiple times. threshold = gc.get_threshold() counts = gc.get_count() for i in (2, 1, 0): - if threshold[i] < counts[i]: + # We check if we need to do one based on a straightforward + # comparison between the threshold and count. We also do an extra + # check to make sure that we don't a GC too often. + if threshold[i] < counts[i] and MIN_TIME_BETWEEN_GCS[i] < end - _last_gc[i]: if i == 0: logger.debug("Collecting gc %d", i) else: @@ -589,6 +601,8 @@ def f(*args, **kwargs): unreachable = gc.collect(i) end = time.time() + _last_gc[i] = end + gc_time.labels(i).observe(end - start) gc_unreachable.labels(i).set(unreachable) From ef889c98a6cde0cfa95f7fdaf7f99ec3c1e9bb7f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 5 May 2021 16:54:36 +0100 Subject: [PATCH 107/222] Optionally track memory usage of each LruCache (#9881) This will double count slightly in the presence of interned strings. It's off by default as it can consume a lot of resources. --- changelog.d/9881.feature | 1 + mypy.ini | 3 +++ synapse/app/generic_worker.py | 1 + synapse/app/homeserver.py | 1 + synapse/config/cache.py | 11 ++++++++ synapse/python_dependencies.py | 2 ++ synapse/util/caches/__init__.py | 31 +++++++++++++++++++++ synapse/util/caches/lrucache.py | 48 ++++++++++++++++++++++++++++++++- 8 files changed, 97 insertions(+), 1 deletion(-) create mode 100644 changelog.d/9881.feature diff --git a/changelog.d/9881.feature b/changelog.d/9881.feature new file mode 100644 index 000000000000..088a517e0255 --- /dev/null +++ b/changelog.d/9881.feature @@ -0,0 +1 @@ +Add experimental option to track memory usage of the caches. 
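The option below leans on pympler for deep object sizing. A quick standalone taste of the primitive it builds on (requires `pip install pympler`; exact byte counts vary by platform and Python version):

```
from pympler.asizeof import asizeof

# Deep size in bytes, following references: the dict, its key, the list
# and the strings inside it are all counted.
entry = {"!room:example.org": ["$event_a", "$event_b"]}
print(asizeof(entry))
```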
diff --git a/mypy.ini b/mypy.ini index a40f705b76f4..ea655a0d4d92 100644 --- a/mypy.ini +++ b/mypy.ini @@ -171,3 +171,6 @@ ignore_missing_imports = True [mypy-txacme.*] ignore_missing_imports = True + +[mypy-pympler.*] +ignore_missing_imports = True diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index a3fe9a3f381b..f730cdbd78f4 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -454,6 +454,7 @@ def start(config_options): config.server.update_user_directory = False synapse.events.USE_FROZEN_DICTS = config.use_frozen_dicts + synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage if config.server.gc_seconds: synapse.metrics.MIN_TIME_BETWEEN_GCS = config.server.gc_seconds diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 6a823da10d55..b2501ee4d7f0 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -341,6 +341,7 @@ def setup(config_options): sys.exit(0) events.USE_FROZEN_DICTS = config.use_frozen_dicts + synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage if config.server.gc_seconds: synapse.metrics.MIN_TIME_BETWEEN_GCS = config.server.gc_seconds diff --git a/synapse/config/cache.py b/synapse/config/cache.py index 41b9b3f51f78..91165ee1cee0 100644 --- a/synapse/config/cache.py +++ b/synapse/config/cache.py @@ -17,6 +17,8 @@ import threading from typing import Callable, Dict +from synapse.python_dependencies import DependencyException, check_requirements + from ._base import Config, ConfigError # The prefix for all cache factor-related environment variables @@ -189,6 +191,15 @@ def read_config(self, config, **kwargs): ) self.cache_factors[cache] = factor + self.track_memory_usage = cache_config.get("track_memory_usage", False) + if self.track_memory_usage: + try: + check_requirements("cache_memory") + except DependencyException as e: + raise ConfigError( + e.message # noqa: B306, DependencyException.message is a property + ) + # Resize all caches (if necessary) with the new factors we've loaded self.resize_all_caches() diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 2de946f46404..d58eeeaa7469 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -116,6 +116,8 @@ # hiredis is not a *strict* dependency, but it makes things much faster. # (if it is not installed, we fall back to slow code.) "redis": ["txredisapi>=1.4.7", "hiredis"], + # Required to use experimental `caches.track_memory_usage` config option. + "cache_memory": ["pympler"], } ALL_OPTIONAL_REQUIREMENTS = set() # type: Set[str] diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index 46af7fa47312..ca36f07c205b 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -24,6 +24,11 @@ logger = logging.getLogger(__name__) + +# Whether to track estimated memory usage of the LruCaches. 
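+# (Set at startup from the experimental `caches.track_memory_usage` config
+# option, see the config changes in this patch.)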
+TRACK_MEMORY_USAGE = False + + caches_by_name = {} # type: Dict[str, Sized] collectors_by_name = {} # type: Dict[str, CacheMetric] @@ -32,6 +37,11 @@ cache_evicted = Gauge("synapse_util_caches_cache:evicted_size", "", ["name"]) cache_total = Gauge("synapse_util_caches_cache:total", "", ["name"]) cache_max_size = Gauge("synapse_util_caches_cache_max_size", "", ["name"]) +cache_memory_usage = Gauge( + "synapse_util_caches_cache_size_bytes", + "Estimated memory usage of the caches", + ["name"], +) response_cache_size = Gauge("synapse_util_caches_response_cache:size", "", ["name"]) response_cache_hits = Gauge("synapse_util_caches_response_cache:hits", "", ["name"]) @@ -52,6 +62,7 @@ class CacheMetric: hits = attr.ib(default=0) misses = attr.ib(default=0) evicted_size = attr.ib(default=0) + memory_usage = attr.ib(default=None) def inc_hits(self): self.hits += 1 @@ -62,6 +73,19 @@ def inc_misses(self): def inc_evictions(self, size=1): self.evicted_size += size + def inc_memory_usage(self, memory: int): + if self.memory_usage is None: + self.memory_usage = 0 + + self.memory_usage += memory + + def dec_memory_usage(self, memory: int): + self.memory_usage -= memory + + def clear_memory_usage(self): + if self.memory_usage is not None: + self.memory_usage = 0 + def describe(self): return [] @@ -81,6 +105,13 @@ def collect(self): cache_total.labels(self._cache_name).set(self.hits + self.misses) if getattr(self._cache, "max_size", None): cache_max_size.labels(self._cache_name).set(self._cache.max_size) + + if TRACK_MEMORY_USAGE: + # self.memory_usage can be None if nothing has been inserted + # into the cache yet. + cache_memory_usage.labels(self._cache_name).set( + self.memory_usage or 0 + ) if self._collect_callback: self._collect_callback() except Exception as e: diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index 10b0ec6b75bc..1be675e014af 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -32,9 +32,36 @@ from typing_extensions import Literal from synapse.config import cache as cache_config +from synapse.util import caches from synapse.util.caches import CacheMetric, register_cache from synapse.util.caches.treecache import TreeCache +try: + from pympler.asizeof import Asizer + + def _get_size_of(val: Any, *, recurse=True) -> int: + """Get an estimate of the size in bytes of the object. + + Args: + val: The object to size. + recurse: If true will include referenced values in the size, + otherwise only sizes the given object. + """ + # Ignore singleton values when calculating memory usage. 
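+        # (These are shared process-wide, so charging them to one cache
+        # would overcount.)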
+ if val in ((), None, ""): + return 0 + + sizer = Asizer() + sizer.exclude_refs((), None, "") + return sizer.asizeof(val, limit=100 if recurse else 0) + + +except ImportError: + + def _get_size_of(val: Any, *, recurse=True) -> int: + return 0 + + # Function type: the type used for invalidation callbacks FT = TypeVar("FT", bound=Callable[..., Any]) @@ -56,7 +83,7 @@ def enumerate_leaves(node, depth): class _Node: - __slots__ = ["prev_node", "next_node", "key", "value", "callbacks"] + __slots__ = ["prev_node", "next_node", "key", "value", "callbacks", "memory"] def __init__( self, @@ -84,6 +111,16 @@ def __init__( self.add_callbacks(callbacks) + self.memory = 0 + if caches.TRACK_MEMORY_USAGE: + self.memory = ( + _get_size_of(key) + + _get_size_of(value) + + _get_size_of(self.callbacks, recurse=False) + + _get_size_of(self, recurse=False) + ) + self.memory += _get_size_of(self.memory, recurse=False) + def add_callbacks(self, callbacks: Collection[Callable[[], None]]) -> None: """Add to stored list of callbacks, removing duplicates.""" @@ -233,6 +270,9 @@ def add_node(key, value, callbacks: Collection[Callable[[], None]] = ()): if size_callback: cached_cache_len[0] += size_callback(node.value) + if caches.TRACK_MEMORY_USAGE and metrics: + metrics.inc_memory_usage(node.memory) + def move_node_to_front(node): prev_node = node.prev_node next_node = node.next_node @@ -258,6 +298,9 @@ def delete_node(node): node.run_and_clear_callbacks() + if caches.TRACK_MEMORY_USAGE and metrics: + metrics.dec_memory_usage(node.memory) + return deleted_len @overload @@ -373,6 +416,9 @@ def cache_clear() -> None: if size_callback: cached_cache_len[0] = 0 + if caches.TRACK_MEMORY_USAGE and metrics: + metrics.clear_memory_usage() + @synchronized def cache_contains(key: KT) -> bool: return key in cache From e2a443550e7b47bf8fe1b5fbd76f9ca95e81cbad Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 5 May 2021 11:56:51 -0400 Subject: [PATCH 108/222] Support stable MSC1772 spaces identifiers. (#9915) Support both the unstable and stable identifiers. A future release will disable the unstable identifiers. --- changelog.d/9915.feature | 1 + synapse/api/constants.py | 3 +++ synapse/handlers/space_summary.py | 8 ++++++-- 3 files changed, 10 insertions(+), 2 deletions(-) create mode 100644 changelog.d/9915.feature diff --git a/changelog.d/9915.feature b/changelog.d/9915.feature new file mode 100644 index 000000000000..832916cb01af --- /dev/null +++ b/changelog.d/9915.feature @@ -0,0 +1 @@ +Support stable identifiers from [MSC1772](https://github.com/matrix-org/matrix-doc/pull/1772). 
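The pattern this commit applies throughout is "stable identifier first, unstable fallback". Reduced to a standalone sketch (constants inlined; note that the follow-up commit further below corrects the stable room-type field from `"m.type"` to plain `"type"`):

```
def get_room_type(create_content: dict):
    # Prefer the stable identifier, falling back to the unstable MSC1772 one.
    room_type = create_content.get("type")
    if not room_type:
        room_type = create_content.get("org.matrix.msc1772.type")
    return room_type


assert get_room_type({"type": "m.space"}) == "m.space"
assert get_room_type({"org.matrix.msc1772.type": "m.space"}) == "m.space"
assert get_room_type({}) is None
```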
diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 936b6534b41f..bff750e5fb10 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -110,6 +110,8 @@ class EventTypes: Dummy = "org.matrix.dummy_event" + SpaceChild = "m.space.child" + SpaceParent = "m.space.parent" MSC1772_SPACE_CHILD = "org.matrix.msc1772.space.child" MSC1772_SPACE_PARENT = "org.matrix.msc1772.space.parent" @@ -174,6 +176,7 @@ class EventContentFields: SELF_DESTRUCT_AFTER = "org.matrix.self_destruct_after" # cf https://github.com/matrix-org/matrix-doc/pull/1772 + ROOM_TYPE = "m.type" MSC1772_ROOM_TYPE = "org.matrix.msc1772.type" diff --git a/synapse/handlers/space_summary.py b/synapse/handlers/space_summary.py index 01e3e050f93c..d32452747c34 100644 --- a/synapse/handlers/space_summary.py +++ b/synapse/handlers/space_summary.py @@ -288,6 +288,7 @@ async def _summarize_remote_room( ev.data for ev in res.events if ev.event_type == EventTypes.MSC1772_SPACE_CHILD + or ev.event_type == EventTypes.SpaceChild ) async def _is_room_accessible(self, room_id: str, requester: Optional[str]) -> bool: @@ -331,7 +332,9 @@ async def _build_room_entry(self, room_id: str) -> JsonDict: ) # TODO: update once MSC1772 lands - room_type = create_event.content.get(EventContentFields.MSC1772_ROOM_TYPE) + room_type = create_event.content.get(EventContentFields.ROOM_TYPE) + if not room_type: + room_type = create_event.content.get(EventContentFields.MSC1772_ROOM_TYPE) entry = { "room_id": stats["room_id"], @@ -360,8 +363,9 @@ async def _get_child_events(self, room_id: str) -> Iterable[EventBase]: [ event_id for key, event_id in current_state_ids.items() - # TODO: update once MSC1772 lands + # TODO: update once MSC1772 has been FCP for a period of time. if key[0] == EventTypes.MSC1772_SPACE_CHILD + or key[0] == EventTypes.SpaceChild ] ) From 37623e33822d4e032ba6d1f523fb09b12fe27aab Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 5 May 2021 17:27:05 +0100 Subject: [PATCH 109/222] Increase perf of handling presence when joining large rooms. (#9916) --- changelog.d/9916.feature | 1 + synapse/handlers/presence.py | 154 +++++++++++++++++--------------- tests/handlers/test_presence.py | 14 +-- 3 files changed, 87 insertions(+), 82 deletions(-) create mode 100644 changelog.d/9916.feature diff --git a/changelog.d/9916.feature b/changelog.d/9916.feature new file mode 100644 index 000000000000..54165cce18af --- /dev/null +++ b/changelog.d/9916.feature @@ -0,0 +1 @@ +Improve performance after joining a large room when presence is enabled. diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 8e085dfbec22..6fd1f34289f8 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -1183,7 +1183,16 @@ async def _unsafe_process(self) -> None: max_pos, deltas = await self.store.get_current_state_deltas( self._event_pos, room_max_stream_ordering ) - await self._handle_state_delta(deltas) + + # We may get multiple deltas for different rooms, but we want to + # handle them on a room by room basis, so we batch them up by + # room. 
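+            # (setdefault/append preserves each room's deltas in their
+            # original order.)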
+ deltas_by_room: Dict[str, List[JsonDict]] = {} + for delta in deltas: + deltas_by_room.setdefault(delta["room_id"], []).append(delta) + + for room_id, deltas_for_room in deltas_by_room.items(): + await self._handle_state_delta(room_id, deltas_for_room) self._event_pos = max_pos @@ -1192,17 +1201,21 @@ async def _unsafe_process(self) -> None: max_pos ) - async def _handle_state_delta(self, deltas: List[JsonDict]) -> None: - """Process current state deltas to find new joins that need to be - handled. + async def _handle_state_delta(self, room_id: str, deltas: List[JsonDict]) -> None: + """Process current state deltas for the room to find new joins that need + to be handled. """ - # A map of destination to a set of user state that they should receive - presence_destinations = {} # type: Dict[str, Set[UserPresenceState]] + + # Sets of newly joined users. Note that if the local server is + # joining a remote room for the first time we'll see both the joining + # user and all remote users as newly joined. + newly_joined_users = set() for delta in deltas: + assert room_id == delta["room_id"] + typ = delta["type"] state_key = delta["state_key"] - room_id = delta["room_id"] event_id = delta["event_id"] prev_event_id = delta["prev_event_id"] @@ -1231,72 +1244,55 @@ async def _handle_state_delta(self, deltas: List[JsonDict]) -> None: # Ignore changes to join events. continue - # Retrieve any user presence state updates that need to be sent as a result, - # and the destinations that need to receive it - destinations, user_presence_states = await self._on_user_joined_room( - room_id, state_key - ) - - # Insert the destinations and respective updates into our destinations dict - for destination in destinations: - presence_destinations.setdefault(destination, set()).update( - user_presence_states - ) - - # Send out user presence updates for each destination - for destination, user_state_set in presence_destinations.items(): - self._federation_queue.send_presence_to_destinations( - destinations=[destination], states=user_state_set - ) - - async def _on_user_joined_room( - self, room_id: str, user_id: str - ) -> Tuple[List[str], List[UserPresenceState]]: - """Called when we detect a user joining the room via the current state - delta stream. Returns the destinations that need to be updated and the - presence updates to send to them. - - Args: - room_id: The ID of the room that the user has joined. - user_id: The ID of the user that has joined the room. - - Returns: - A tuple of destinations and presence updates to send to them. - """ - if self.is_mine_id(user_id): - # If this is a local user then we need to send their presence - # out to hosts in the room (who don't already have it) - - # TODO: We should be able to filter the hosts down to those that - # haven't previously seen the user - - remote_hosts = await self.state.get_current_hosts_in_room(room_id) + newly_joined_users.add(state_key) - # Filter out ourselves. - filtered_remote_hosts = [ - host for host in remote_hosts if host != self.server_name - ] - - state = await self.current_state_for_user(user_id) - return filtered_remote_hosts, [state] - else: - # A remote user has joined the room, so we need to: - # 1. Check if this is a new server in the room - # 2. If so send any presence they don't already have for - # local users in the room. - - # TODO: We should be able to filter the users down to those that - # the server hasn't previously seen - - # TODO: Check that this is actually a new server joining the - # room. 
- - remote_host = get_domain_from_id(user_id) + if not newly_joined_users: + # If nobody has joined then there's nothing to do. + return - users = await self.store.get_users_in_room(room_id) - user_ids = list(filter(self.is_mine_id, users)) + # We want to send: + # 1. presence states of all local users in the room to newly joined + # remote servers + # 2. presence states of newly joined users to all remote servers in + # the room. + # + # TODO: Only send presence states to remote hosts that don't already + # have them (because they already share rooms). + + # Get all the users who were already in the room, by fetching the + # current users in the room and removing the newly joined users. + users = await self.store.get_users_in_room(room_id) + prev_users = set(users) - newly_joined_users + + # Construct sets for all the local users and remote hosts that were + # already in the room + prev_local_users = [] + prev_remote_hosts = set() + for user_id in prev_users: + if self.is_mine_id(user_id): + prev_local_users.append(user_id) + else: + prev_remote_hosts.add(get_domain_from_id(user_id)) + + # Similarly, construct sets for all the local users and remote hosts + # that were *not* already in the room. Care needs to be taken with the + # calculating the remote hosts, as a host may have already been in the + # room even if there is a newly joined user from that host. + newly_joined_local_users = [] + newly_joined_remote_hosts = set() + for user_id in newly_joined_users: + if self.is_mine_id(user_id): + newly_joined_local_users.append(user_id) + else: + host = get_domain_from_id(user_id) + if host not in prev_remote_hosts: + newly_joined_remote_hosts.add(host) - states_d = await self.current_state_for_users(user_ids) + # Send presence states of all local users in the room to newly joined + # remote servers. (We actually only send states for local users already + # in the room, as we'll send states for newly joined local users below.) + if prev_local_users and newly_joined_remote_hosts: + local_states = await self.current_state_for_users(prev_local_users) # Filter out old presence, i.e. offline presence states where # the user hasn't been active for a week. 
We can change this @@ -1306,13 +1302,27 @@ async def _on_user_joined_room( now = self.clock.time_msec() states = [ state - for state in states_d.values() + for state in local_states.values() if state.state != PresenceState.OFFLINE or now - state.last_active_ts < 7 * 24 * 60 * 60 * 1000 or state.status_msg is not None ] - return [remote_host], states + self._federation_queue.send_presence_to_destinations( + destinations=newly_joined_remote_hosts, + states=states, + ) + + # Send presence states of newly joined users to all remote servers in + # the room + if newly_joined_local_users and ( + prev_remote_hosts or newly_joined_remote_hosts + ): + local_states = await self.current_state_for_users(newly_joined_local_users) + self._federation_queue.send_presence_to_destinations( + destinations=prev_remote_hosts | newly_joined_remote_hosts, + states=list(local_states.values()), + ) def should_notify(old_state: UserPresenceState, new_state: UserPresenceState) -> bool: diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index ce330e79cca0..1ffab709fcb8 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -729,7 +729,7 @@ def test_remote_joins(self): ) self.assertEqual(expected_state.state, PresenceState.ONLINE) self.federation_sender.send_presence_to_destinations.assert_called_once_with( - destinations=["server2"], states={expected_state} + destinations={"server2"}, states=[expected_state] ) # @@ -740,7 +740,7 @@ def test_remote_joins(self): self._add_new_user(room_id, "@bob:server3") self.federation_sender.send_presence_to_destinations.assert_called_once_with( - destinations=["server3"], states={expected_state} + destinations={"server3"}, states=[expected_state] ) def test_remote_gets_presence_when_local_user_joins(self): @@ -788,14 +788,8 @@ def test_remote_gets_presence_when_local_user_joins(self): self.presence_handler.current_state_for_user("@test2:server") ) self.assertEqual(expected_state.state, PresenceState.ONLINE) - self.assertEqual( - self.federation_sender.send_presence_to_destinations.call_count, 2 - ) - self.federation_sender.send_presence_to_destinations.assert_any_call( - destinations=["server3"], states={expected_state} - ) - self.federation_sender.send_presence_to_destinations.assert_any_call( - destinations=["server2"], states={expected_state} + self.federation_sender.send_presence_to_destinations.assert_called_once_with( + destinations={"server2", "server3"}, states=[expected_state] ) def _add_new_user(self, room_id, user_id): From d783880083733a694ed4c7b15ca53be00e06f8a7 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 5 May 2021 13:33:05 -0400 Subject: [PATCH 110/222] Include the time of the create event in Spaces Summary. (#9928) This is an update based on changes to MSC2946. The origin_server_ts of the m.room.create event is copied into the creation_ts field for each room returned from the spaces summary. --- changelog.d/9928.bugfix | 1 + synapse/handlers/space_summary.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/9928.bugfix diff --git a/changelog.d/9928.bugfix b/changelog.d/9928.bugfix new file mode 100644 index 000000000000..7b74cd9fb650 --- /dev/null +++ b/changelog.d/9928.bugfix @@ -0,0 +1 @@ +Include the `origin_server_ts` property in the experimental [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946) support to allow clients to properly sort rooms. 
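With `creation_ts` included in each summary entry, a client can order a space's rooms by age without fetching every `m.room.create` event itself. Illustrative only, with made-up entries:

```
rooms = [
    {"room_id": "!b:example.org", "creation_ts": 1620310000000},
    {"room_id": "!a:example.org", "creation_ts": 1620220000000},
]
# origin_server_ts values are milliseconds since the epoch, so a plain
# numeric sort gives oldest-first ordering.
rooms.sort(key=lambda entry: entry["creation_ts"])
print([r["room_id"] for r in rooms])  # ['!a:example.org', '!b:example.org']
```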
diff --git a/synapse/handlers/space_summary.py b/synapse/handlers/space_summary.py index d32452747c34..2e997841f166 100644 --- a/synapse/handlers/space_summary.py +++ b/synapse/handlers/space_summary.py @@ -347,6 +347,7 @@ async def _build_room_entry(self, room_id: str) -> JsonDict: stats["history_visibility"] == HistoryVisibility.WORLD_READABLE ), "guest_can_join": stats["guest_access"] == "can_join", + "creation_ts": create_event.origin_server_ts, "room_type": room_type, } From 70f0ffd2fcd815a065b4734ac606654a2e11dd28 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 5 May 2021 16:31:16 -0400 Subject: [PATCH 111/222] Follow-up to #9915 to correct the identifier for room types. --- synapse/api/constants.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/api/constants.py b/synapse/api/constants.py index bff750e5fb10..ab628b2be7fd 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -176,7 +176,7 @@ class EventContentFields: SELF_DESTRUCT_AFTER = "org.matrix.self_destruct_after" # cf https://github.com/matrix-org/matrix-doc/pull/1772 - ROOM_TYPE = "m.type" + ROOM_TYPE = "type" MSC1772_ROOM_TYPE = "org.matrix.msc1772.type" From 24f07a83e604a7ee93214ef0bf7d7c56a14a534d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 6 May 2021 14:06:06 +0100 Subject: [PATCH 112/222] Pin attrs to <21.1.0 (#9937) Fixes #9936 --- changelog.d/9937.bugfix | 1 + synapse/python_dependencies.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/9937.bugfix diff --git a/changelog.d/9937.bugfix b/changelog.d/9937.bugfix new file mode 100644 index 000000000000..c1cc5ccb43ae --- /dev/null +++ b/changelog.d/9937.bugfix @@ -0,0 +1 @@ +Fix bug where `/sync` would break if using the latest version of `attrs` dependency, by pinning to a previous version. diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 2de946f46404..7709361f1676 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -78,7 +78,8 @@ # we use attr.validators.deep_iterable, which arrived in 19.1.0 (Note: # Fedora 31 only has 19.1, so if we want to upgrade we should wait until 33 # is out in November.) - "attrs>=19.1.0", + # Note: 21.1.0 broke `/sync`, see #9936 + "attrs>=19.1.0,<21.1.0", "netaddr>=0.7.18", "Jinja2>=2.9", "bleach>=1.4.3", From ac88aca7f7acc2ce909db230682f93bb4e2ff73b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 6 May 2021 14:06:38 +0100 Subject: [PATCH 113/222] 1.33.1 --- CHANGES.md | 9 +++++++++ changelog.d/9937.bugfix | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/9937.bugfix diff --git a/CHANGES.md b/CHANGES.md index 206859cff7ee..a41abbefbaf7 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.33.1 (2021-05-06) +=========================== + +Bugfixes +-------- + +- Fix bug where `/sync` would break if using the latest version of `attrs` dependency, by pinning to a previous version. ([\#9937](https://github.com/matrix-org/synapse/issues/9937)) + + Synapse 1.33.0 (2021-05-05) =========================== diff --git a/changelog.d/9937.bugfix b/changelog.d/9937.bugfix deleted file mode 100644 index c1cc5ccb43ae..000000000000 --- a/changelog.d/9937.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where `/sync` would break if using the latest version of `attrs` dependency, by pinning to a previous version. 
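A quick sanity check of the `attrs` pin introduced in the previous commit, using the third-party `packaging` library (not part of the patch):

```
from packaging.specifiers import SpecifierSet

spec = SpecifierSet(">=19.1.0,<21.1.0")
print("20.3.0" in spec)  # True: still installable
print("21.1.0" in spec)  # False: the release that broke /sync (#9936)
```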
diff --git a/debian/changelog b/debian/changelog index b54d3f2bc640..de50dd14ea80 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.33.1) stable; urgency=medium + + * New synapse release 1.33.1. + + -- Synapse Packaging team Thu, 06 May 2021 14:06:33 +0100 + matrix-synapse-py3 (1.33.0) stable; urgency=medium * New synapse release 1.33.0. diff --git a/synapse/__init__.py b/synapse/__init__.py index 5eac40730a77..441cd8b33906 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.33.0" +__version__ = "1.33.1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From eba431c539dbe0ca28794d89962d447d1f75938f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 6 May 2021 15:06:35 +0100 Subject: [PATCH 114/222] Revert "Leave out optional keys from /sync (#9919)" (#9940) This reverts commit e9eb3549d32a6f93d07de8dbd5e1ebe54c8d8278. --- changelog.d/9919.feature | 1 - synapse/rest/client/v2_alpha/sync.py | 62 ++++++------------- tests/rest/client/v2_alpha/test_sync.py | 30 ++++++++- .../test_resource_limits_server_notices.py | 8 +-- 4 files changed, 50 insertions(+), 51 deletions(-) delete mode 100644 changelog.d/9919.feature diff --git a/changelog.d/9919.feature b/changelog.d/9919.feature deleted file mode 100644 index 07747505d243..000000000000 --- a/changelog.d/9919.feature +++ /dev/null @@ -1 +0,0 @@ -Omit empty fields from the `/sync` response. Contributed by @deepbluev7. diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index 5f8565333068..95ee3f1b84f3 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -14,7 +14,6 @@ import itertools import logging -from collections import defaultdict from typing import TYPE_CHECKING, Tuple from synapse.api.constants import PresenceState @@ -230,49 +229,24 @@ async def encode_response(self, time_now, sync_result, access_token_id, filter): ) logger.debug("building sync response dict") - - response: dict = defaultdict(dict) - response["next_batch"] = await sync_result.next_batch.to_string(self.store) - - if sync_result.account_data: - response["account_data"] = {"events": sync_result.account_data} - if sync_result.presence: - response["presence"] = SyncRestServlet.encode_presence( - sync_result.presence, time_now - ) - - if sync_result.to_device: - response["to_device"] = {"events": sync_result.to_device} - - if sync_result.device_lists.changed: - response["device_lists"]["changed"] = list(sync_result.device_lists.changed) - if sync_result.device_lists.left: - response["device_lists"]["left"] = list(sync_result.device_lists.left) - - if sync_result.device_one_time_keys_count: - response[ - "device_one_time_keys_count" - ] = sync_result.device_one_time_keys_count - if sync_result.device_unused_fallback_key_types: - response[ - "org.matrix.msc2732.device_unused_fallback_key_types" - ] = sync_result.device_unused_fallback_key_types - - if joined: - response["rooms"]["join"] = joined - if invited: - response["rooms"]["invite"] = invited - if archived: - response["rooms"]["leave"] = archived - - if sync_result.groups.join: - response["groups"]["join"] = sync_result.groups.join - if sync_result.groups.invite: - response["groups"]["invite"] = sync_result.groups.invite - if sync_result.groups.leave: - response["groups"]["leave"] = sync_result.groups.leave - - return response + return { + 
"account_data": {"events": sync_result.account_data}, + "to_device": {"events": sync_result.to_device}, + "device_lists": { + "changed": list(sync_result.device_lists.changed), + "left": list(sync_result.device_lists.left), + }, + "presence": SyncRestServlet.encode_presence(sync_result.presence, time_now), + "rooms": {"join": joined, "invite": invited, "leave": archived}, + "groups": { + "join": sync_result.groups.join, + "invite": sync_result.groups.invite, + "leave": sync_result.groups.leave, + }, + "device_one_time_keys_count": sync_result.device_one_time_keys_count, + "org.matrix.msc2732.device_unused_fallback_key_types": sync_result.device_unused_fallback_key_types, + "next_batch": await sync_result.next_batch.to_string(self.store), + } @staticmethod def encode_presence(events, time_now): diff --git a/tests/rest/client/v2_alpha/test_sync.py b/tests/rest/client/v2_alpha/test_sync.py index 74be5176d054..dbcbdf159a1a 100644 --- a/tests/rest/client/v2_alpha/test_sync.py +++ b/tests/rest/client/v2_alpha/test_sync.py @@ -37,7 +37,35 @@ def test_sync_argless(self): channel = self.make_request("GET", "/sync") self.assertEqual(channel.code, 200) - self.assertIn("next_batch", channel.json_body) + self.assertTrue( + { + "next_batch", + "rooms", + "presence", + "account_data", + "to_device", + "device_lists", + }.issubset(set(channel.json_body.keys())) + ) + + def test_sync_presence_disabled(self): + """ + When presence is disabled, the key does not appear in /sync. + """ + self.hs.config.use_presence = False + + channel = self.make_request("GET", "/sync") + + self.assertEqual(channel.code, 200) + self.assertTrue( + { + "next_batch", + "rooms", + "account_data", + "to_device", + "device_lists", + }.issubset(set(channel.json_body.keys())) + ) class SyncFilterTestCase(unittest.HomeserverTestCase): diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index 3245aa91ca6e..d46521ccdc0f 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -306,9 +306,8 @@ def test_no_invite_without_notice(self): channel = self.make_request("GET", "/sync?timeout=0", access_token=tok) - self.assertNotIn( - "rooms", channel.json_body, "Got invites without server notice" - ) + invites = channel.json_body["rooms"]["invite"] + self.assertEqual(len(invites), 0, invites) def test_invite_with_notice(self): """Tests that, if the MAU limit is hit, the server notices user invites each user @@ -365,8 +364,7 @@ def _trigger_notice_and_join(self): # We could also pick another user and sync with it, which would return an # invite to a system notices room, but it doesn't matter which user we're # using so we use the last one because it saves us an extra sync. - if "rooms" in channel.json_body: - invites = channel.json_body["rooms"]["invite"] + invites = channel.json_body["rooms"]["invite"] # Make sure we have an invite to process. 
self.assertEqual(len(invites), 1, invites) From 8771b1337da9faa3b60cf0ec0a128a7de856f19e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 6 May 2021 15:54:07 +0100 Subject: [PATCH 115/222] Export jemalloc stats to prometheus when used (#9882) --- changelog.d/9882.misc | 1 + synapse/app/_base.py | 2 + synapse/metrics/__init__.py | 1 + synapse/metrics/jemalloc.py | 196 ++++++++++++++++++++++++++++++++++++ 4 files changed, 200 insertions(+) create mode 100644 changelog.d/9882.misc create mode 100644 synapse/metrics/jemalloc.py diff --git a/changelog.d/9882.misc b/changelog.d/9882.misc new file mode 100644 index 000000000000..facfa31f38ad --- /dev/null +++ b/changelog.d/9882.misc @@ -0,0 +1 @@ +Export jemalloc stats to Prometheus if it is being used. diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 638e01c1b2e5..59918d789ec5 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -37,6 +37,7 @@ from synapse.crypto import context_factory from synapse.logging.context import PreserveLoggingContext from synapse.metrics.background_process_metrics import wrap_as_background_process +from synapse.metrics.jemalloc import setup_jemalloc_stats from synapse.util.async_helpers import Linearizer from synapse.util.daemonize import daemonize_process from synapse.util.rlimit import change_resource_limit @@ -115,6 +116,7 @@ def start_reactor( def run(): logger.info("Running") + setup_jemalloc_stats() change_resource_limit(soft_file_limit) if gc_thresholds: gc.set_threshold(*gc_thresholds) diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index e671da26d51d..fef28466694a 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -629,6 +629,7 @@ def f(*args, **kwargs): except AttributeError: pass + __all__ = [ "MetricsResource", "generate_latest", diff --git a/synapse/metrics/jemalloc.py b/synapse/metrics/jemalloc.py new file mode 100644 index 000000000000..29ab6c0229df --- /dev/null +++ b/synapse/metrics/jemalloc.py @@ -0,0 +1,196 @@ +# Copyright 2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import ctypes +import logging +import os +import re +from typing import Optional + +from synapse.metrics import REGISTRY, GaugeMetricFamily + +logger = logging.getLogger(__name__) + + +def _setup_jemalloc_stats(): + """Checks to see if jemalloc is loaded, and hooks up a collector to record + statistics exposed by jemalloc. + """ + + # Try to find the loaded jemalloc shared library, if any. We need to + # introspect into what is loaded, rather than loading whatever is on the + # path, as if we load a *different* jemalloc version things will seg fault. + + # We look in `/proc/self/maps`, which only exists on linux. + if not os.path.exists("/proc/self/maps"): + logger.debug("Not looking for jemalloc as no /proc/self/maps exist") + return + + # We're looking for a path at the end of the line that includes + # "libjemalloc". 
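+    # (For example "/usr/lib/x86_64-linux-gnu/libjemalloc.so.2" on
+    # Debian-based systems.)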
+ regex = re.compile(r"/\S+/libjemalloc.*$") + + jemalloc_path = None + with open("/proc/self/maps") as f: + for line in f: + match = regex.search(line.strip()) + if match: + jemalloc_path = match.group() + + if not jemalloc_path: + # No loaded jemalloc was found. + logger.debug("jemalloc not found") + return + + logger.debug("Found jemalloc at %s", jemalloc_path) + + jemalloc = ctypes.CDLL(jemalloc_path) + + def _mallctl( + name: str, read: bool = True, write: Optional[int] = None + ) -> Optional[int]: + """Wrapper around `mallctl` for reading and writing integers to + jemalloc. + + Args: + name: The name of the option to read from/write to. + read: Whether to try and read the value. + write: The value to write, if given. + + Returns: + The value read if `read` is True, otherwise None. + + Raises: + An exception if `mallctl` returns a non-zero error code. + """ + + input_var = None + input_var_ref = None + input_len_ref = None + if read: + input_var = ctypes.c_size_t(0) + input_len = ctypes.c_size_t(ctypes.sizeof(input_var)) + + input_var_ref = ctypes.byref(input_var) + input_len_ref = ctypes.byref(input_len) + + write_var_ref = None + write_len = ctypes.c_size_t(0) + if write is not None: + write_var = ctypes.c_size_t(write) + write_len = ctypes.c_size_t(ctypes.sizeof(write_var)) + + write_var_ref = ctypes.byref(write_var) + + # The interface is: + # + # int mallctl( + # const char *name, + # void *oldp, + # size_t *oldlenp, + # void *newp, + # size_t newlen + # ) + # + # Where oldp/oldlenp is a buffer where the old value will be written to + # (if not null), and newp/newlen is the buffer with the new value to set + # (if not null). Note that they're all references *except* newlen. + result = jemalloc.mallctl( + name.encode("ascii"), + input_var_ref, + input_len_ref, + write_var_ref, + write_len, + ) + + if result != 0: + raise Exception("Failed to call mallctl") + + if input_var is None: + return None + + return input_var.value + + def _jemalloc_refresh_stats() -> None: + """Request that jemalloc updates its internal statistics. This needs to + be called before querying for stats, otherwise it will return stale + values. + """ + try: + _mallctl("epoch", read=False, write=1) + except Exception as e: + logger.warning("Failed to reload jemalloc stats: %s", e) + + class JemallocCollector: + """Metrics for internal jemalloc stats.""" + + def collect(self): + _jemalloc_refresh_stats() + + g = GaugeMetricFamily( + "jemalloc_stats_app_memory_bytes", + "The stats reported by jemalloc", + labels=["type"], + ) + + # Read the relevant global stats from jemalloc. Note that these may + # not be accurate if python is configured to use its internal small + # object allocator (which is on by default, disable by setting the + # env `PYTHONMALLOC=malloc`). + # + # See the jemalloc manpage for details about what each value means, + # roughly: + # - allocated ─ Total number of bytes allocated by the app + # - active ─ Total number of bytes in active pages allocated by + # the application, this is bigger than `allocated`. + # - resident ─ Maximum number of bytes in physically resident data + # pages mapped by the allocator, comprising all pages dedicated + # to allocator metadata, pages backing active allocations, and + # unused dirty pages. This is bigger than `active`. + # - mapped ─ Total number of bytes in active extents mapped by the + # allocator. + # - metadata ─ Total number of bytes dedicated to jemalloc + # metadata. 
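+            # (Per the descriptions above: allocated <= active <= resident.)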
+ for t in ( + "allocated", + "active", + "resident", + "mapped", + "metadata", + ): + try: + value = _mallctl(f"stats.{t}") + except Exception as e: + # There was an error fetching the value, skip. + logger.warning("Failed to read jemalloc stats.%s: %s", t, e) + continue + + g.add_metric([t], value=value) + + yield g + + REGISTRY.register(JemallocCollector()) + + logger.debug("Added jemalloc stats") + + +def setup_jemalloc_stats(): + """Try to setup jemalloc stats, if jemalloc is loaded.""" + + try: + _setup_jemalloc_stats() + except Exception as e: + # This should only happen if we find the loaded jemalloc library, but + # fail to load it somehow (e.g. we somehow picked the wrong version). + logger.info("Failed to setup collector to record jemalloc stats: %s", e) From 25f43faa70f7cc58493b636c2702ae63395779dc Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 7 May 2021 10:22:05 +0100 Subject: [PATCH 116/222] Reorganise the database schema directories (#9932) The hope here is that by moving all the schema files into synapse/storage/schema, it gets a bit easier for newcomers to navigate. It certainly got easier for me to write a helpful README. There's more to do on that front, but I'll follow up with other PRs for that. --- changelog.d/9932.misc | 1 + .../main/schema/full_schemas/README.md | 21 -------- synapse/storage/prepare_database.py | 48 ++++++++++--------- synapse/storage/schema/README.md | 37 ++++++++++++++ synapse/storage/schema/__init__.py | 17 +++++++ .../delta/25/00background_updates.sql | 0 .../delta/35/00background_updates_add_col.sql | 0 .../delta/58/00background_update_ordering.sql | 0 .../{ => common}/full_schemas/54/full.sql | 0 .../schema/{ => common}/schema_version.sql | 0 .../schema => schema/main}/delta/12/v12.sql | 0 .../schema => schema/main}/delta/13/v13.sql | 0 .../schema => schema/main}/delta/14/v14.sql | 0 .../main}/delta/15/appservice_txns.sql | 0 .../main}/delta/15/presence_indices.sql | 0 .../schema => schema/main}/delta/15/v15.sql | 0 .../main}/delta/16/events_order_index.sql | 0 .../delta/16/remote_media_cache_index.sql | 0 .../main}/delta/16/remove_duplicates.sql | 0 .../main}/delta/16/room_alias_index.sql | 0 .../main}/delta/16/unique_constraints.sql | 0 .../schema => schema/main}/delta/16/users.sql | 0 .../main}/delta/17/drop_indexes.sql | 0 .../main}/delta/17/server_keys.sql | 0 .../main}/delta/17/user_threepids.sql | 0 .../delta/18/server_keys_bigger_ints.sql | 0 .../main}/delta/19/event_index.sql | 0 .../schema => schema/main}/delta/20/dummy.sql | 0 .../main}/delta/20/pushers.py | 0 .../main}/delta/21/end_to_end_keys.sql | 0 .../main}/delta/21/receipts.sql | 0 .../main}/delta/22/receipts_index.sql | 0 .../main}/delta/22/user_threepids_unique.sql | 0 .../main}/delta/24/stats_reporting.sql | 0 .../schema => schema/main}/delta/25/fts.py | 0 .../main}/delta/25/guest_access.sql | 0 .../main}/delta/25/history_visibility.sql | 0 .../schema => schema/main}/delta/25/tags.sql | 0 .../main}/delta/26/account_data.sql | 0 .../main}/delta/27/account_data.sql | 0 .../main}/delta/27/forgotten_memberships.sql | 0 .../schema => schema/main}/delta/27/ts.py | 0 .../main}/delta/28/event_push_actions.sql | 0 .../main}/delta/28/events_room_stream.sql | 0 .../main}/delta/28/public_roms_index.sql | 0 .../main}/delta/28/receipts_user_id_index.sql | 0 .../main}/delta/28/upgrade_times.sql | 0 .../main}/delta/28/users_is_guest.sql | 0 .../main}/delta/29/push_actions.sql | 0 .../main}/delta/30/alias_creator.sql | 0 
.../main}/delta/30/as_users.py | 0 .../main}/delta/30/deleted_pushers.sql | 0 .../main}/delta/30/presence_stream.sql | 0 .../main}/delta/30/public_rooms.sql | 0 .../main}/delta/30/push_rule_stream.sql | 0 .../delta/30/threepid_guest_access_tokens.sql | 0 .../main}/delta/31/invites.sql | 0 .../31/local_media_repository_url_cache.sql | 0 .../main}/delta/31/pushers.py | 0 .../main}/delta/31/pushers_index.sql | 0 .../main}/delta/31/search_update.py | 0 .../main}/delta/32/events.sql | 0 .../main}/delta/32/openid.sql | 0 .../main}/delta/32/pusher_throttle.sql | 0 .../main}/delta/32/remove_indices.sql | 0 .../main}/delta/32/reports.sql | 0 .../delta/33/access_tokens_device_index.sql | 0 .../main}/delta/33/devices.sql | 0 .../main}/delta/33/devices_for_e2e_keys.sql | 0 ...ices_for_e2e_keys_clear_unknown_device.sql | 0 .../main}/delta/33/event_fields.py | 0 .../main}/delta/33/remote_media_ts.py | 0 .../main}/delta/33/user_ips_index.sql | 0 .../main}/delta/34/appservice_stream.sql | 0 .../main}/delta/34/cache_stream.py | 0 .../main}/delta/34/device_inbox.sql | 0 .../delta/34/push_display_name_rename.sql | 0 .../main}/delta/34/received_txn_purge.py | 0 .../main}/delta/35/contains_url.sql | 0 .../main}/delta/35/device_outbox.sql | 0 .../main}/delta/35/device_stream_id.sql | 0 .../delta/35/event_push_actions_index.sql | 0 .../35/public_room_list_change_stream.sql | 0 .../main}/delta/35/stream_order_to_extrem.sql | 0 .../main}/delta/36/readd_public_rooms.sql | 0 .../main}/delta/37/remove_auth_idx.py | 0 .../main}/delta/37/user_threepids.sql | 0 .../main}/delta/38/postgres_fts_gist.sql | 0 .../main}/delta/39/appservice_room_list.sql | 0 .../delta/39/device_federation_stream_idx.sql | 0 .../main}/delta/39/event_push_index.sql | 0 .../delta/39/federation_out_position.sql | 0 .../main}/delta/39/membership_profile.sql | 0 .../main}/delta/40/current_state_idx.sql | 0 .../main}/delta/40/device_inbox.sql | 0 .../main}/delta/40/device_list_streams.sql | 0 .../main}/delta/40/event_push_summary.sql | 0 .../main}/delta/40/pushers.sql | 0 .../main}/delta/41/device_list_stream_idx.sql | 0 .../main}/delta/41/device_outbound_index.sql | 0 .../delta/41/event_search_event_id_idx.sql | 0 .../main}/delta/41/ratelimit.sql | 0 .../main}/delta/42/current_state_delta.sql | 0 .../main}/delta/42/device_list_last_id.sql | 0 .../main}/delta/42/event_auth_state_only.sql | 0 .../main}/delta/42/user_dir.py | 0 .../main}/delta/43/blocked_rooms.sql | 0 .../main}/delta/43/quarantine_media.sql | 0 .../main}/delta/43/url_cache.sql | 0 .../main}/delta/43/user_share.sql | 0 .../main}/delta/44/expire_url_cache.sql | 0 .../main}/delta/45/group_server.sql | 0 .../main}/delta/45/profile_cache.sql | 0 .../main}/delta/46/drop_refresh_tokens.sql | 0 .../delta/46/drop_unique_deleted_pushers.sql | 0 .../main}/delta/46/group_server.sql | 0 .../46/local_media_repository_url_idx.sql | 0 .../main}/delta/46/user_dir_null_room_ids.sql | 0 .../main}/delta/46/user_dir_typos.sql | 0 .../main}/delta/47/last_access_media.sql | 0 .../main}/delta/47/postgres_fts_gin.sql | 0 .../main}/delta/47/push_actions_staging.sql | 0 .../main}/delta/48/add_user_consent.sql | 0 .../delta/48/add_user_ips_last_seen_index.sql | 0 .../main}/delta/48/deactivated_users.sql | 0 .../main}/delta/48/group_unique_indexes.py | 0 .../main}/delta/48/groups_joinable.sql | 0 .../add_user_consent_server_notice_sent.sql | 0 .../main}/delta/49/add_user_daily_visits.sql | 0 .../49/add_user_ips_last_seen_only_index.sql | 0 .../delta/50/add_creation_ts_users_index.sql | 0 
.../main}/delta/50/erasure_store.sql | 0 .../delta/50/make_event_content_nullable.py | 0 .../main}/delta/51/e2e_room_keys.sql | 0 .../main}/delta/51/monthly_active_users.sql | 0 .../52/add_event_to_state_group_index.sql | 0 .../52/device_list_streams_unique_idx.sql | 0 .../main}/delta/52/e2e_room_keys.sql | 0 .../main}/delta/53/add_user_type_to_users.sql | 0 .../main}/delta/53/drop_sent_transactions.sql | 0 .../main}/delta/53/event_format_version.sql | 0 .../main}/delta/53/user_dir_populate.sql | 0 .../main}/delta/53/user_ips_index.sql | 0 .../main}/delta/53/user_share.sql | 0 .../main}/delta/53/user_threepid_id.sql | 0 .../main}/delta/53/users_in_public_rooms.sql | 0 .../54/account_validity_with_renewal.sql | 0 .../delta/54/add_validity_to_server_keys.sql | 0 .../delta/54/delete_forward_extremities.sql | 0 .../main}/delta/54/drop_legacy_tables.sql | 0 .../main}/delta/54/drop_presence_list.sql | 0 .../main}/delta/54/relations.sql | 0 .../schema => schema/main}/delta/54/stats.sql | 0 .../main}/delta/54/stats2.sql | 0 .../main}/delta/55/access_token_expiry.sql | 0 .../delta/55/track_threepid_validations.sql | 0 .../delta/55/users_alter_deactivated.sql | 0 .../delta/56/add_spans_to_device_lists.sql | 0 .../56/current_state_events_membership.sql | 0 .../current_state_events_membership_mk2.sql | 0 .../56/delete_keys_from_deleted_backups.sql | 0 .../delta/56/destinations_failure_ts.sql | 0 ...tinations_retry_interval_type.sql.postgres | 0 .../delta/56/device_stream_id_insert.sql | 0 .../main}/delta/56/devices_last_seen.sql | 0 .../delta/56/drop_unused_event_tables.sql | 0 .../main}/delta/56/event_expiry.sql | 0 .../main}/delta/56/event_labels.sql | 0 .../56/event_labels_background_update.sql | 0 .../main}/delta/56/fix_room_keys_index.sql | 0 .../main}/delta/56/hidden_devices.sql | 0 .../delta/56/hidden_devices_fix.sql.sqlite | 0 .../56/nuke_empty_communities_from_db.sql | 0 .../main}/delta/56/public_room_list_idx.sql | 0 .../main}/delta/56/redaction_censor.sql | 0 .../main}/delta/56/redaction_censor2.sql | 0 .../redaction_censor3_fix_update.sql.postgres | 0 .../main}/delta/56/redaction_censor4.sql | 0 ...remove_tombstoned_rooms_from_directory.sql | 0 .../main}/delta/56/room_key_etag.sql | 0 .../main}/delta/56/room_membership_idx.sql | 0 .../main}/delta/56/room_retention.sql | 0 .../main}/delta/56/signing_keys.sql | 0 .../56/signing_keys_nonunique_signatures.sql | 0 .../main}/delta/56/stats_separated.sql | 0 .../delta/56/unique_user_filter_index.py | 0 .../main}/delta/56/user_external_ids.sql | 0 .../delta/56/users_in_public_rooms_idx.sql | 0 .../57/delete_old_current_state_events.sql | 0 .../57/device_list_remote_cache_stale.sql | 0 .../delta/57/local_current_membership.py | 0 .../delta/57/remove_sent_outbound_pokes.sql | 0 .../main}/delta/57/rooms_version_column.sql | 0 .../57/rooms_version_column_2.sql.postgres | 0 .../57/rooms_version_column_2.sql.sqlite | 0 .../57/rooms_version_column_3.sql.postgres | 0 .../57/rooms_version_column_3.sql.sqlite | 0 .../delta/58/02remove_dup_outbound_pokes.sql | 0 .../main}/delta/58/03persist_ui_auth.sql | 0 .../delta/58/05cache_instance.sql.postgres | 0 .../main}/delta/58/06dlols_unique_idx.py | 0 ...ethod_to_thumbnail_constraint.sql.postgres | 0 ..._method_to_thumbnail_constraint.sql.sqlite | 0 .../main}/delta/58/07persist_ui_auth_ips.sql | 0 ...08_media_safe_from_quarantine.sql.postgres | 0 .../08_media_safe_from_quarantine.sql.sqlite | 0 .../main}/delta/58/09shadow_ban.sql | 0 .../10_pushrules_enabled_delete_obsolete.sql | 0 
.../58/10drop_local_rejections_stream.sql | 0 .../58/10federation_pos_instance_name.sql | 0 .../main}/delta/58/11dehydration.sql | 0 .../main}/delta/58/11fallback.sql | 0 .../main}/delta/58/11user_id_seq.py | 0 .../main}/delta/58/12room_stats.sql | 0 .../58/13remove_presence_allow_inbound.sql | 0 .../main}/delta/58/14events_instance_name.sql | 0 .../58/14events_instance_name.sql.postgres | 0 .../delta/58/15_catchup_destination_rooms.sql | 0 .../main}/delta/58/15unread_count.sql | 0 .../58/16populate_stats_process_rooms_fix.sql | 0 .../delta/58/17_catchup_last_successful.sql | 0 .../main}/delta/58/18stream_positions.sql | 0 .../delta/58/19instance_map.sql.postgres | 0 .../main}/delta/58/19txn_id.sql | 0 .../delta/58/20instance_name_event_tables.sql | 0 .../main}/delta/58/20user_daily_visits.sql | 0 .../main}/delta/58/21as_device_stream.sql | 0 .../delta/58/21drop_device_max_stream_id.sql | 0 .../main}/delta/58/22puppet_token.sql | 0 .../delta/58/22users_have_local_media.sql | 0 .../delta/58/23e2e_cross_signing_keys_idx.sql | 0 .../delta/58/24drop_event_json_index.sql | 0 .../58/25user_external_ids_user_id_idx.sql | 0 .../58/26access_token_last_validated.sql | 0 .../main}/delta/58/27local_invites.sql | 0 .../58/28drop_last_used_column.sql.postgres | 0 .../58/28drop_last_used_column.sql.sqlite | 0 .../main}/delta/59/01ignored_user.py | 0 .../main}/delta/59/02shard_send_to_device.sql | 0 ...shard_send_to_device_sequence.sql.postgres | 0 .../main}/delta/59/04_event_auth_chains.sql | 0 .../59/04_event_auth_chains.sql.postgres | 0 .../main}/delta/59/04drop_account_data.sql | 0 .../main}/delta/59/05cache_invalidation.sql | 0 .../main}/delta/59/06chain_cover_index.sql | 0 .../main}/delta/59/06shard_account_data.sql | 0 .../59/06shard_account_data.sql.postgres | 0 .../delta/59/07shard_account_data_fix.sql | 0 ...elete_pushers_for_deactivated_accounts.sql | 0 .../main}/delta/59/08delete_stale_pushers.sql | 0 .../delta/59/09rejected_events_metadata.sql | 0 .../delta/59/10delete_purged_chain_cover.sql | 0 .../11drop_thumbnail_constraint.sql.postgres | 0 .../12account_validity_token_used_ts_ms.sql | 0 .../delta/59/12presence_stream_instance.sql | 0 ...2presence_stream_instance_seq.sql.postgres | 0 .../full_schemas/16/application_services.sql | 0 .../main}/full_schemas/16/event_edges.sql | 0 .../full_schemas/16/event_signatures.sql | 0 .../main}/full_schemas/16/im.sql | 0 .../main}/full_schemas/16/keys.sql | 0 .../full_schemas/16/media_repository.sql | 0 .../main}/full_schemas/16/presence.sql | 0 .../main}/full_schemas/16/profiles.sql | 0 .../main}/full_schemas/16/push.sql | 0 .../main}/full_schemas/16/redactions.sql | 0 .../main}/full_schemas/16/room_aliases.sql | 0 .../main}/full_schemas/16/state.sql | 0 .../main}/full_schemas/16/transactions.sql | 0 .../main}/full_schemas/16/users.sql | 0 .../main}/full_schemas/54/full.sql.postgres | 0 .../main}/full_schemas/54/full.sql.sqlite | 0 .../full_schemas/54/stream_positions.sql | 0 .../state}/delta/23/drop_state_index.sql | 0 .../state}/delta/30/state_stream.sql | 0 .../state}/delta/32/remove_state_indices.sql | 0 .../state}/delta/35/add_state_index.sql | 0 .../state}/delta/35/state.sql | 0 .../state}/delta/35/state_dedupe.sql | 0 .../state}/delta/47/state_group_seq.py | 0 .../state}/delta/56/state_group_room_idx.sql | 0 .../state}/full_schemas/54/full.sql | 0 .../full_schemas/54/sequence.sql.postgres | 0 tests/storage/test_cleanup_extrems.py | 4 +- 284 files changed, 81 insertions(+), 47 deletions(-) create mode 100644 changelog.d/9932.misc delete mode 
100644 synapse/storage/databases/main/schema/full_schemas/README.md create mode 100644 synapse/storage/schema/README.md create mode 100644 synapse/storage/schema/__init__.py rename synapse/storage/schema/{ => common}/delta/25/00background_updates.sql (100%) rename synapse/storage/schema/{ => common}/delta/35/00background_updates_add_col.sql (100%) rename synapse/storage/schema/{ => common}/delta/58/00background_update_ordering.sql (100%) rename synapse/storage/schema/{ => common}/full_schemas/54/full.sql (100%) rename synapse/storage/schema/{ => common}/schema_version.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/12/v12.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/13/v13.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/14/v14.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/15/appservice_txns.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/15/presence_indices.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/15/v15.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/16/events_order_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/16/remote_media_cache_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/16/remove_duplicates.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/16/room_alias_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/16/unique_constraints.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/16/users.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/17/drop_indexes.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/17/server_keys.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/17/user_threepids.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/18/server_keys_bigger_ints.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/19/event_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/20/dummy.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/20/pushers.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/21/end_to_end_keys.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/21/receipts.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/22/receipts_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/22/user_threepids_unique.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/24/stats_reporting.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/25/fts.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/25/guest_access.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/25/history_visibility.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/25/tags.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/26/account_data.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/27/account_data.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/27/forgotten_memberships.sql (100%) rename 
synapse/storage/{databases/main/schema => schema/main}/delta/27/ts.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/28/event_push_actions.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/28/events_room_stream.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/28/public_roms_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/28/receipts_user_id_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/28/upgrade_times.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/28/users_is_guest.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/29/push_actions.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/30/alias_creator.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/30/as_users.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/30/deleted_pushers.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/30/presence_stream.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/30/public_rooms.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/30/push_rule_stream.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/30/threepid_guest_access_tokens.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/31/invites.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/31/local_media_repository_url_cache.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/31/pushers.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/31/pushers_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/31/search_update.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/32/events.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/32/openid.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/32/pusher_throttle.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/32/remove_indices.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/32/reports.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/33/access_tokens_device_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/33/devices.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/33/devices_for_e2e_keys.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/33/devices_for_e2e_keys_clear_unknown_device.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/33/event_fields.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/33/remote_media_ts.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/33/user_ips_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/34/appservice_stream.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/34/cache_stream.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/34/device_inbox.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/34/push_display_name_rename.sql (100%) rename 
synapse/storage/{databases/main/schema => schema/main}/delta/34/received_txn_purge.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/35/contains_url.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/35/device_outbox.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/35/device_stream_id.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/35/event_push_actions_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/35/public_room_list_change_stream.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/35/stream_order_to_extrem.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/36/readd_public_rooms.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/37/remove_auth_idx.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/37/user_threepids.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/38/postgres_fts_gist.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/39/appservice_room_list.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/39/device_federation_stream_idx.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/39/event_push_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/39/federation_out_position.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/39/membership_profile.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/40/current_state_idx.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/40/device_inbox.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/40/device_list_streams.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/40/event_push_summary.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/40/pushers.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/41/device_list_stream_idx.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/41/device_outbound_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/41/event_search_event_id_idx.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/41/ratelimit.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/42/current_state_delta.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/42/device_list_last_id.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/42/event_auth_state_only.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/42/user_dir.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/43/blocked_rooms.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/43/quarantine_media.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/43/url_cache.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/43/user_share.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/44/expire_url_cache.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/45/group_server.sql (100%) rename synapse/storage/{databases/main/schema => 
schema/main}/delta/45/profile_cache.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/46/drop_refresh_tokens.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/46/drop_unique_deleted_pushers.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/46/group_server.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/46/local_media_repository_url_idx.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/46/user_dir_null_room_ids.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/46/user_dir_typos.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/47/last_access_media.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/47/postgres_fts_gin.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/47/push_actions_staging.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/48/add_user_consent.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/48/add_user_ips_last_seen_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/48/deactivated_users.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/48/group_unique_indexes.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/48/groups_joinable.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/49/add_user_consent_server_notice_sent.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/49/add_user_daily_visits.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/49/add_user_ips_last_seen_only_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/50/add_creation_ts_users_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/50/erasure_store.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/50/make_event_content_nullable.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/51/e2e_room_keys.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/51/monthly_active_users.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/52/add_event_to_state_group_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/52/device_list_streams_unique_idx.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/52/e2e_room_keys.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/53/add_user_type_to_users.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/53/drop_sent_transactions.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/53/event_format_version.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/53/user_dir_populate.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/53/user_ips_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/53/user_share.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/53/user_threepid_id.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/53/users_in_public_rooms.sql (100%) rename synapse/storage/{databases/main/schema => 
schema/main}/delta/54/account_validity_with_renewal.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/54/add_validity_to_server_keys.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/54/delete_forward_extremities.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/54/drop_legacy_tables.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/54/drop_presence_list.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/54/relations.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/54/stats.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/54/stats2.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/55/access_token_expiry.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/55/track_threepid_validations.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/55/users_alter_deactivated.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/add_spans_to_device_lists.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/current_state_events_membership.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/current_state_events_membership_mk2.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/delete_keys_from_deleted_backups.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/destinations_failure_ts.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/destinations_retry_interval_type.sql.postgres (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/device_stream_id_insert.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/devices_last_seen.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/drop_unused_event_tables.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/event_expiry.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/event_labels.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/event_labels_background_update.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/fix_room_keys_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/hidden_devices.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/hidden_devices_fix.sql.sqlite (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/nuke_empty_communities_from_db.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/public_room_list_idx.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/redaction_censor.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/redaction_censor2.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/redaction_censor3_fix_update.sql.postgres (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/redaction_censor4.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/remove_tombstoned_rooms_from_directory.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/room_key_etag.sql (100%) rename 
synapse/storage/{databases/main/schema => schema/main}/delta/56/room_membership_idx.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/room_retention.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/signing_keys.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/signing_keys_nonunique_signatures.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/stats_separated.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/unique_user_filter_index.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/user_external_ids.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/56/users_in_public_rooms_idx.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/57/delete_old_current_state_events.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/57/device_list_remote_cache_stale.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/57/local_current_membership.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/57/remove_sent_outbound_pokes.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/57/rooms_version_column.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/57/rooms_version_column_2.sql.postgres (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/57/rooms_version_column_2.sql.sqlite (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/57/rooms_version_column_3.sql.postgres (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/57/rooms_version_column_3.sql.sqlite (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/02remove_dup_outbound_pokes.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/03persist_ui_auth.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/05cache_instance.sql.postgres (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/06dlols_unique_idx.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/07add_method_to_thumbnail_constraint.sql.postgres (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/07add_method_to_thumbnail_constraint.sql.sqlite (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/07persist_ui_auth_ips.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/08_media_safe_from_quarantine.sql.postgres (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/08_media_safe_from_quarantine.sql.sqlite (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/09shadow_ban.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/10_pushrules_enabled_delete_obsolete.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/10drop_local_rejections_stream.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/10federation_pos_instance_name.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/11dehydration.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/11fallback.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/11user_id_seq.py (100%) 
rename synapse/storage/{databases/main/schema => schema/main}/delta/58/12room_stats.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/13remove_presence_allow_inbound.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/14events_instance_name.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/14events_instance_name.sql.postgres (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/15_catchup_destination_rooms.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/15unread_count.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/16populate_stats_process_rooms_fix.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/17_catchup_last_successful.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/18stream_positions.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/19instance_map.sql.postgres (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/19txn_id.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/20instance_name_event_tables.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/20user_daily_visits.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/21as_device_stream.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/21drop_device_max_stream_id.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/22puppet_token.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/22users_have_local_media.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/23e2e_cross_signing_keys_idx.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/24drop_event_json_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/25user_external_ids_user_id_idx.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/26access_token_last_validated.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/27local_invites.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/28drop_last_used_column.sql.postgres (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/58/28drop_last_used_column.sql.sqlite (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/01ignored_user.py (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/02shard_send_to_device.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/03shard_send_to_device_sequence.sql.postgres (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/04_event_auth_chains.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/04_event_auth_chains.sql.postgres (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/04drop_account_data.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/05cache_invalidation.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/06chain_cover_index.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/06shard_account_data.sql (100%) rename synapse/storage/{databases/main/schema => 
schema/main}/delta/59/06shard_account_data.sql.postgres (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/07shard_account_data_fix.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/08delete_pushers_for_deactivated_accounts.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/08delete_stale_pushers.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/09rejected_events_metadata.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/10delete_purged_chain_cover.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/11drop_thumbnail_constraint.sql.postgres (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/12account_validity_token_used_ts_ms.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/12presence_stream_instance.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/delta/59/12presence_stream_instance_seq.sql.postgres (100%) rename synapse/storage/{databases/main/schema => schema/main}/full_schemas/16/application_services.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/full_schemas/16/event_edges.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/full_schemas/16/event_signatures.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/full_schemas/16/im.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/full_schemas/16/keys.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/full_schemas/16/media_repository.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/full_schemas/16/presence.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/full_schemas/16/profiles.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/full_schemas/16/push.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/full_schemas/16/redactions.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/full_schemas/16/room_aliases.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/full_schemas/16/state.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/full_schemas/16/transactions.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/full_schemas/16/users.sql (100%) rename synapse/storage/{databases/main/schema => schema/main}/full_schemas/54/full.sql.postgres (100%) rename synapse/storage/{databases/main/schema => schema/main}/full_schemas/54/full.sql.sqlite (100%) rename synapse/storage/{databases/main/schema => schema/main}/full_schemas/54/stream_positions.sql (100%) rename synapse/storage/{databases/state/schema => schema/state}/delta/23/drop_state_index.sql (100%) rename synapse/storage/{databases/state/schema => schema/state}/delta/30/state_stream.sql (100%) rename synapse/storage/{databases/state/schema => schema/state}/delta/32/remove_state_indices.sql (100%) rename synapse/storage/{databases/state/schema => schema/state}/delta/35/add_state_index.sql (100%) rename synapse/storage/{databases/state/schema => schema/state}/delta/35/state.sql (100%) rename synapse/storage/{databases/state/schema => schema/state}/delta/35/state_dedupe.sql (100%) rename synapse/storage/{databases/state/schema => schema/state}/delta/47/state_group_seq.py (100%) rename synapse/storage/{databases/state/schema => 
schema/state}/delta/56/state_group_room_idx.sql (100%) rename synapse/storage/{databases/state/schema => schema/state}/full_schemas/54/full.sql (100%) rename synapse/storage/{databases/state/schema => schema/state}/full_schemas/54/sequence.sql.postgres (100%) diff --git a/changelog.d/9932.misc b/changelog.d/9932.misc new file mode 100644 index 000000000000..9e16a3617320 --- /dev/null +++ b/changelog.d/9932.misc @@ -0,0 +1 @@ +Move database schema files into a common directory. diff --git a/synapse/storage/databases/main/schema/full_schemas/README.md b/synapse/storage/databases/main/schema/full_schemas/README.md deleted file mode 100644 index c00f28719091..000000000000 --- a/synapse/storage/databases/main/schema/full_schemas/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# Synapse Database Schemas - -These schemas are used as a basis to create brand new Synapse databases, on both -SQLite3 and Postgres. - -## Building full schema dumps - -If you want to recreate these schemas, they need to be made from a database that -has had all background updates run. - -To do so, use `scripts-dev/make_full_schema.sh`. This will produce new -`full.sql.postgres ` and `full.sql.sqlite` files. - -Ensure postgres is installed and your user has the ability to run bash commands -such as `createdb`, then call - - ./scripts-dev/make_full_schema.sh -p postgres_username -o output_dir/ - -There are currently two folders with full-schema snapshots. `16` is a snapshot -from 2015, for historical reference. The other contains the most recent full -schema snapshot. diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index 7a2cbee42600..3799d46734ae 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -26,16 +26,13 @@ from synapse.storage.database import LoggingDatabaseConnection from synapse.storage.engines import BaseDatabaseEngine from synapse.storage.engines.postgres import PostgresEngine +from synapse.storage.schema import SCHEMA_VERSION from synapse.storage.types import Cursor logger = logging.getLogger(__name__) -# Remember to update this number every time a change is made to database -# schema files, so the users will be informed on server restarts. -SCHEMA_VERSION = 59 - -dir_path = os.path.abspath(os.path.dirname(__file__)) +schema_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "schema") class PrepareDatabaseException(Exception): @@ -167,7 +164,14 @@ def _setup_new_database( Example directory structure: - schema/ + schema/ + common/ + delta/ + ... + full_schemas/ + 11/ + foo.sql + main/ delta/ ... full_schemas/ @@ -175,15 +179,14 @@ test.sql ... 11/ - foo.sql bar.sql ... In the example foo.sql and bar.sql would be run, and then any delta files for versions strictly greater than 11. - Note: we apply the full schemas and deltas from the top level `schema/` - folder as well those in the data stores specified. + Note: we apply the full schemas and deltas from the `schema/common` + folder as well as those in the databases specified. Args: cur: a database cursor @@ -195,12 +198,12 @@ # configured to our liking.
database_engine.check_new_database(cur) - current_dir = os.path.join(dir_path, "schema", "full_schemas") + full_schemas_dir = os.path.join(schema_path, "common", "full_schemas") # First we find the highest full schema version we have valid_versions = [] - for filename in os.listdir(current_dir): + for filename in os.listdir(full_schemas_dir): try: ver = int(filename) except ValueError: @@ -218,15 +221,13 @@ logger.debug("Initialising schema v%d", max_current_ver) - # Now lets find all the full schema files, both in the global schema and - # in data store schemas. - directories = [os.path.join(current_dir, str(max_current_ver))] + # Now let's find all the full schema files, both in the common schema and + # in database schemas. + directories = [os.path.join(full_schemas_dir, str(max_current_ver))] directories.extend( os.path.join( - dir_path, - "databases", + schema_path, database, - "schema", "full_schemas", str(max_current_ver), ) @@ -357,6 +358,9 @@ check_database_before_upgrade(cur, database_engine, config) start_ver = current_version + + # if we got to this schema version by running a full_schema rather than a series + # of deltas, we should not run the deltas for this version. if not upgraded: start_ver += 1 @@ -385,12 +389,10 @@ # directories for schema updates. # First we find the directories to search in - delta_dir = os.path.join(dir_path, "schema", "delta", str(v)) + delta_dir = os.path.join(schema_path, "common", "delta", str(v)) directories = [delta_dir] for database in databases: - directories.append( - os.path.join(dir_path, "databases", database, "schema", "delta", str(v)) - ) + directories.append(os.path.join(schema_path, database, "delta", str(v))) # Used to check if we have any duplicate file names file_name_counter = Counter() # type: CounterType[str] @@ -621,8 +623,8 @@ def _get_or_create_schema_state( txn: Cursor, database_engine: BaseDatabaseEngine ) -> Optional[Tuple[int, List[str], bool]]: # Bluntly try creating the schema_version tables. - schema_path = os.path.join(dir_path, "schema", "schema_version.sql") - executescript(txn, schema_path) + sql_path = os.path.join(schema_path, "common", "schema_version.sql") + executescript(txn, sql_path) txn.execute("SELECT version, upgraded FROM schema_version") row = txn.fetchone() diff --git a/synapse/storage/schema/README.md b/synapse/storage/schema/README.md new file mode 100644 index 000000000000..030153db64a8 --- /dev/null +++ b/synapse/storage/schema/README.md @@ -0,0 +1,37 @@ +# Synapse Database Schemas + +This directory contains the schema files used to build Synapse databases. + +Synapse supports splitting its datastore across multiple physical databases (which can +be useful for large installations), and the schema files are therefore split according +to the logical database they apply to. + +At the time of writing, the following "logical" databases are supported: + +* `state` - used to store Matrix room state (more specifically, `state_groups`, + their relationships and contents.) +* `main` - stores everything else. + +Additionally, the `common` directory contains schema files for tables which must be +present on *all* physical databases. + +## Full schema dumps + +In the `full_schemas` directories, only the most recently-numbered snapshot is useful +(`54` at the time of writing). Older snapshots (eg, `16`) are present for historical +reference only.
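+
+Putting this together, the on-disk layout is roughly as follows (an
+illustrative sketch, not an exhaustive listing):
+
+    synapse/storage/schema/
+        common/          # schema shared by *all* physical databases
+        main/            # the "main" logical database
+        state/           # the "state" logical database
+
+with each of those directories containing `delta/` and `full_schemas/`
+subdirectories.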
+ +## Building full schema dumps + +If you want to recreate these schemas, they need to be made from a database that +has had all background updates run. + +To do so, use `scripts-dev/make_full_schema.sh`. This will produce new +`full.sql.postgres` and `full.sql.sqlite` files. + +Ensure postgres is installed, then run: + + ./scripts-dev/make_full_schema.sh -p postgres_username -o output_dir/ + +NB at the time of writing, this script predates the split into separate `state`/`main` +databases so will require updates to handle that correctly. diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py new file mode 100644 index 000000000000..f0d9f2316762 --- /dev/null +++ b/synapse/storage/schema/__init__.py @@ -0,0 +1,17 @@ +# Copyright 2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Remember to update this number every time a change is made to database +# schema files, so the users will be informed on server restarts. +SCHEMA_VERSION = 59 diff --git a/synapse/storage/schema/delta/25/00background_updates.sql b/synapse/storage/schema/common/delta/25/00background_updates.sql similarity index 100% rename from synapse/storage/schema/delta/25/00background_updates.sql rename to synapse/storage/schema/common/delta/25/00background_updates.sql diff --git a/synapse/storage/schema/delta/35/00background_updates_add_col.sql b/synapse/storage/schema/common/delta/35/00background_updates_add_col.sql similarity index 100% rename from synapse/storage/schema/delta/35/00background_updates_add_col.sql rename to synapse/storage/schema/common/delta/35/00background_updates_add_col.sql diff --git a/synapse/storage/schema/delta/58/00background_update_ordering.sql b/synapse/storage/schema/common/delta/58/00background_update_ordering.sql similarity index 100% rename from synapse/storage/schema/delta/58/00background_update_ordering.sql rename to synapse/storage/schema/common/delta/58/00background_update_ordering.sql diff --git a/synapse/storage/schema/full_schemas/54/full.sql b/synapse/storage/schema/common/full_schemas/54/full.sql similarity index 100% rename from synapse/storage/schema/full_schemas/54/full.sql rename to synapse/storage/schema/common/full_schemas/54/full.sql diff --git a/synapse/storage/schema/schema_version.sql b/synapse/storage/schema/common/schema_version.sql similarity index 100% rename from synapse/storage/schema/schema_version.sql rename to synapse/storage/schema/common/schema_version.sql diff --git a/synapse/storage/databases/main/schema/delta/12/v12.sql b/synapse/storage/schema/main/delta/12/v12.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/12/v12.sql rename to synapse/storage/schema/main/delta/12/v12.sql diff --git a/synapse/storage/databases/main/schema/delta/13/v13.sql b/synapse/storage/schema/main/delta/13/v13.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/13/v13.sql rename to synapse/storage/schema/main/delta/13/v13.sql diff --git 
a/synapse/storage/databases/main/schema/delta/14/v14.sql b/synapse/storage/schema/main/delta/14/v14.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/14/v14.sql
rename to synapse/storage/schema/main/delta/14/v14.sql
diff --git a/synapse/storage/databases/main/schema/delta/15/appservice_txns.sql b/synapse/storage/schema/main/delta/15/appservice_txns.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/15/appservice_txns.sql
rename to synapse/storage/schema/main/delta/15/appservice_txns.sql
diff --git a/synapse/storage/databases/main/schema/delta/15/presence_indices.sql b/synapse/storage/schema/main/delta/15/presence_indices.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/15/presence_indices.sql
rename to synapse/storage/schema/main/delta/15/presence_indices.sql
diff --git a/synapse/storage/databases/main/schema/delta/15/v15.sql b/synapse/storage/schema/main/delta/15/v15.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/15/v15.sql
rename to synapse/storage/schema/main/delta/15/v15.sql
diff --git a/synapse/storage/databases/main/schema/delta/16/events_order_index.sql b/synapse/storage/schema/main/delta/16/events_order_index.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/16/events_order_index.sql
rename to synapse/storage/schema/main/delta/16/events_order_index.sql
diff --git a/synapse/storage/databases/main/schema/delta/16/remote_media_cache_index.sql b/synapse/storage/schema/main/delta/16/remote_media_cache_index.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/16/remote_media_cache_index.sql
rename to synapse/storage/schema/main/delta/16/remote_media_cache_index.sql
diff --git a/synapse/storage/databases/main/schema/delta/16/remove_duplicates.sql b/synapse/storage/schema/main/delta/16/remove_duplicates.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/16/remove_duplicates.sql
rename to synapse/storage/schema/main/delta/16/remove_duplicates.sql
diff --git a/synapse/storage/databases/main/schema/delta/16/room_alias_index.sql b/synapse/storage/schema/main/delta/16/room_alias_index.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/16/room_alias_index.sql
rename to synapse/storage/schema/main/delta/16/room_alias_index.sql
diff --git a/synapse/storage/databases/main/schema/delta/16/unique_constraints.sql b/synapse/storage/schema/main/delta/16/unique_constraints.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/16/unique_constraints.sql
rename to synapse/storage/schema/main/delta/16/unique_constraints.sql
diff --git a/synapse/storage/databases/main/schema/delta/16/users.sql b/synapse/storage/schema/main/delta/16/users.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/16/users.sql
rename to synapse/storage/schema/main/delta/16/users.sql
diff --git a/synapse/storage/databases/main/schema/delta/17/drop_indexes.sql b/synapse/storage/schema/main/delta/17/drop_indexes.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/17/drop_indexes.sql
rename to synapse/storage/schema/main/delta/17/drop_indexes.sql
diff --git a/synapse/storage/databases/main/schema/delta/17/server_keys.sql b/synapse/storage/schema/main/delta/17/server_keys.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/17/server_keys.sql
rename to synapse/storage/schema/main/delta/17/server_keys.sql
diff --git a/synapse/storage/databases/main/schema/delta/17/user_threepids.sql b/synapse/storage/schema/main/delta/17/user_threepids.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/17/user_threepids.sql
rename to synapse/storage/schema/main/delta/17/user_threepids.sql
diff --git a/synapse/storage/databases/main/schema/delta/18/server_keys_bigger_ints.sql b/synapse/storage/schema/main/delta/18/server_keys_bigger_ints.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/18/server_keys_bigger_ints.sql
rename to synapse/storage/schema/main/delta/18/server_keys_bigger_ints.sql
diff --git a/synapse/storage/databases/main/schema/delta/19/event_index.sql b/synapse/storage/schema/main/delta/19/event_index.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/19/event_index.sql
rename to synapse/storage/schema/main/delta/19/event_index.sql
diff --git a/synapse/storage/databases/main/schema/delta/20/dummy.sql b/synapse/storage/schema/main/delta/20/dummy.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/20/dummy.sql
rename to synapse/storage/schema/main/delta/20/dummy.sql
diff --git a/synapse/storage/databases/main/schema/delta/20/pushers.py b/synapse/storage/schema/main/delta/20/pushers.py
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/20/pushers.py
rename to synapse/storage/schema/main/delta/20/pushers.py
diff --git a/synapse/storage/databases/main/schema/delta/21/end_to_end_keys.sql b/synapse/storage/schema/main/delta/21/end_to_end_keys.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/21/end_to_end_keys.sql
rename to synapse/storage/schema/main/delta/21/end_to_end_keys.sql
diff --git a/synapse/storage/databases/main/schema/delta/21/receipts.sql b/synapse/storage/schema/main/delta/21/receipts.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/21/receipts.sql
rename to synapse/storage/schema/main/delta/21/receipts.sql
diff --git a/synapse/storage/databases/main/schema/delta/22/receipts_index.sql b/synapse/storage/schema/main/delta/22/receipts_index.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/22/receipts_index.sql
rename to synapse/storage/schema/main/delta/22/receipts_index.sql
diff --git a/synapse/storage/databases/main/schema/delta/22/user_threepids_unique.sql b/synapse/storage/schema/main/delta/22/user_threepids_unique.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/22/user_threepids_unique.sql
rename to synapse/storage/schema/main/delta/22/user_threepids_unique.sql
diff --git a/synapse/storage/databases/main/schema/delta/24/stats_reporting.sql b/synapse/storage/schema/main/delta/24/stats_reporting.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/24/stats_reporting.sql
rename to synapse/storage/schema/main/delta/24/stats_reporting.sql
diff --git a/synapse/storage/databases/main/schema/delta/25/fts.py b/synapse/storage/schema/main/delta/25/fts.py
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/25/fts.py
rename to synapse/storage/schema/main/delta/25/fts.py
diff --git a/synapse/storage/databases/main/schema/delta/25/guest_access.sql b/synapse/storage/schema/main/delta/25/guest_access.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/25/guest_access.sql
rename to synapse/storage/schema/main/delta/25/guest_access.sql
diff --git a/synapse/storage/databases/main/schema/delta/25/history_visibility.sql b/synapse/storage/schema/main/delta/25/history_visibility.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/25/history_visibility.sql
rename to synapse/storage/schema/main/delta/25/history_visibility.sql
diff --git a/synapse/storage/databases/main/schema/delta/25/tags.sql b/synapse/storage/schema/main/delta/25/tags.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/25/tags.sql
rename to synapse/storage/schema/main/delta/25/tags.sql
diff --git a/synapse/storage/databases/main/schema/delta/26/account_data.sql b/synapse/storage/schema/main/delta/26/account_data.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/26/account_data.sql
rename to synapse/storage/schema/main/delta/26/account_data.sql
diff --git a/synapse/storage/databases/main/schema/delta/27/account_data.sql b/synapse/storage/schema/main/delta/27/account_data.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/27/account_data.sql
rename to synapse/storage/schema/main/delta/27/account_data.sql
diff --git a/synapse/storage/databases/main/schema/delta/27/forgotten_memberships.sql b/synapse/storage/schema/main/delta/27/forgotten_memberships.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/27/forgotten_memberships.sql
rename to synapse/storage/schema/main/delta/27/forgotten_memberships.sql
diff --git a/synapse/storage/databases/main/schema/delta/27/ts.py b/synapse/storage/schema/main/delta/27/ts.py
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/27/ts.py
rename to synapse/storage/schema/main/delta/27/ts.py
diff --git a/synapse/storage/databases/main/schema/delta/28/event_push_actions.sql b/synapse/storage/schema/main/delta/28/event_push_actions.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/28/event_push_actions.sql
rename to synapse/storage/schema/main/delta/28/event_push_actions.sql
diff --git a/synapse/storage/databases/main/schema/delta/28/events_room_stream.sql b/synapse/storage/schema/main/delta/28/events_room_stream.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/28/events_room_stream.sql
rename to synapse/storage/schema/main/delta/28/events_room_stream.sql
diff --git a/synapse/storage/databases/main/schema/delta/28/public_roms_index.sql b/synapse/storage/schema/main/delta/28/public_roms_index.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/28/public_roms_index.sql
rename to synapse/storage/schema/main/delta/28/public_roms_index.sql
diff --git a/synapse/storage/databases/main/schema/delta/28/receipts_user_id_index.sql b/synapse/storage/schema/main/delta/28/receipts_user_id_index.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/28/receipts_user_id_index.sql
rename to synapse/storage/schema/main/delta/28/receipts_user_id_index.sql
diff --git a/synapse/storage/databases/main/schema/delta/28/upgrade_times.sql b/synapse/storage/schema/main/delta/28/upgrade_times.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/28/upgrade_times.sql
rename to synapse/storage/schema/main/delta/28/upgrade_times.sql
diff --git a/synapse/storage/databases/main/schema/delta/28/users_is_guest.sql b/synapse/storage/schema/main/delta/28/users_is_guest.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/28/users_is_guest.sql
rename to synapse/storage/schema/main/delta/28/users_is_guest.sql
diff --git a/synapse/storage/databases/main/schema/delta/29/push_actions.sql b/synapse/storage/schema/main/delta/29/push_actions.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/29/push_actions.sql
rename to synapse/storage/schema/main/delta/29/push_actions.sql
diff --git a/synapse/storage/databases/main/schema/delta/30/alias_creator.sql b/synapse/storage/schema/main/delta/30/alias_creator.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/30/alias_creator.sql
rename to synapse/storage/schema/main/delta/30/alias_creator.sql
diff --git a/synapse/storage/databases/main/schema/delta/30/as_users.py b/synapse/storage/schema/main/delta/30/as_users.py
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/30/as_users.py
rename to synapse/storage/schema/main/delta/30/as_users.py
diff --git a/synapse/storage/databases/main/schema/delta/30/deleted_pushers.sql b/synapse/storage/schema/main/delta/30/deleted_pushers.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/30/deleted_pushers.sql
rename to synapse/storage/schema/main/delta/30/deleted_pushers.sql
diff --git a/synapse/storage/databases/main/schema/delta/30/presence_stream.sql b/synapse/storage/schema/main/delta/30/presence_stream.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/30/presence_stream.sql
rename to synapse/storage/schema/main/delta/30/presence_stream.sql
diff --git a/synapse/storage/databases/main/schema/delta/30/public_rooms.sql b/synapse/storage/schema/main/delta/30/public_rooms.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/30/public_rooms.sql
rename to synapse/storage/schema/main/delta/30/public_rooms.sql
diff --git a/synapse/storage/databases/main/schema/delta/30/push_rule_stream.sql b/synapse/storage/schema/main/delta/30/push_rule_stream.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/30/push_rule_stream.sql
rename to synapse/storage/schema/main/delta/30/push_rule_stream.sql
diff --git a/synapse/storage/databases/main/schema/delta/30/threepid_guest_access_tokens.sql b/synapse/storage/schema/main/delta/30/threepid_guest_access_tokens.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/30/threepid_guest_access_tokens.sql
rename to synapse/storage/schema/main/delta/30/threepid_guest_access_tokens.sql
diff --git a/synapse/storage/databases/main/schema/delta/31/invites.sql b/synapse/storage/schema/main/delta/31/invites.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/31/invites.sql
rename to synapse/storage/schema/main/delta/31/invites.sql
diff --git a/synapse/storage/databases/main/schema/delta/31/local_media_repository_url_cache.sql b/synapse/storage/schema/main/delta/31/local_media_repository_url_cache.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/31/local_media_repository_url_cache.sql
rename to synapse/storage/schema/main/delta/31/local_media_repository_url_cache.sql
diff --git a/synapse/storage/databases/main/schema/delta/31/pushers.py b/synapse/storage/schema/main/delta/31/pushers.py
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/31/pushers.py
rename to synapse/storage/schema/main/delta/31/pushers.py
diff --git a/synapse/storage/databases/main/schema/delta/31/pushers_index.sql b/synapse/storage/schema/main/delta/31/pushers_index.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/31/pushers_index.sql
rename to synapse/storage/schema/main/delta/31/pushers_index.sql
diff --git a/synapse/storage/databases/main/schema/delta/31/search_update.py b/synapse/storage/schema/main/delta/31/search_update.py
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/31/search_update.py
rename to synapse/storage/schema/main/delta/31/search_update.py
diff --git a/synapse/storage/databases/main/schema/delta/32/events.sql b/synapse/storage/schema/main/delta/32/events.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/32/events.sql
rename to synapse/storage/schema/main/delta/32/events.sql
diff --git a/synapse/storage/databases/main/schema/delta/32/openid.sql b/synapse/storage/schema/main/delta/32/openid.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/32/openid.sql
rename to synapse/storage/schema/main/delta/32/openid.sql
diff --git a/synapse/storage/databases/main/schema/delta/32/pusher_throttle.sql b/synapse/storage/schema/main/delta/32/pusher_throttle.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/32/pusher_throttle.sql
rename to synapse/storage/schema/main/delta/32/pusher_throttle.sql
diff --git a/synapse/storage/databases/main/schema/delta/32/remove_indices.sql b/synapse/storage/schema/main/delta/32/remove_indices.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/32/remove_indices.sql
rename to synapse/storage/schema/main/delta/32/remove_indices.sql
diff --git a/synapse/storage/databases/main/schema/delta/32/reports.sql b/synapse/storage/schema/main/delta/32/reports.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/32/reports.sql
rename to synapse/storage/schema/main/delta/32/reports.sql
diff --git a/synapse/storage/databases/main/schema/delta/33/access_tokens_device_index.sql b/synapse/storage/schema/main/delta/33/access_tokens_device_index.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/33/access_tokens_device_index.sql
rename to synapse/storage/schema/main/delta/33/access_tokens_device_index.sql
diff --git a/synapse/storage/databases/main/schema/delta/33/devices.sql b/synapse/storage/schema/main/delta/33/devices.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/33/devices.sql
rename to synapse/storage/schema/main/delta/33/devices.sql
diff --git a/synapse/storage/databases/main/schema/delta/33/devices_for_e2e_keys.sql b/synapse/storage/schema/main/delta/33/devices_for_e2e_keys.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/33/devices_for_e2e_keys.sql
rename to synapse/storage/schema/main/delta/33/devices_for_e2e_keys.sql
diff --git a/synapse/storage/databases/main/schema/delta/33/devices_for_e2e_keys_clear_unknown_device.sql b/synapse/storage/schema/main/delta/33/devices_for_e2e_keys_clear_unknown_device.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/33/devices_for_e2e_keys_clear_unknown_device.sql
rename to synapse/storage/schema/main/delta/33/devices_for_e2e_keys_clear_unknown_device.sql
diff --git a/synapse/storage/databases/main/schema/delta/33/event_fields.py b/synapse/storage/schema/main/delta/33/event_fields.py
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/33/event_fields.py
rename to synapse/storage/schema/main/delta/33/event_fields.py
diff --git a/synapse/storage/databases/main/schema/delta/33/remote_media_ts.py b/synapse/storage/schema/main/delta/33/remote_media_ts.py
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/33/remote_media_ts.py
rename to synapse/storage/schema/main/delta/33/remote_media_ts.py
diff --git a/synapse/storage/databases/main/schema/delta/33/user_ips_index.sql b/synapse/storage/schema/main/delta/33/user_ips_index.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/33/user_ips_index.sql
rename to synapse/storage/schema/main/delta/33/user_ips_index.sql
diff --git a/synapse/storage/databases/main/schema/delta/34/appservice_stream.sql b/synapse/storage/schema/main/delta/34/appservice_stream.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/34/appservice_stream.sql
rename to synapse/storage/schema/main/delta/34/appservice_stream.sql
diff --git a/synapse/storage/databases/main/schema/delta/34/cache_stream.py b/synapse/storage/schema/main/delta/34/cache_stream.py
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/34/cache_stream.py
rename to synapse/storage/schema/main/delta/34/cache_stream.py
diff --git a/synapse/storage/databases/main/schema/delta/34/device_inbox.sql b/synapse/storage/schema/main/delta/34/device_inbox.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/34/device_inbox.sql
rename to synapse/storage/schema/main/delta/34/device_inbox.sql
diff --git a/synapse/storage/databases/main/schema/delta/34/push_display_name_rename.sql b/synapse/storage/schema/main/delta/34/push_display_name_rename.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/34/push_display_name_rename.sql
rename to synapse/storage/schema/main/delta/34/push_display_name_rename.sql
diff --git a/synapse/storage/databases/main/schema/delta/34/received_txn_purge.py b/synapse/storage/schema/main/delta/34/received_txn_purge.py
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/34/received_txn_purge.py
rename to synapse/storage/schema/main/delta/34/received_txn_purge.py
diff --git a/synapse/storage/databases/main/schema/delta/35/contains_url.sql b/synapse/storage/schema/main/delta/35/contains_url.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/35/contains_url.sql
rename to synapse/storage/schema/main/delta/35/contains_url.sql
diff --git a/synapse/storage/databases/main/schema/delta/35/device_outbox.sql b/synapse/storage/schema/main/delta/35/device_outbox.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/35/device_outbox.sql
rename to synapse/storage/schema/main/delta/35/device_outbox.sql
diff --git a/synapse/storage/databases/main/schema/delta/35/device_stream_id.sql b/synapse/storage/schema/main/delta/35/device_stream_id.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/35/device_stream_id.sql
rename to synapse/storage/schema/main/delta/35/device_stream_id.sql
diff --git a/synapse/storage/databases/main/schema/delta/35/event_push_actions_index.sql b/synapse/storage/schema/main/delta/35/event_push_actions_index.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/35/event_push_actions_index.sql
rename to synapse/storage/schema/main/delta/35/event_push_actions_index.sql
diff --git a/synapse/storage/databases/main/schema/delta/35/public_room_list_change_stream.sql b/synapse/storage/schema/main/delta/35/public_room_list_change_stream.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/35/public_room_list_change_stream.sql
rename to synapse/storage/schema/main/delta/35/public_room_list_change_stream.sql
diff --git a/synapse/storage/databases/main/schema/delta/35/stream_order_to_extrem.sql b/synapse/storage/schema/main/delta/35/stream_order_to_extrem.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/35/stream_order_to_extrem.sql
rename to synapse/storage/schema/main/delta/35/stream_order_to_extrem.sql
diff --git a/synapse/storage/databases/main/schema/delta/36/readd_public_rooms.sql b/synapse/storage/schema/main/delta/36/readd_public_rooms.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/36/readd_public_rooms.sql
rename to synapse/storage/schema/main/delta/36/readd_public_rooms.sql
diff --git a/synapse/storage/databases/main/schema/delta/37/remove_auth_idx.py b/synapse/storage/schema/main/delta/37/remove_auth_idx.py
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/37/remove_auth_idx.py
rename to synapse/storage/schema/main/delta/37/remove_auth_idx.py
diff --git a/synapse/storage/databases/main/schema/delta/37/user_threepids.sql b/synapse/storage/schema/main/delta/37/user_threepids.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/37/user_threepids.sql
rename to synapse/storage/schema/main/delta/37/user_threepids.sql
diff --git a/synapse/storage/databases/main/schema/delta/38/postgres_fts_gist.sql b/synapse/storage/schema/main/delta/38/postgres_fts_gist.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/38/postgres_fts_gist.sql
rename to synapse/storage/schema/main/delta/38/postgres_fts_gist.sql
diff --git a/synapse/storage/databases/main/schema/delta/39/appservice_room_list.sql b/synapse/storage/schema/main/delta/39/appservice_room_list.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/39/appservice_room_list.sql
rename to synapse/storage/schema/main/delta/39/appservice_room_list.sql
diff --git a/synapse/storage/databases/main/schema/delta/39/device_federation_stream_idx.sql b/synapse/storage/schema/main/delta/39/device_federation_stream_idx.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/39/device_federation_stream_idx.sql
rename to synapse/storage/schema/main/delta/39/device_federation_stream_idx.sql
diff --git a/synapse/storage/databases/main/schema/delta/39/event_push_index.sql b/synapse/storage/schema/main/delta/39/event_push_index.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/39/event_push_index.sql
rename to synapse/storage/schema/main/delta/39/event_push_index.sql
diff --git a/synapse/storage/databases/main/schema/delta/39/federation_out_position.sql b/synapse/storage/schema/main/delta/39/federation_out_position.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/39/federation_out_position.sql
rename to synapse/storage/schema/main/delta/39/federation_out_position.sql
diff --git a/synapse/storage/databases/main/schema/delta/39/membership_profile.sql b/synapse/storage/schema/main/delta/39/membership_profile.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/39/membership_profile.sql
rename to synapse/storage/schema/main/delta/39/membership_profile.sql
diff --git a/synapse/storage/databases/main/schema/delta/40/current_state_idx.sql b/synapse/storage/schema/main/delta/40/current_state_idx.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/40/current_state_idx.sql
rename to synapse/storage/schema/main/delta/40/current_state_idx.sql
diff --git a/synapse/storage/databases/main/schema/delta/40/device_inbox.sql b/synapse/storage/schema/main/delta/40/device_inbox.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/40/device_inbox.sql
rename to synapse/storage/schema/main/delta/40/device_inbox.sql
diff --git a/synapse/storage/databases/main/schema/delta/40/device_list_streams.sql b/synapse/storage/schema/main/delta/40/device_list_streams.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/40/device_list_streams.sql
rename to synapse/storage/schema/main/delta/40/device_list_streams.sql
diff --git a/synapse/storage/databases/main/schema/delta/40/event_push_summary.sql b/synapse/storage/schema/main/delta/40/event_push_summary.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/40/event_push_summary.sql
rename to synapse/storage/schema/main/delta/40/event_push_summary.sql
diff --git a/synapse/storage/databases/main/schema/delta/40/pushers.sql b/synapse/storage/schema/main/delta/40/pushers.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/40/pushers.sql
rename to synapse/storage/schema/main/delta/40/pushers.sql
diff --git a/synapse/storage/databases/main/schema/delta/41/device_list_stream_idx.sql b/synapse/storage/schema/main/delta/41/device_list_stream_idx.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/41/device_list_stream_idx.sql
rename to synapse/storage/schema/main/delta/41/device_list_stream_idx.sql
diff --git a/synapse/storage/databases/main/schema/delta/41/device_outbound_index.sql b/synapse/storage/schema/main/delta/41/device_outbound_index.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/41/device_outbound_index.sql
rename to synapse/storage/schema/main/delta/41/device_outbound_index.sql
diff --git a/synapse/storage/databases/main/schema/delta/41/event_search_event_id_idx.sql b/synapse/storage/schema/main/delta/41/event_search_event_id_idx.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/41/event_search_event_id_idx.sql
rename to synapse/storage/schema/main/delta/41/event_search_event_id_idx.sql
diff --git a/synapse/storage/databases/main/schema/delta/41/ratelimit.sql b/synapse/storage/schema/main/delta/41/ratelimit.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/41/ratelimit.sql
rename to synapse/storage/schema/main/delta/41/ratelimit.sql
diff --git a/synapse/storage/databases/main/schema/delta/42/current_state_delta.sql b/synapse/storage/schema/main/delta/42/current_state_delta.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/42/current_state_delta.sql
rename to synapse/storage/schema/main/delta/42/current_state_delta.sql
diff --git a/synapse/storage/databases/main/schema/delta/42/device_list_last_id.sql b/synapse/storage/schema/main/delta/42/device_list_last_id.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/42/device_list_last_id.sql
rename to synapse/storage/schema/main/delta/42/device_list_last_id.sql
diff --git a/synapse/storage/databases/main/schema/delta/42/event_auth_state_only.sql b/synapse/storage/schema/main/delta/42/event_auth_state_only.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/42/event_auth_state_only.sql
rename to synapse/storage/schema/main/delta/42/event_auth_state_only.sql
diff --git a/synapse/storage/databases/main/schema/delta/42/user_dir.py b/synapse/storage/schema/main/delta/42/user_dir.py
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/42/user_dir.py
rename to synapse/storage/schema/main/delta/42/user_dir.py
diff --git a/synapse/storage/databases/main/schema/delta/43/blocked_rooms.sql b/synapse/storage/schema/main/delta/43/blocked_rooms.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/43/blocked_rooms.sql
rename to synapse/storage/schema/main/delta/43/blocked_rooms.sql
diff --git a/synapse/storage/databases/main/schema/delta/43/quarantine_media.sql b/synapse/storage/schema/main/delta/43/quarantine_media.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/43/quarantine_media.sql
rename to synapse/storage/schema/main/delta/43/quarantine_media.sql
diff --git a/synapse/storage/databases/main/schema/delta/43/url_cache.sql b/synapse/storage/schema/main/delta/43/url_cache.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/43/url_cache.sql
rename to synapse/storage/schema/main/delta/43/url_cache.sql
diff --git a/synapse/storage/databases/main/schema/delta/43/user_share.sql b/synapse/storage/schema/main/delta/43/user_share.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/43/user_share.sql
rename to synapse/storage/schema/main/delta/43/user_share.sql
diff --git a/synapse/storage/databases/main/schema/delta/44/expire_url_cache.sql b/synapse/storage/schema/main/delta/44/expire_url_cache.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/44/expire_url_cache.sql
rename to synapse/storage/schema/main/delta/44/expire_url_cache.sql
diff --git a/synapse/storage/databases/main/schema/delta/45/group_server.sql b/synapse/storage/schema/main/delta/45/group_server.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/45/group_server.sql
rename to synapse/storage/schema/main/delta/45/group_server.sql
diff --git a/synapse/storage/databases/main/schema/delta/45/profile_cache.sql b/synapse/storage/schema/main/delta/45/profile_cache.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/45/profile_cache.sql
rename to synapse/storage/schema/main/delta/45/profile_cache.sql
diff --git a/synapse/storage/databases/main/schema/delta/46/drop_refresh_tokens.sql b/synapse/storage/schema/main/delta/46/drop_refresh_tokens.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/46/drop_refresh_tokens.sql
rename to synapse/storage/schema/main/delta/46/drop_refresh_tokens.sql
diff --git a/synapse/storage/databases/main/schema/delta/46/drop_unique_deleted_pushers.sql b/synapse/storage/schema/main/delta/46/drop_unique_deleted_pushers.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/46/drop_unique_deleted_pushers.sql
rename to synapse/storage/schema/main/delta/46/drop_unique_deleted_pushers.sql
diff --git a/synapse/storage/databases/main/schema/delta/46/group_server.sql b/synapse/storage/schema/main/delta/46/group_server.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/46/group_server.sql
rename to synapse/storage/schema/main/delta/46/group_server.sql
diff --git a/synapse/storage/databases/main/schema/delta/46/local_media_repository_url_idx.sql b/synapse/storage/schema/main/delta/46/local_media_repository_url_idx.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/46/local_media_repository_url_idx.sql
rename to synapse/storage/schema/main/delta/46/local_media_repository_url_idx.sql
diff --git a/synapse/storage/databases/main/schema/delta/46/user_dir_null_room_ids.sql b/synapse/storage/schema/main/delta/46/user_dir_null_room_ids.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/46/user_dir_null_room_ids.sql
rename to synapse/storage/schema/main/delta/46/user_dir_null_room_ids.sql
diff --git a/synapse/storage/databases/main/schema/delta/46/user_dir_typos.sql b/synapse/storage/schema/main/delta/46/user_dir_typos.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/46/user_dir_typos.sql
rename to synapse/storage/schema/main/delta/46/user_dir_typos.sql
diff --git a/synapse/storage/databases/main/schema/delta/47/last_access_media.sql b/synapse/storage/schema/main/delta/47/last_access_media.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/47/last_access_media.sql
rename to synapse/storage/schema/main/delta/47/last_access_media.sql
diff --git a/synapse/storage/databases/main/schema/delta/47/postgres_fts_gin.sql b/synapse/storage/schema/main/delta/47/postgres_fts_gin.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/47/postgres_fts_gin.sql
rename to synapse/storage/schema/main/delta/47/postgres_fts_gin.sql
diff --git a/synapse/storage/databases/main/schema/delta/47/push_actions_staging.sql b/synapse/storage/schema/main/delta/47/push_actions_staging.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/47/push_actions_staging.sql
rename to synapse/storage/schema/main/delta/47/push_actions_staging.sql
diff --git a/synapse/storage/databases/main/schema/delta/48/add_user_consent.sql b/synapse/storage/schema/main/delta/48/add_user_consent.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/48/add_user_consent.sql
rename to synapse/storage/schema/main/delta/48/add_user_consent.sql
diff --git a/synapse/storage/databases/main/schema/delta/48/add_user_ips_last_seen_index.sql b/synapse/storage/schema/main/delta/48/add_user_ips_last_seen_index.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/48/add_user_ips_last_seen_index.sql
rename to synapse/storage/schema/main/delta/48/add_user_ips_last_seen_index.sql
diff --git a/synapse/storage/databases/main/schema/delta/48/deactivated_users.sql b/synapse/storage/schema/main/delta/48/deactivated_users.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/48/deactivated_users.sql
rename to synapse/storage/schema/main/delta/48/deactivated_users.sql
diff --git a/synapse/storage/databases/main/schema/delta/48/group_unique_indexes.py b/synapse/storage/schema/main/delta/48/group_unique_indexes.py
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/48/group_unique_indexes.py
rename to synapse/storage/schema/main/delta/48/group_unique_indexes.py
diff --git a/synapse/storage/databases/main/schema/delta/48/groups_joinable.sql b/synapse/storage/schema/main/delta/48/groups_joinable.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/48/groups_joinable.sql
rename to synapse/storage/schema/main/delta/48/groups_joinable.sql
diff --git a/synapse/storage/databases/main/schema/delta/49/add_user_consent_server_notice_sent.sql b/synapse/storage/schema/main/delta/49/add_user_consent_server_notice_sent.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/49/add_user_consent_server_notice_sent.sql
rename to synapse/storage/schema/main/delta/49/add_user_consent_server_notice_sent.sql
diff --git a/synapse/storage/databases/main/schema/delta/49/add_user_daily_visits.sql b/synapse/storage/schema/main/delta/49/add_user_daily_visits.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/49/add_user_daily_visits.sql
rename to synapse/storage/schema/main/delta/49/add_user_daily_visits.sql
diff --git a/synapse/storage/databases/main/schema/delta/49/add_user_ips_last_seen_only_index.sql b/synapse/storage/schema/main/delta/49/add_user_ips_last_seen_only_index.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/49/add_user_ips_last_seen_only_index.sql
rename to synapse/storage/schema/main/delta/49/add_user_ips_last_seen_only_index.sql
diff --git a/synapse/storage/databases/main/schema/delta/50/add_creation_ts_users_index.sql b/synapse/storage/schema/main/delta/50/add_creation_ts_users_index.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/50/add_creation_ts_users_index.sql
rename to synapse/storage/schema/main/delta/50/add_creation_ts_users_index.sql
diff --git a/synapse/storage/databases/main/schema/delta/50/erasure_store.sql b/synapse/storage/schema/main/delta/50/erasure_store.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/50/erasure_store.sql
rename to synapse/storage/schema/main/delta/50/erasure_store.sql
diff --git a/synapse/storage/databases/main/schema/delta/50/make_event_content_nullable.py b/synapse/storage/schema/main/delta/50/make_event_content_nullable.py
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/50/make_event_content_nullable.py
rename to synapse/storage/schema/main/delta/50/make_event_content_nullable.py
diff --git a/synapse/storage/databases/main/schema/delta/51/e2e_room_keys.sql b/synapse/storage/schema/main/delta/51/e2e_room_keys.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/51/e2e_room_keys.sql
rename to synapse/storage/schema/main/delta/51/e2e_room_keys.sql
diff --git a/synapse/storage/databases/main/schema/delta/51/monthly_active_users.sql b/synapse/storage/schema/main/delta/51/monthly_active_users.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/51/monthly_active_users.sql
rename to synapse/storage/schema/main/delta/51/monthly_active_users.sql
diff --git a/synapse/storage/databases/main/schema/delta/52/add_event_to_state_group_index.sql b/synapse/storage/schema/main/delta/52/add_event_to_state_group_index.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/52/add_event_to_state_group_index.sql
rename to synapse/storage/schema/main/delta/52/add_event_to_state_group_index.sql
diff --git a/synapse/storage/databases/main/schema/delta/52/device_list_streams_unique_idx.sql b/synapse/storage/schema/main/delta/52/device_list_streams_unique_idx.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/52/device_list_streams_unique_idx.sql
rename to synapse/storage/schema/main/delta/52/device_list_streams_unique_idx.sql
diff --git a/synapse/storage/databases/main/schema/delta/52/e2e_room_keys.sql b/synapse/storage/schema/main/delta/52/e2e_room_keys.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/52/e2e_room_keys.sql
rename to synapse/storage/schema/main/delta/52/e2e_room_keys.sql
diff --git a/synapse/storage/databases/main/schema/delta/53/add_user_type_to_users.sql b/synapse/storage/schema/main/delta/53/add_user_type_to_users.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/53/add_user_type_to_users.sql
rename to synapse/storage/schema/main/delta/53/add_user_type_to_users.sql
diff --git a/synapse/storage/databases/main/schema/delta/53/drop_sent_transactions.sql b/synapse/storage/schema/main/delta/53/drop_sent_transactions.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/53/drop_sent_transactions.sql
rename to synapse/storage/schema/main/delta/53/drop_sent_transactions.sql
diff --git a/synapse/storage/databases/main/schema/delta/53/event_format_version.sql b/synapse/storage/schema/main/delta/53/event_format_version.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/53/event_format_version.sql
rename to synapse/storage/schema/main/delta/53/event_format_version.sql
diff --git a/synapse/storage/databases/main/schema/delta/53/user_dir_populate.sql b/synapse/storage/schema/main/delta/53/user_dir_populate.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/53/user_dir_populate.sql
rename to synapse/storage/schema/main/delta/53/user_dir_populate.sql
diff --git a/synapse/storage/databases/main/schema/delta/53/user_ips_index.sql b/synapse/storage/schema/main/delta/53/user_ips_index.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/53/user_ips_index.sql
rename to synapse/storage/schema/main/delta/53/user_ips_index.sql
diff --git a/synapse/storage/databases/main/schema/delta/53/user_share.sql b/synapse/storage/schema/main/delta/53/user_share.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/53/user_share.sql
rename to synapse/storage/schema/main/delta/53/user_share.sql
diff --git a/synapse/storage/databases/main/schema/delta/53/user_threepid_id.sql b/synapse/storage/schema/main/delta/53/user_threepid_id.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/53/user_threepid_id.sql
rename to synapse/storage/schema/main/delta/53/user_threepid_id.sql
diff --git a/synapse/storage/databases/main/schema/delta/53/users_in_public_rooms.sql b/synapse/storage/schema/main/delta/53/users_in_public_rooms.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/53/users_in_public_rooms.sql
rename to synapse/storage/schema/main/delta/53/users_in_public_rooms.sql
diff --git a/synapse/storage/databases/main/schema/delta/54/account_validity_with_renewal.sql b/synapse/storage/schema/main/delta/54/account_validity_with_renewal.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/54/account_validity_with_renewal.sql
rename to synapse/storage/schema/main/delta/54/account_validity_with_renewal.sql
diff --git a/synapse/storage/databases/main/schema/delta/54/add_validity_to_server_keys.sql b/synapse/storage/schema/main/delta/54/add_validity_to_server_keys.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/54/add_validity_to_server_keys.sql
rename to synapse/storage/schema/main/delta/54/add_validity_to_server_keys.sql
diff --git a/synapse/storage/databases/main/schema/delta/54/delete_forward_extremities.sql b/synapse/storage/schema/main/delta/54/delete_forward_extremities.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/54/delete_forward_extremities.sql
rename to synapse/storage/schema/main/delta/54/delete_forward_extremities.sql
diff --git a/synapse/storage/databases/main/schema/delta/54/drop_legacy_tables.sql b/synapse/storage/schema/main/delta/54/drop_legacy_tables.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/54/drop_legacy_tables.sql
rename to synapse/storage/schema/main/delta/54/drop_legacy_tables.sql
diff --git a/synapse/storage/databases/main/schema/delta/54/drop_presence_list.sql b/synapse/storage/schema/main/delta/54/drop_presence_list.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/54/drop_presence_list.sql
rename to synapse/storage/schema/main/delta/54/drop_presence_list.sql
diff --git a/synapse/storage/databases/main/schema/delta/54/relations.sql b/synapse/storage/schema/main/delta/54/relations.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/54/relations.sql
rename to synapse/storage/schema/main/delta/54/relations.sql
diff --git a/synapse/storage/databases/main/schema/delta/54/stats.sql b/synapse/storage/schema/main/delta/54/stats.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/54/stats.sql
rename to synapse/storage/schema/main/delta/54/stats.sql
diff --git a/synapse/storage/databases/main/schema/delta/54/stats2.sql b/synapse/storage/schema/main/delta/54/stats2.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/54/stats2.sql
rename to synapse/storage/schema/main/delta/54/stats2.sql
diff --git a/synapse/storage/databases/main/schema/delta/55/access_token_expiry.sql b/synapse/storage/schema/main/delta/55/access_token_expiry.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/55/access_token_expiry.sql
rename to synapse/storage/schema/main/delta/55/access_token_expiry.sql
diff --git a/synapse/storage/databases/main/schema/delta/55/track_threepid_validations.sql b/synapse/storage/schema/main/delta/55/track_threepid_validations.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/55/track_threepid_validations.sql
rename to synapse/storage/schema/main/delta/55/track_threepid_validations.sql
diff --git a/synapse/storage/databases/main/schema/delta/55/users_alter_deactivated.sql b/synapse/storage/schema/main/delta/55/users_alter_deactivated.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/55/users_alter_deactivated.sql
rename to synapse/storage/schema/main/delta/55/users_alter_deactivated.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/add_spans_to_device_lists.sql b/synapse/storage/schema/main/delta/56/add_spans_to_device_lists.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/add_spans_to_device_lists.sql
rename to synapse/storage/schema/main/delta/56/add_spans_to_device_lists.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/current_state_events_membership.sql b/synapse/storage/schema/main/delta/56/current_state_events_membership.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/current_state_events_membership.sql
rename to synapse/storage/schema/main/delta/56/current_state_events_membership.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/current_state_events_membership_mk2.sql b/synapse/storage/schema/main/delta/56/current_state_events_membership_mk2.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/current_state_events_membership_mk2.sql
rename to synapse/storage/schema/main/delta/56/current_state_events_membership_mk2.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/delete_keys_from_deleted_backups.sql b/synapse/storage/schema/main/delta/56/delete_keys_from_deleted_backups.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/delete_keys_from_deleted_backups.sql
rename to synapse/storage/schema/main/delta/56/delete_keys_from_deleted_backups.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/destinations_failure_ts.sql b/synapse/storage/schema/main/delta/56/destinations_failure_ts.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/destinations_failure_ts.sql
rename to synapse/storage/schema/main/delta/56/destinations_failure_ts.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/destinations_retry_interval_type.sql.postgres b/synapse/storage/schema/main/delta/56/destinations_retry_interval_type.sql.postgres
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/destinations_retry_interval_type.sql.postgres
rename to synapse/storage/schema/main/delta/56/destinations_retry_interval_type.sql.postgres
diff --git a/synapse/storage/databases/main/schema/delta/56/device_stream_id_insert.sql b/synapse/storage/schema/main/delta/56/device_stream_id_insert.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/device_stream_id_insert.sql
rename to synapse/storage/schema/main/delta/56/device_stream_id_insert.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/devices_last_seen.sql b/synapse/storage/schema/main/delta/56/devices_last_seen.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/devices_last_seen.sql
rename to synapse/storage/schema/main/delta/56/devices_last_seen.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/drop_unused_event_tables.sql b/synapse/storage/schema/main/delta/56/drop_unused_event_tables.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/drop_unused_event_tables.sql
rename to synapse/storage/schema/main/delta/56/drop_unused_event_tables.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/event_expiry.sql b/synapse/storage/schema/main/delta/56/event_expiry.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/event_expiry.sql
rename to synapse/storage/schema/main/delta/56/event_expiry.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/event_labels.sql b/synapse/storage/schema/main/delta/56/event_labels.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/event_labels.sql
rename to synapse/storage/schema/main/delta/56/event_labels.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/event_labels_background_update.sql b/synapse/storage/schema/main/delta/56/event_labels_background_update.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/event_labels_background_update.sql
rename to synapse/storage/schema/main/delta/56/event_labels_background_update.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/fix_room_keys_index.sql b/synapse/storage/schema/main/delta/56/fix_room_keys_index.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/fix_room_keys_index.sql
rename to synapse/storage/schema/main/delta/56/fix_room_keys_index.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/hidden_devices.sql b/synapse/storage/schema/main/delta/56/hidden_devices.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/hidden_devices.sql
rename to synapse/storage/schema/main/delta/56/hidden_devices.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/hidden_devices_fix.sql.sqlite b/synapse/storage/schema/main/delta/56/hidden_devices_fix.sql.sqlite
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/hidden_devices_fix.sql.sqlite
rename to synapse/storage/schema/main/delta/56/hidden_devices_fix.sql.sqlite
diff --git a/synapse/storage/databases/main/schema/delta/56/nuke_empty_communities_from_db.sql b/synapse/storage/schema/main/delta/56/nuke_empty_communities_from_db.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/nuke_empty_communities_from_db.sql
rename to synapse/storage/schema/main/delta/56/nuke_empty_communities_from_db.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/public_room_list_idx.sql b/synapse/storage/schema/main/delta/56/public_room_list_idx.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/public_room_list_idx.sql
rename to synapse/storage/schema/main/delta/56/public_room_list_idx.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/redaction_censor.sql b/synapse/storage/schema/main/delta/56/redaction_censor.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/redaction_censor.sql
rename to synapse/storage/schema/main/delta/56/redaction_censor.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/redaction_censor2.sql b/synapse/storage/schema/main/delta/56/redaction_censor2.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/redaction_censor2.sql
rename to synapse/storage/schema/main/delta/56/redaction_censor2.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/redaction_censor3_fix_update.sql.postgres b/synapse/storage/schema/main/delta/56/redaction_censor3_fix_update.sql.postgres
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/redaction_censor3_fix_update.sql.postgres
rename to synapse/storage/schema/main/delta/56/redaction_censor3_fix_update.sql.postgres
diff --git a/synapse/storage/databases/main/schema/delta/56/redaction_censor4.sql b/synapse/storage/schema/main/delta/56/redaction_censor4.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/redaction_censor4.sql
rename to synapse/storage/schema/main/delta/56/redaction_censor4.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/remove_tombstoned_rooms_from_directory.sql b/synapse/storage/schema/main/delta/56/remove_tombstoned_rooms_from_directory.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/remove_tombstoned_rooms_from_directory.sql
rename to synapse/storage/schema/main/delta/56/remove_tombstoned_rooms_from_directory.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/room_key_etag.sql b/synapse/storage/schema/main/delta/56/room_key_etag.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/room_key_etag.sql
rename to synapse/storage/schema/main/delta/56/room_key_etag.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/room_membership_idx.sql b/synapse/storage/schema/main/delta/56/room_membership_idx.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/room_membership_idx.sql
rename to synapse/storage/schema/main/delta/56/room_membership_idx.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/room_retention.sql b/synapse/storage/schema/main/delta/56/room_retention.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/room_retention.sql
rename to synapse/storage/schema/main/delta/56/room_retention.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/signing_keys.sql b/synapse/storage/schema/main/delta/56/signing_keys.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/signing_keys.sql
rename to synapse/storage/schema/main/delta/56/signing_keys.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/signing_keys_nonunique_signatures.sql b/synapse/storage/schema/main/delta/56/signing_keys_nonunique_signatures.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/signing_keys_nonunique_signatures.sql
rename to synapse/storage/schema/main/delta/56/signing_keys_nonunique_signatures.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/stats_separated.sql b/synapse/storage/schema/main/delta/56/stats_separated.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/stats_separated.sql
rename to synapse/storage/schema/main/delta/56/stats_separated.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/unique_user_filter_index.py b/synapse/storage/schema/main/delta/56/unique_user_filter_index.py
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/unique_user_filter_index.py
rename to synapse/storage/schema/main/delta/56/unique_user_filter_index.py
diff --git a/synapse/storage/databases/main/schema/delta/56/user_external_ids.sql b/synapse/storage/schema/main/delta/56/user_external_ids.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/user_external_ids.sql
rename to synapse/storage/schema/main/delta/56/user_external_ids.sql
diff --git a/synapse/storage/databases/main/schema/delta/56/users_in_public_rooms_idx.sql b/synapse/storage/schema/main/delta/56/users_in_public_rooms_idx.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/56/users_in_public_rooms_idx.sql
rename to synapse/storage/schema/main/delta/56/users_in_public_rooms_idx.sql
diff --git a/synapse/storage/databases/main/schema/delta/57/delete_old_current_state_events.sql b/synapse/storage/schema/main/delta/57/delete_old_current_state_events.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/57/delete_old_current_state_events.sql
rename to synapse/storage/schema/main/delta/57/delete_old_current_state_events.sql
diff --git a/synapse/storage/databases/main/schema/delta/57/device_list_remote_cache_stale.sql b/synapse/storage/schema/main/delta/57/device_list_remote_cache_stale.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/57/device_list_remote_cache_stale.sql
rename to synapse/storage/schema/main/delta/57/device_list_remote_cache_stale.sql
diff --git a/synapse/storage/databases/main/schema/delta/57/local_current_membership.py b/synapse/storage/schema/main/delta/57/local_current_membership.py
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/57/local_current_membership.py
rename to synapse/storage/schema/main/delta/57/local_current_membership.py
diff --git a/synapse/storage/databases/main/schema/delta/57/remove_sent_outbound_pokes.sql b/synapse/storage/schema/main/delta/57/remove_sent_outbound_pokes.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/57/remove_sent_outbound_pokes.sql
rename to synapse/storage/schema/main/delta/57/remove_sent_outbound_pokes.sql
diff --git a/synapse/storage/databases/main/schema/delta/57/rooms_version_column.sql b/synapse/storage/schema/main/delta/57/rooms_version_column.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/57/rooms_version_column.sql
rename to synapse/storage/schema/main/delta/57/rooms_version_column.sql
diff --git a/synapse/storage/databases/main/schema/delta/57/rooms_version_column_2.sql.postgres b/synapse/storage/schema/main/delta/57/rooms_version_column_2.sql.postgres
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/57/rooms_version_column_2.sql.postgres
rename to synapse/storage/schema/main/delta/57/rooms_version_column_2.sql.postgres
diff --git a/synapse/storage/databases/main/schema/delta/57/rooms_version_column_2.sql.sqlite b/synapse/storage/schema/main/delta/57/rooms_version_column_2.sql.sqlite
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/57/rooms_version_column_2.sql.sqlite
rename to synapse/storage/schema/main/delta/57/rooms_version_column_2.sql.sqlite
diff --git a/synapse/storage/databases/main/schema/delta/57/rooms_version_column_3.sql.postgres b/synapse/storage/schema/main/delta/57/rooms_version_column_3.sql.postgres
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/57/rooms_version_column_3.sql.postgres
rename to synapse/storage/schema/main/delta/57/rooms_version_column_3.sql.postgres
diff --git a/synapse/storage/databases/main/schema/delta/57/rooms_version_column_3.sql.sqlite b/synapse/storage/schema/main/delta/57/rooms_version_column_3.sql.sqlite
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/57/rooms_version_column_3.sql.sqlite
rename to synapse/storage/schema/main/delta/57/rooms_version_column_3.sql.sqlite
diff --git a/synapse/storage/databases/main/schema/delta/58/02remove_dup_outbound_pokes.sql b/synapse/storage/schema/main/delta/58/02remove_dup_outbound_pokes.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/02remove_dup_outbound_pokes.sql
rename to synapse/storage/schema/main/delta/58/02remove_dup_outbound_pokes.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/03persist_ui_auth.sql b/synapse/storage/schema/main/delta/58/03persist_ui_auth.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/03persist_ui_auth.sql
rename to synapse/storage/schema/main/delta/58/03persist_ui_auth.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/05cache_instance.sql.postgres b/synapse/storage/schema/main/delta/58/05cache_instance.sql.postgres
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/05cache_instance.sql.postgres
rename to synapse/storage/schema/main/delta/58/05cache_instance.sql.postgres
diff --git a/synapse/storage/databases/main/schema/delta/58/06dlols_unique_idx.py b/synapse/storage/schema/main/delta/58/06dlols_unique_idx.py
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/06dlols_unique_idx.py
rename to synapse/storage/schema/main/delta/58/06dlols_unique_idx.py
diff --git a/synapse/storage/databases/main/schema/delta/58/07add_method_to_thumbnail_constraint.sql.postgres b/synapse/storage/schema/main/delta/58/07add_method_to_thumbnail_constraint.sql.postgres
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/07add_method_to_thumbnail_constraint.sql.postgres
rename to synapse/storage/schema/main/delta/58/07add_method_to_thumbnail_constraint.sql.postgres
diff --git a/synapse/storage/databases/main/schema/delta/58/07add_method_to_thumbnail_constraint.sql.sqlite b/synapse/storage/schema/main/delta/58/07add_method_to_thumbnail_constraint.sql.sqlite
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/07add_method_to_thumbnail_constraint.sql.sqlite
rename to synapse/storage/schema/main/delta/58/07add_method_to_thumbnail_constraint.sql.sqlite
diff --git a/synapse/storage/databases/main/schema/delta/58/07persist_ui_auth_ips.sql b/synapse/storage/schema/main/delta/58/07persist_ui_auth_ips.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/07persist_ui_auth_ips.sql
rename to synapse/storage/schema/main/delta/58/07persist_ui_auth_ips.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/08_media_safe_from_quarantine.sql.postgres b/synapse/storage/schema/main/delta/58/08_media_safe_from_quarantine.sql.postgres
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/08_media_safe_from_quarantine.sql.postgres
rename to synapse/storage/schema/main/delta/58/08_media_safe_from_quarantine.sql.postgres
diff --git a/synapse/storage/databases/main/schema/delta/58/08_media_safe_from_quarantine.sql.sqlite b/synapse/storage/schema/main/delta/58/08_media_safe_from_quarantine.sql.sqlite
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/08_media_safe_from_quarantine.sql.sqlite
rename to synapse/storage/schema/main/delta/58/08_media_safe_from_quarantine.sql.sqlite
diff --git a/synapse/storage/databases/main/schema/delta/58/09shadow_ban.sql b/synapse/storage/schema/main/delta/58/09shadow_ban.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/09shadow_ban.sql
rename to synapse/storage/schema/main/delta/58/09shadow_ban.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/10_pushrules_enabled_delete_obsolete.sql b/synapse/storage/schema/main/delta/58/10_pushrules_enabled_delete_obsolete.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/10_pushrules_enabled_delete_obsolete.sql
rename to synapse/storage/schema/main/delta/58/10_pushrules_enabled_delete_obsolete.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/10drop_local_rejections_stream.sql b/synapse/storage/schema/main/delta/58/10drop_local_rejections_stream.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/10drop_local_rejections_stream.sql
rename to synapse/storage/schema/main/delta/58/10drop_local_rejections_stream.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/10federation_pos_instance_name.sql b/synapse/storage/schema/main/delta/58/10federation_pos_instance_name.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/10federation_pos_instance_name.sql
rename to synapse/storage/schema/main/delta/58/10federation_pos_instance_name.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/11dehydration.sql b/synapse/storage/schema/main/delta/58/11dehydration.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/11dehydration.sql
rename to synapse/storage/schema/main/delta/58/11dehydration.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/11fallback.sql b/synapse/storage/schema/main/delta/58/11fallback.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/11fallback.sql
rename to synapse/storage/schema/main/delta/58/11fallback.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/11user_id_seq.py b/synapse/storage/schema/main/delta/58/11user_id_seq.py
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/11user_id_seq.py
rename to synapse/storage/schema/main/delta/58/11user_id_seq.py
diff --git a/synapse/storage/databases/main/schema/delta/58/12room_stats.sql b/synapse/storage/schema/main/delta/58/12room_stats.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/12room_stats.sql
rename to synapse/storage/schema/main/delta/58/12room_stats.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/13remove_presence_allow_inbound.sql b/synapse/storage/schema/main/delta/58/13remove_presence_allow_inbound.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/13remove_presence_allow_inbound.sql
rename to synapse/storage/schema/main/delta/58/13remove_presence_allow_inbound.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/14events_instance_name.sql b/synapse/storage/schema/main/delta/58/14events_instance_name.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/14events_instance_name.sql
rename to synapse/storage/schema/main/delta/58/14events_instance_name.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/14events_instance_name.sql.postgres b/synapse/storage/schema/main/delta/58/14events_instance_name.sql.postgres
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/14events_instance_name.sql.postgres
rename to synapse/storage/schema/main/delta/58/14events_instance_name.sql.postgres
diff --git a/synapse/storage/databases/main/schema/delta/58/15_catchup_destination_rooms.sql b/synapse/storage/schema/main/delta/58/15_catchup_destination_rooms.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/15_catchup_destination_rooms.sql
rename to synapse/storage/schema/main/delta/58/15_catchup_destination_rooms.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/15unread_count.sql b/synapse/storage/schema/main/delta/58/15unread_count.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/15unread_count.sql
rename to synapse/storage/schema/main/delta/58/15unread_count.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/16populate_stats_process_rooms_fix.sql b/synapse/storage/schema/main/delta/58/16populate_stats_process_rooms_fix.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/16populate_stats_process_rooms_fix.sql
rename to synapse/storage/schema/main/delta/58/16populate_stats_process_rooms_fix.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/17_catchup_last_successful.sql b/synapse/storage/schema/main/delta/58/17_catchup_last_successful.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/17_catchup_last_successful.sql
rename to synapse/storage/schema/main/delta/58/17_catchup_last_successful.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/18stream_positions.sql b/synapse/storage/schema/main/delta/58/18stream_positions.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/18stream_positions.sql
rename to synapse/storage/schema/main/delta/58/18stream_positions.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/19instance_map.sql.postgres b/synapse/storage/schema/main/delta/58/19instance_map.sql.postgres
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/19instance_map.sql.postgres
rename to synapse/storage/schema/main/delta/58/19instance_map.sql.postgres
diff --git a/synapse/storage/databases/main/schema/delta/58/19txn_id.sql b/synapse/storage/schema/main/delta/58/19txn_id.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/19txn_id.sql
rename to synapse/storage/schema/main/delta/58/19txn_id.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/20instance_name_event_tables.sql b/synapse/storage/schema/main/delta/58/20instance_name_event_tables.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/20instance_name_event_tables.sql
rename to synapse/storage/schema/main/delta/58/20instance_name_event_tables.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/20user_daily_visits.sql b/synapse/storage/schema/main/delta/58/20user_daily_visits.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/20user_daily_visits.sql
rename to synapse/storage/schema/main/delta/58/20user_daily_visits.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/21as_device_stream.sql b/synapse/storage/schema/main/delta/58/21as_device_stream.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/21as_device_stream.sql
rename to synapse/storage/schema/main/delta/58/21as_device_stream.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/21drop_device_max_stream_id.sql b/synapse/storage/schema/main/delta/58/21drop_device_max_stream_id.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/21drop_device_max_stream_id.sql
rename to synapse/storage/schema/main/delta/58/21drop_device_max_stream_id.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/22puppet_token.sql b/synapse/storage/schema/main/delta/58/22puppet_token.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/22puppet_token.sql
rename to synapse/storage/schema/main/delta/58/22puppet_token.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/22users_have_local_media.sql b/synapse/storage/schema/main/delta/58/22users_have_local_media.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/22users_have_local_media.sql
rename to synapse/storage/schema/main/delta/58/22users_have_local_media.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/23e2e_cross_signing_keys_idx.sql b/synapse/storage/schema/main/delta/58/23e2e_cross_signing_keys_idx.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/23e2e_cross_signing_keys_idx.sql
rename to synapse/storage/schema/main/delta/58/23e2e_cross_signing_keys_idx.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/24drop_event_json_index.sql b/synapse/storage/schema/main/delta/58/24drop_event_json_index.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/24drop_event_json_index.sql
rename to synapse/storage/schema/main/delta/58/24drop_event_json_index.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/25user_external_ids_user_id_idx.sql b/synapse/storage/schema/main/delta/58/25user_external_ids_user_id_idx.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/25user_external_ids_user_id_idx.sql
rename to synapse/storage/schema/main/delta/58/25user_external_ids_user_id_idx.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/26access_token_last_validated.sql b/synapse/storage/schema/main/delta/58/26access_token_last_validated.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/26access_token_last_validated.sql
rename to synapse/storage/schema/main/delta/58/26access_token_last_validated.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/27local_invites.sql b/synapse/storage/schema/main/delta/58/27local_invites.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/27local_invites.sql
rename to synapse/storage/schema/main/delta/58/27local_invites.sql
diff --git a/synapse/storage/databases/main/schema/delta/58/28drop_last_used_column.sql.postgres b/synapse/storage/schema/main/delta/58/28drop_last_used_column.sql.postgres
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/28drop_last_used_column.sql.postgres
rename to synapse/storage/schema/main/delta/58/28drop_last_used_column.sql.postgres
diff --git a/synapse/storage/databases/main/schema/delta/58/28drop_last_used_column.sql.sqlite b/synapse/storage/schema/main/delta/58/28drop_last_used_column.sql.sqlite
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/58/28drop_last_used_column.sql.sqlite
rename to synapse/storage/schema/main/delta/58/28drop_last_used_column.sql.sqlite
diff --git a/synapse/storage/databases/main/schema/delta/59/01ignored_user.py b/synapse/storage/schema/main/delta/59/01ignored_user.py
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/59/01ignored_user.py
rename to synapse/storage/schema/main/delta/59/01ignored_user.py
diff --git a/synapse/storage/databases/main/schema/delta/59/02shard_send_to_device.sql b/synapse/storage/schema/main/delta/59/02shard_send_to_device.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/59/02shard_send_to_device.sql
rename to synapse/storage/schema/main/delta/59/02shard_send_to_device.sql
diff --git a/synapse/storage/databases/main/schema/delta/59/03shard_send_to_device_sequence.sql.postgres b/synapse/storage/schema/main/delta/59/03shard_send_to_device_sequence.sql.postgres
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/59/03shard_send_to_device_sequence.sql.postgres
rename to synapse/storage/schema/main/delta/59/03shard_send_to_device_sequence.sql.postgres
diff --git a/synapse/storage/databases/main/schema/delta/59/04_event_auth_chains.sql b/synapse/storage/schema/main/delta/59/04_event_auth_chains.sql
similarity index 100%
rename from synapse/storage/databases/main/schema/delta/59/04_event_auth_chains.sql
rename to
synapse/storage/schema/main/delta/59/04_event_auth_chains.sql diff --git a/synapse/storage/databases/main/schema/delta/59/04_event_auth_chains.sql.postgres b/synapse/storage/schema/main/delta/59/04_event_auth_chains.sql.postgres similarity index 100% rename from synapse/storage/databases/main/schema/delta/59/04_event_auth_chains.sql.postgres rename to synapse/storage/schema/main/delta/59/04_event_auth_chains.sql.postgres diff --git a/synapse/storage/databases/main/schema/delta/59/04drop_account_data.sql b/synapse/storage/schema/main/delta/59/04drop_account_data.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/59/04drop_account_data.sql rename to synapse/storage/schema/main/delta/59/04drop_account_data.sql diff --git a/synapse/storage/databases/main/schema/delta/59/05cache_invalidation.sql b/synapse/storage/schema/main/delta/59/05cache_invalidation.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/59/05cache_invalidation.sql rename to synapse/storage/schema/main/delta/59/05cache_invalidation.sql diff --git a/synapse/storage/databases/main/schema/delta/59/06chain_cover_index.sql b/synapse/storage/schema/main/delta/59/06chain_cover_index.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/59/06chain_cover_index.sql rename to synapse/storage/schema/main/delta/59/06chain_cover_index.sql diff --git a/synapse/storage/databases/main/schema/delta/59/06shard_account_data.sql b/synapse/storage/schema/main/delta/59/06shard_account_data.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/59/06shard_account_data.sql rename to synapse/storage/schema/main/delta/59/06shard_account_data.sql diff --git a/synapse/storage/databases/main/schema/delta/59/06shard_account_data.sql.postgres b/synapse/storage/schema/main/delta/59/06shard_account_data.sql.postgres similarity index 100% rename from synapse/storage/databases/main/schema/delta/59/06shard_account_data.sql.postgres rename to synapse/storage/schema/main/delta/59/06shard_account_data.sql.postgres diff --git a/synapse/storage/databases/main/schema/delta/59/07shard_account_data_fix.sql b/synapse/storage/schema/main/delta/59/07shard_account_data_fix.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/59/07shard_account_data_fix.sql rename to synapse/storage/schema/main/delta/59/07shard_account_data_fix.sql diff --git a/synapse/storage/databases/main/schema/delta/59/08delete_pushers_for_deactivated_accounts.sql b/synapse/storage/schema/main/delta/59/08delete_pushers_for_deactivated_accounts.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/59/08delete_pushers_for_deactivated_accounts.sql rename to synapse/storage/schema/main/delta/59/08delete_pushers_for_deactivated_accounts.sql diff --git a/synapse/storage/databases/main/schema/delta/59/08delete_stale_pushers.sql b/synapse/storage/schema/main/delta/59/08delete_stale_pushers.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/59/08delete_stale_pushers.sql rename to synapse/storage/schema/main/delta/59/08delete_stale_pushers.sql diff --git a/synapse/storage/databases/main/schema/delta/59/09rejected_events_metadata.sql b/synapse/storage/schema/main/delta/59/09rejected_events_metadata.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/59/09rejected_events_metadata.sql rename to synapse/storage/schema/main/delta/59/09rejected_events_metadata.sql diff --git 
a/synapse/storage/databases/main/schema/delta/59/10delete_purged_chain_cover.sql b/synapse/storage/schema/main/delta/59/10delete_purged_chain_cover.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/59/10delete_purged_chain_cover.sql rename to synapse/storage/schema/main/delta/59/10delete_purged_chain_cover.sql diff --git a/synapse/storage/databases/main/schema/delta/59/11drop_thumbnail_constraint.sql.postgres b/synapse/storage/schema/main/delta/59/11drop_thumbnail_constraint.sql.postgres similarity index 100% rename from synapse/storage/databases/main/schema/delta/59/11drop_thumbnail_constraint.sql.postgres rename to synapse/storage/schema/main/delta/59/11drop_thumbnail_constraint.sql.postgres diff --git a/synapse/storage/databases/main/schema/delta/59/12account_validity_token_used_ts_ms.sql b/synapse/storage/schema/main/delta/59/12account_validity_token_used_ts_ms.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/59/12account_validity_token_used_ts_ms.sql rename to synapse/storage/schema/main/delta/59/12account_validity_token_used_ts_ms.sql diff --git a/synapse/storage/databases/main/schema/delta/59/12presence_stream_instance.sql b/synapse/storage/schema/main/delta/59/12presence_stream_instance.sql similarity index 100% rename from synapse/storage/databases/main/schema/delta/59/12presence_stream_instance.sql rename to synapse/storage/schema/main/delta/59/12presence_stream_instance.sql diff --git a/synapse/storage/databases/main/schema/delta/59/12presence_stream_instance_seq.sql.postgres b/synapse/storage/schema/main/delta/59/12presence_stream_instance_seq.sql.postgres similarity index 100% rename from synapse/storage/databases/main/schema/delta/59/12presence_stream_instance_seq.sql.postgres rename to synapse/storage/schema/main/delta/59/12presence_stream_instance_seq.sql.postgres diff --git a/synapse/storage/databases/main/schema/full_schemas/16/application_services.sql b/synapse/storage/schema/main/full_schemas/16/application_services.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/application_services.sql rename to synapse/storage/schema/main/full_schemas/16/application_services.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/event_edges.sql b/synapse/storage/schema/main/full_schemas/16/event_edges.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/event_edges.sql rename to synapse/storage/schema/main/full_schemas/16/event_edges.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/event_signatures.sql b/synapse/storage/schema/main/full_schemas/16/event_signatures.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/event_signatures.sql rename to synapse/storage/schema/main/full_schemas/16/event_signatures.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/im.sql b/synapse/storage/schema/main/full_schemas/16/im.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/im.sql rename to synapse/storage/schema/main/full_schemas/16/im.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/keys.sql b/synapse/storage/schema/main/full_schemas/16/keys.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/keys.sql rename to synapse/storage/schema/main/full_schemas/16/keys.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/media_repository.sql 
b/synapse/storage/schema/main/full_schemas/16/media_repository.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/media_repository.sql rename to synapse/storage/schema/main/full_schemas/16/media_repository.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/presence.sql b/synapse/storage/schema/main/full_schemas/16/presence.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/presence.sql rename to synapse/storage/schema/main/full_schemas/16/presence.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/profiles.sql b/synapse/storage/schema/main/full_schemas/16/profiles.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/profiles.sql rename to synapse/storage/schema/main/full_schemas/16/profiles.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/push.sql b/synapse/storage/schema/main/full_schemas/16/push.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/push.sql rename to synapse/storage/schema/main/full_schemas/16/push.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/redactions.sql b/synapse/storage/schema/main/full_schemas/16/redactions.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/redactions.sql rename to synapse/storage/schema/main/full_schemas/16/redactions.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/room_aliases.sql b/synapse/storage/schema/main/full_schemas/16/room_aliases.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/room_aliases.sql rename to synapse/storage/schema/main/full_schemas/16/room_aliases.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/state.sql b/synapse/storage/schema/main/full_schemas/16/state.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/state.sql rename to synapse/storage/schema/main/full_schemas/16/state.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/transactions.sql b/synapse/storage/schema/main/full_schemas/16/transactions.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/transactions.sql rename to synapse/storage/schema/main/full_schemas/16/transactions.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/16/users.sql b/synapse/storage/schema/main/full_schemas/16/users.sql similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/16/users.sql rename to synapse/storage/schema/main/full_schemas/16/users.sql diff --git a/synapse/storage/databases/main/schema/full_schemas/54/full.sql.postgres b/synapse/storage/schema/main/full_schemas/54/full.sql.postgres similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/54/full.sql.postgres rename to synapse/storage/schema/main/full_schemas/54/full.sql.postgres diff --git a/synapse/storage/databases/main/schema/full_schemas/54/full.sql.sqlite b/synapse/storage/schema/main/full_schemas/54/full.sql.sqlite similarity index 100% rename from synapse/storage/databases/main/schema/full_schemas/54/full.sql.sqlite rename to synapse/storage/schema/main/full_schemas/54/full.sql.sqlite diff --git a/synapse/storage/databases/main/schema/full_schemas/54/stream_positions.sql b/synapse/storage/schema/main/full_schemas/54/stream_positions.sql similarity index 100% rename from 
synapse/storage/databases/main/schema/full_schemas/54/stream_positions.sql rename to synapse/storage/schema/main/full_schemas/54/stream_positions.sql diff --git a/synapse/storage/databases/state/schema/delta/23/drop_state_index.sql b/synapse/storage/schema/state/delta/23/drop_state_index.sql similarity index 100% rename from synapse/storage/databases/state/schema/delta/23/drop_state_index.sql rename to synapse/storage/schema/state/delta/23/drop_state_index.sql diff --git a/synapse/storage/databases/state/schema/delta/30/state_stream.sql b/synapse/storage/schema/state/delta/30/state_stream.sql similarity index 100% rename from synapse/storage/databases/state/schema/delta/30/state_stream.sql rename to synapse/storage/schema/state/delta/30/state_stream.sql diff --git a/synapse/storage/databases/state/schema/delta/32/remove_state_indices.sql b/synapse/storage/schema/state/delta/32/remove_state_indices.sql similarity index 100% rename from synapse/storage/databases/state/schema/delta/32/remove_state_indices.sql rename to synapse/storage/schema/state/delta/32/remove_state_indices.sql diff --git a/synapse/storage/databases/state/schema/delta/35/add_state_index.sql b/synapse/storage/schema/state/delta/35/add_state_index.sql similarity index 100% rename from synapse/storage/databases/state/schema/delta/35/add_state_index.sql rename to synapse/storage/schema/state/delta/35/add_state_index.sql diff --git a/synapse/storage/databases/state/schema/delta/35/state.sql b/synapse/storage/schema/state/delta/35/state.sql similarity index 100% rename from synapse/storage/databases/state/schema/delta/35/state.sql rename to synapse/storage/schema/state/delta/35/state.sql diff --git a/synapse/storage/databases/state/schema/delta/35/state_dedupe.sql b/synapse/storage/schema/state/delta/35/state_dedupe.sql similarity index 100% rename from synapse/storage/databases/state/schema/delta/35/state_dedupe.sql rename to synapse/storage/schema/state/delta/35/state_dedupe.sql diff --git a/synapse/storage/databases/state/schema/delta/47/state_group_seq.py b/synapse/storage/schema/state/delta/47/state_group_seq.py similarity index 100% rename from synapse/storage/databases/state/schema/delta/47/state_group_seq.py rename to synapse/storage/schema/state/delta/47/state_group_seq.py diff --git a/synapse/storage/databases/state/schema/delta/56/state_group_room_idx.sql b/synapse/storage/schema/state/delta/56/state_group_room_idx.sql similarity index 100% rename from synapse/storage/databases/state/schema/delta/56/state_group_room_idx.sql rename to synapse/storage/schema/state/delta/56/state_group_room_idx.sql diff --git a/synapse/storage/databases/state/schema/full_schemas/54/full.sql b/synapse/storage/schema/state/full_schemas/54/full.sql similarity index 100% rename from synapse/storage/databases/state/schema/full_schemas/54/full.sql rename to synapse/storage/schema/state/full_schemas/54/full.sql diff --git a/synapse/storage/databases/state/schema/full_schemas/54/sequence.sql.postgres b/synapse/storage/schema/state/full_schemas/54/sequence.sql.postgres similarity index 100% rename from synapse/storage/databases/state/schema/full_schemas/54/sequence.sql.postgres rename to synapse/storage/schema/state/full_schemas/54/sequence.sql.postgres diff --git a/tests/storage/test_cleanup_extrems.py b/tests/storage/test_cleanup_extrems.py index aa20588bbe5a..77c4fe721c1d 100644 --- a/tests/storage/test_cleanup_extrems.py +++ b/tests/storage/test_cleanup_extrems.py @@ -47,10 +47,8 @@ def run_background_update(self): ) schema_path = 
os.path.join( - prepare_database.dir_path, - "databases", + prepare_database.schema_path, "main", - "schema", "delta", "54", "delete_forward_extremities.sql", From 4df26abf28a13f005dc4b220a5f7eb96bc4df116 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 7 May 2021 12:57:21 +0100 Subject: [PATCH 117/222] Unpin attrs dep after new version has been released (#9946) c.f. #9936 --- changelog.d/9946.misc | 1 + synapse/python_dependencies.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/9946.misc diff --git a/changelog.d/9946.misc b/changelog.d/9946.misc new file mode 100644 index 000000000000..142ec5496f76 --- /dev/null +++ b/changelog.d/9946.misc @@ -0,0 +1 @@ +Unpin attrs dependency. diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 7709361f1676..45a6b82834a1 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -79,7 +79,7 @@ # Fedora 31 only has 19.1, so if we want to upgrade we should wait until 33 # is out in November.) # Note: 21.1.0 broke `/sync`, see #9936 - "attrs>=19.1.0,<21.1.0", + "attrs>=19.1.0,!=21.1.0", "netaddr>=0.7.18", "Jinja2>=2.9", "bleach>=1.4.3", From 765473567cd7e9520bdb9f85491bb5fe719c360b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 7 May 2021 14:01:57 +0100 Subject: [PATCH 118/222] Fix make_full_schema to create the db with the right options and user (#9931) --- changelog.d/9931.misc | 1 + scripts-dev/make_full_schema.sh | 19 ++++++++++--------- 2 files changed, 11 insertions(+), 9 deletions(-) create mode 100644 changelog.d/9931.misc diff --git a/changelog.d/9931.misc b/changelog.d/9931.misc new file mode 100644 index 000000000000..326adc7f3cde --- /dev/null +++ b/changelog.d/9931.misc @@ -0,0 +1 @@ +Minor fixes to the `make_full_schema.sh` script. diff --git a/scripts-dev/make_full_schema.sh b/scripts-dev/make_full_schema.sh index bc8f9786608d..39bf30d258bd 100755 --- a/scripts-dev/make_full_schema.sh +++ b/scripts-dev/make_full_schema.sh @@ -6,7 +6,7 @@ # It does so by having Synapse generate an up-to-date SQLite DB, then running # synapse_port_db to convert it to Postgres. It then dumps the contents of both. -POSTGRES_HOST="localhost" +export PGHOST="localhost" POSTGRES_DB_NAME="synapse_full_schema.$$" SQLITE_FULL_SCHEMA_OUTPUT_FILE="full.sql.sqlite" @@ -32,7 +32,7 @@ usage() { while getopts "p:co:h" opt; do case $opt in p) - POSTGRES_USERNAME=$OPTARG + export PGUSER=$OPTARG ;; c) # Print all commands that are being executed @@ -69,7 +69,7 @@ if [ ${#unsatisfied_requirements} -ne 0 ]; then exit 1 fi -if [ -z "$POSTGRES_USERNAME" ]; then +if [ -z "$PGUSER" ]; then echo "No postgres username supplied" usage exit 1 @@ -84,8 +84,9 @@ fi # Create the output directory if it doesn't exist mkdir -p "$OUTPUT_DIR" -read -rsp "Postgres password for '$POSTGRES_USERNAME': " POSTGRES_PASSWORD +read -rsp "Postgres password for '$PGUSER': " PGPASSWORD echo "" +export PGPASSWORD # Exit immediately if a command fails set -e @@ -131,9 +132,9 @@ report_stats: false database: name: "psycopg2" args: - user: "$POSTGRES_USERNAME" - host: "$POSTGRES_HOST" - password: "$POSTGRES_PASSWORD" + user: "$PGUSER" + host: "$PGHOST" + password: "$PGPASSWORD" database: "$POSTGRES_DB_NAME" # Suppress the key server warning. @@ -150,7 +151,7 @@ scripts-dev/update_database --database-config "$SQLITE_CONFIG" # Create the PostgreSQL database. echo "Creating postgres database..." 
-createdb $POSTGRES_DB_NAME +createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_DB_NAME" echo "Copying data from SQLite3 to Postgres with synapse_port_db..." if [ -z "$COVERAGE" ]; then @@ -181,7 +182,7 @@ DROP TABLE user_directory_search_docsize; DROP TABLE user_directory_search_stat; " sqlite3 "$SQLITE_DB" <<< "$SQL" -psql $POSTGRES_DB_NAME -U "$POSTGRES_USERNAME" -w <<< "$SQL" +psql "$POSTGRES_DB_NAME" -w <<< "$SQL" echo "Dumping SQLite3 schema to '$OUTPUT_DIR/$SQLITE_FULL_SCHEMA_OUTPUT_FILE'..." sqlite3 "$SQLITE_DB" ".dump" > "$OUTPUT_DIR/$SQLITE_FULL_SCHEMA_OUTPUT_FILE" From 6c847785494473c0a430f368a0d79c9202b8ddd0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 7 May 2021 14:54:09 +0100 Subject: [PATCH 119/222] Always cache 'event_to_prev_state_group' (#9950) Fixes regression in send PDU times introduced in #9905. --- changelog.d/9950.feature | 1 + synapse/handlers/message.py | 13 +++++++------ 2 files changed, 8 insertions(+), 6 deletions(-) create mode 100644 changelog.d/9950.feature diff --git a/changelog.d/9950.feature b/changelog.d/9950.feature new file mode 100644 index 000000000000..96a0e7f09fbc --- /dev/null +++ b/changelog.d/9950.feature @@ -0,0 +1 @@ +Improve performance of sending events for worker-based deployments using Redis. diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 8729332d4b86..5afb7fc261b5 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1050,6 +1050,13 @@ async def cache_joined_hosts_for_event( ) if state_entry.state_group: + await self._external_cache.set( + "event_to_prev_state_group", + event.event_id, + state_entry.state_group, + expiry_ms=60 * 60 * 1000, + ) + if state_entry.state_group in self._external_cache_joined_hosts_updates: return @@ -1057,12 +1064,6 @@ async def cache_joined_hosts_for_event( # Note that the expiry times must be larger than the expiry time in # _external_cache_joined_hosts_updates. - await self._external_cache.set( - "event_to_prev_state_group", - event.event_id, - state_entry.state_group, - expiry_ms=60 * 60 * 1000, - ) await self._external_cache.set( "get_joined_hosts", str(state_entry.state_group), From 51065c44bb0875373fada2838e69e4bc5005a95d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 10 May 2021 13:02:55 +0100 Subject: [PATCH 120/222] Fix port_db on empty db (#9930) ... and test it. --- .buildkite/scripts/create_postgres_db.py | 36 ---------------------- .buildkite/scripts/postgres_exec.py | 31 +++++++++++++++++++ .buildkite/scripts/test_synapse_port_db.sh | 35 +++++++++++++++------ .github/workflows/tests.yml | 2 +- changelog.d/9930.bugfix | 1 + scripts/synapse_port_db | 18 ++++++----- 6 files changed, 69 insertions(+), 54 deletions(-) delete mode 100755 .buildkite/scripts/create_postgres_db.py create mode 100755 .buildkite/scripts/postgres_exec.py create mode 100644 changelog.d/9930.bugfix diff --git a/.buildkite/scripts/create_postgres_db.py b/.buildkite/scripts/create_postgres_db.py deleted file mode 100755 index cc829db2161d..000000000000 --- a/.buildkite/scripts/create_postgres_db.py +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env python -# Copyright 2019 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging - -from synapse.storage.engines import create_engine - -logger = logging.getLogger("create_postgres_db") - -if __name__ == "__main__": - # Create a PostgresEngine. - db_engine = create_engine({"name": "psycopg2", "args": {}}) - - # Connect to postgres to create the base database. - # We use "postgres" as a database because it's bound to exist and the "synapse" one - # doesn't exist yet. - db_conn = db_engine.module.connect( - user="postgres", host="postgres", password="postgres", dbname="postgres" - ) - db_conn.autocommit = True - cur = db_conn.cursor() - cur.execute("CREATE DATABASE synapse;") - cur.close() - db_conn.close() diff --git a/.buildkite/scripts/postgres_exec.py b/.buildkite/scripts/postgres_exec.py new file mode 100755 index 000000000000..086b391724ec --- /dev/null +++ b/.buildkite/scripts/postgres_exec.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys + +import psycopg2 + +# a very simple replacement for `psql`, to make up for the lack of the postgres client +# libraries in the synapse docker image. + +# We use "postgres" as a database because it's bound to exist and the "synapse" one +# doesn't exist yet. +db_conn = psycopg2.connect( + user="postgres", host="postgres", password="postgres", dbname="postgres" +) +db_conn.autocommit = True +cur = db_conn.cursor() +for c in sys.argv[1:]: + cur.execute(c) diff --git a/.buildkite/scripts/test_synapse_port_db.sh b/.buildkite/scripts/test_synapse_port_db.sh index 8914319e3825..a7e245476937 100755 --- a/.buildkite/scripts/test_synapse_port_db.sh +++ b/.buildkite/scripts/test_synapse_port_db.sh @@ -1,10 +1,10 @@ #!/usr/bin/env bash # -# Test script for 'synapse_port_db', which creates a virtualenv, installs Synapse along -# with additional dependencies needed for the test (such as coverage or the PostgreSQL -# driver), update the schema of the test SQLite database and run background updates on it, -# create an empty test database in PostgreSQL, then run the 'synapse_port_db' script to -# test porting the SQLite database to the PostgreSQL database (with coverage). +# Test script for 'synapse_port_db'. +# - sets up synapse and deps +# - runs the port script on a prepopulated test sqlite db +# - also runs it against a new sqlite db + set -xe cd `dirname $0`/../.. @@ -22,15 +22,32 @@ echo "--- Generate the signing key" # Generate the server's signing key.
python -m synapse.app.homeserver --generate-keys -c .buildkite/sqlite-config.yaml -echo "--- Prepare the databases" +echo "--- Prepare test database" # Make sure the SQLite3 database is using the latest schema and has no pending background update. scripts-dev/update_database --database-config .buildkite/sqlite-config.yaml # Create the PostgreSQL database. -./.buildkite/scripts/create_postgres_db.py +./.buildkite/scripts/postgres_exec.py "CREATE DATABASE synapse" + +echo "+++ Run synapse_port_db against test database" +coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml + +##### + +# Now do the same again, on an empty database. + +echo "--- Prepare empty SQLite database" + +# we do this by deleting the sqlite db, and then doing the same again. +rm .buildkite/test_db.db + +scripts-dev/update_database --database-config .buildkite/sqlite-config.yaml -echo "+++ Run synapse_port_db" +# re-create the PostgreSQL database. +./.buildkite/scripts/postgres_exec.py \ + "DROP DATABASE synapse" \ + "CREATE DATABASE synapse" -# Run the script +echo "+++ Run synapse_port_db against empty database" coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 12c82ac620fb..e7f3be1b4ea9 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -273,7 +273,7 @@ jobs: python-version: ${{ matrix.python-version }} - name: Patch Buildkite-specific test scripts run: | - sed -i -e 's/host="postgres"/host="localhost"/' .buildkite/scripts/create_postgres_db.py + sed -i -e 's/host="postgres"/host="localhost"/' .buildkite/scripts/postgres_exec.py sed -i -e 's/host: postgres/host: localhost/' .buildkite/postgres-config.yaml sed -i -e 's|/src/||' .buildkite/{sqlite,postgres}-config.yaml sed -i -e 's/\$TOP/\$GITHUB_WORKSPACE/' .coveragerc diff --git a/changelog.d/9930.bugfix b/changelog.d/9930.bugfix new file mode 100644 index 000000000000..9b22ed4458bb --- /dev/null +++ b/changelog.d/9930.bugfix @@ -0,0 +1 @@ +Fix bugs introduced in v1.23.0 which made the PostgreSQL port script fail when run with a newly-created SQLite database. diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index f0c93d522622..5fb5bb35f77d 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -913,10 +913,11 @@ class Porter(object): (curr_forward_id + 1,), ) - txn.execute( - "ALTER SEQUENCE events_backfill_stream_seq RESTART WITH %s", - (curr_backward_id + 1,), - ) + if curr_backward_id: + txn.execute( + "ALTER SEQUENCE events_backfill_stream_seq RESTART WITH %s", + (curr_backward_id + 1,), + ) await self.postgres_store.db_pool.runInteraction( "_setup_events_stream_seqs", _setup_events_stream_seqs_set_pos, @@ -954,10 +955,11 @@ class Porter(object): (curr_chain_id,), ) - await self.postgres_store.db_pool.runInteraction( - "_setup_event_auth_chain_id", r, - ) - + if curr_chain_id is not None: + await self.postgres_store.db_pool.runInteraction( + "_setup_event_auth_chain_id", + r, + ) ############################################## From 2b2985b5cfc4267dfb0f6b900e1a0f69f3f2cdc5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 10 May 2021 13:29:02 +0100 Subject: [PATCH 121/222] Improve performance of backfilling in large rooms. (#9935) We were pulling the full auth chain for the room out of the DB each time we backfilled, which can be *huge* for large rooms and is totally unnecessary. 
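As an illustrative sketch (not part of the patch itself, and simplified from the diff below, which also persists what it fetches and logs anything still missing): the core of the change is to diff the wanted event IDs against what the store has already seen, and only request the remainder over federation. `find_missing_ids` is a hypothetical helper; `have_seen_events` is the store call used in the diff.

```python
from typing import Collection, Set


async def find_missing_ids(store, event_ids: Collection[str]) -> Set[str]:
    # `have_seen_events` returns the subset of IDs already persisted locally;
    # only the difference needs fetching, so the bulk of a large room's auth
    # chain is never pulled out of the database.
    seen = await store.have_seen_events(event_ids)
    return set(event_ids) - set(seen)
```
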
--- changelog.d/9935.feature | 1 + synapse/handlers/federation.py | 123 +++++++++++++++------------------ 2 files changed, 55 insertions(+), 69 deletions(-) create mode 100644 changelog.d/9935.feature diff --git a/changelog.d/9935.feature b/changelog.d/9935.feature new file mode 100644 index 000000000000..eeda5bf50e23 --- /dev/null +++ b/changelog.d/9935.feature @@ -0,0 +1 @@ +Improve performance of backfilling in large rooms. diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index e8330a2b50db..798ed75b30fa 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -552,8 +552,12 @@ async def _get_state_for_room( destination: str, room_id: str, event_id: str, - ) -> Tuple[List[EventBase], List[EventBase]]: - """Requests all of the room state at a given event from a remote homeserver. + ) -> List[EventBase]: + """Requests all of the room state at a given event from a remote + homeserver. + + Will also fetch any missing events reported in the `auth_chain_ids` + section of `/state_ids`. Args: destination: The remote homeserver to query for the state. @@ -561,8 +565,7 @@ async def _get_state_for_room( event_id: The id of the event we want the state at. Returns: - A list of events in the state, not including the event itself, and - a list of events in the auth chain for the given event. + A list of events in the state, not including the event itself. """ ( state_event_ids, @@ -571,68 +574,53 @@ async def _get_state_for_room( destination, room_id, event_id=event_id ) - desired_events = set(state_event_ids + auth_event_ids) - - event_map = await self._get_events_from_store_or_dest( - destination, room_id, desired_events - ) + # Fetch the state events from the DB, and check we have the auth events. + event_map = await self.store.get_events(state_event_ids, allow_rejected=True) + auth_events_in_store = await self.store.have_seen_events(auth_event_ids) - failed_to_fetch = desired_events - event_map.keys() - if failed_to_fetch: - logger.warning( - "Failed to fetch missing state/auth events for %s %s", - event_id, - failed_to_fetch, + # Check for missing events. We handle state and auth events separately, + # as we want to pull the state from the DB, but we don't for the auth + # events. (Note: we likely won't use the majority of the auth chain, and + # it can be *huge* for large rooms, so it's worth ensuring that we don't + # unnecessarily pull it from the DB). + missing_state_events = set(state_event_ids) - set(event_map) + missing_auth_events = set(auth_event_ids) - set(auth_events_in_store) + if missing_state_events or missing_auth_events: + await self._get_events_and_persist( + destination=destination, + room_id=room_id, + events=missing_state_events | missing_auth_events, ) - remote_state = [ - event_map[e_id] for e_id in state_event_ids if e_id in event_map - ] - - auth_chain = [event_map[e_id] for e_id in auth_event_ids if e_id in event_map] - auth_chain.sort(key=lambda e: e.depth) - - return remote_state, auth_chain - - async def _get_events_from_store_or_dest( - self, destination: str, room_id: str, event_ids: Iterable[str] - ) -> Dict[str, EventBase]: - """Fetch events from a remote destination, checking if we already have them. - - Persists any events we don't already have as outliers. - - If we fail to fetch any of the events, a warning will be logged, and the event - will be omitted from the result. Likewise, any events which turn out not to - be in the given room.
+ if missing_state_events: + new_events = await self.store.get_events( + missing_state_events, allow_rejected=True + ) + event_map.update(new_events) - This function *does not* automatically get missing auth events of the - newly fetched events. Callers must include the full auth chain of - of the missing events in the `event_ids` argument, to ensure that any - missing auth events are correctly fetched. + missing_state_events.difference_update(new_events) - Returns: - map from event_id to event - """ - fetched_events = await self.store.get_events(event_ids, allow_rejected=True) - - missing_events = set(event_ids) - fetched_events.keys() + if missing_state_events: + logger.warning( + "Failed to fetch missing state events for %s %s", + event_id, + missing_state_events, + ) - if missing_events: - logger.debug( - "Fetching unknown state/auth events %s for room %s", - missing_events, - room_id, - ) + if missing_auth_events: + auth_events_in_store = await self.store.have_seen_events( + missing_auth_events + ) + missing_auth_events.difference_update(auth_events_in_store) - await self._get_events_and_persist( - destination=destination, room_id=room_id, events=missing_events - ) + if missing_auth_events: + logger.warning( + "Failed to fetch missing auth events for %s %s", + event_id, + missing_auth_events, + ) - # we need to make sure we re-load from the database to get the rejected - # state correct. - fetched_events.update( - (await self.store.get_events(missing_events, allow_rejected=True)) - ) + remote_state = list(event_map.values()) # check for events which were in the wrong room. # @@ -640,8 +628,8 @@ async def _get_events_from_store_or_dest( # auth_events at an event in room A are actually events in room B bad_events = [ - (event_id, event.room_id) - for event_id, event in fetched_events.items() + (event.event_id, event.room_id) + for event in remote_state if event.room_id != room_id ] @@ -658,9 +646,10 @@ async def _get_events_from_store_or_dest( room_id, ) - del fetched_events[bad_event_id] + if bad_events: + remote_state = [e for e in remote_state if e.room_id == room_id] - return fetched_events + return remote_state async def _get_state_after_missing_prev_event( self, @@ -963,27 +952,23 @@ async def backfill( # For each edge get the current state. 
- auth_events = {} state_events = {} events_to_state = {} for e_id in edges: - state, auth = await self._get_state_for_room( + state = await self._get_state_for_room( destination=dest, room_id=room_id, event_id=e_id, ) - auth_events.update({a.event_id: a for a in auth}) - auth_events.update({s.event_id: s for s in state}) state_events.update({s.event_id: s for s in state}) events_to_state[e_id] = state required_auth = { a_id - for event in events - + list(state_events.values()) - + list(auth_events.values()) + for event in events + list(state_events.values()) for a_id in event.auth_event_ids() } + auth_events = await self.store.get_events(required_auth, allow_rejected=True) auth_events.update( {e_id: event_map[e_id] for e_id in required_auth if e_id in event_map} ) From 03318a766cac9f8b053db2214d9c332a977d226c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 11 May 2021 10:47:23 +0100 Subject: [PATCH 122/222] Merge pull request from GHSA-x345-32rc-8h85 * tests for push rule pattern matching * tests for acl pattern matching * factor out common `re.escape` * Factor out common re.compile * Factor out common anchoring code * add word_boundary support to `glob_to_regex` * Use `glob_to_regex` in push rule evaluator NB that this drops support for character classes. I don't think anyone ever used them. * Improve efficiency of globs with multiple wildcards The idea here is that we compress multiple `*` globs into a single `.*`. We also need to consider `?`, since `*?*` is as hard to implement efficiently as `**`. * add assertion on regex pattern * Fix mypy * Simplify glob_to_regex * Inline the glob_to_regex helper function Signed-off-by: Dan Callahan * Moar comments Signed-off-by: Dan Callahan Co-authored-by: Dan Callahan --- synapse/config/tls.py | 4 +- synapse/push/push_rule_evaluator.py | 55 +------ synapse/util/__init__.py | 61 ++++++-- tests/federation/test_federation_server.py | 19 +++ tests/push/test_push_rule_evaluator.py | 166 +++++++++++++++++++++ tests/util/test_glob_to_regex.py | 59 ++++++++ 6 files changed, 296 insertions(+), 68 deletions(-) create mode 100644 tests/util/test_glob_to_regex.py diff --git a/synapse/config/tls.py b/synapse/config/tls.py index b0418697583e..7df4e4c3e6b4 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -17,7 +17,7 @@ import warnings from datetime import datetime from hashlib import sha256 -from typing import List, Optional +from typing import List, Optional, Pattern from unpaddedbase64 import encode_base64 @@ -124,7 +124,7 @@ def read_config(self, config: dict, config_dir_path: str, **kwargs): fed_whitelist_entries = [] # Support globs (*) in whitelist values - self.federation_certificate_verification_whitelist = [] # type: List[str] + self.federation_certificate_verification_whitelist = [] # type: List[Pattern] for entry in fed_whitelist_entries: try: entry_regex = glob_to_regex(entry.encode("ascii").decode("ascii")) diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py index 49ecb38522ae..98b90a4f516e 100644 --- a/synapse/push/push_rule_evaluator.py +++ b/synapse/push/push_rule_evaluator.py @@ -19,6 +19,7 @@ from synapse.events import EventBase from synapse.types import UserID +from synapse.util import glob_to_regex, re_word_boundary from synapse.util.caches.lrucache import LruCache logger = logging.getLogger(__name__) @@ -183,7 +184,7 @@ def _contains_display_name(self, display_name: str) -> bool: r = regex_cache.get((display_name, False, True), 
None) if not r: r1 = re.escape(display_name) - r1 = _re_word_boundary(r1) + r1 = re_word_boundary(r1) r = re.compile(r1, flags=re.IGNORECASE) regex_cache[(display_name, False, True)] = r @@ -212,7 +213,7 @@ def _glob_matches(glob: str, value: str, word_boundary: bool = False) -> bool: try: r = regex_cache.get((glob, True, word_boundary), None) if not r: - r = _glob_to_re(glob, word_boundary) + r = glob_to_regex(glob, word_boundary) regex_cache[(glob, True, word_boundary)] = r return bool(r.search(value)) except re.error: @@ -220,56 +221,6 @@ def _glob_matches(glob: str, value: str, word_boundary: bool = False) -> bool: return False -def _glob_to_re(glob: str, word_boundary: bool) -> Pattern: - """Generates regex for a given glob. - - Args: - glob - word_boundary: Whether to match against word boundaries or entire string. - """ - if IS_GLOB.search(glob): - r = re.escape(glob) - - r = r.replace(r"\*", ".*?") - r = r.replace(r"\?", ".") - - # handle [abc], [a-z] and [!a-z] style ranges. - r = GLOB_REGEX.sub( - lambda x: ( - "[%s%s]" % (x.group(1) and "^" or "", x.group(2).replace(r"\\\-", "-")) - ), - r, - ) - if word_boundary: - r = _re_word_boundary(r) - - return re.compile(r, flags=re.IGNORECASE) - else: - r = "^" + r + "$" - - return re.compile(r, flags=re.IGNORECASE) - elif word_boundary: - r = re.escape(glob) - r = _re_word_boundary(r) - - return re.compile(r, flags=re.IGNORECASE) - else: - r = "^" + re.escape(glob) + "$" - return re.compile(r, flags=re.IGNORECASE) - - -def _re_word_boundary(r: str) -> str: - """ - Adds word boundary characters to the start and end of an - expression to require that the match occur as a whole word, - but do so respecting the fact that strings starting or ending - with non-word characters will change word boundaries. - """ - # we can't use \b as it chokes on unicode. however \W seems to be okay - # as shorthand for [^0-9A-Za-z_]. - return r"(^|\W)%s(\W|$)" % (r,) - - def _flatten_dict( d: Union[EventBase, dict], prefix: Optional[List[str]] = None, diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 0f84fa3f4e3e..b69f562ca586 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -15,6 +15,7 @@ import json import logging import re +from typing import Pattern import attr from frozendict import frozendict @@ -26,6 +27,9 @@ logger = logging.getLogger(__name__) +_WILDCARD_RUN = re.compile(r"([\?\*]+)") + + def _reject_invalid_json(val): """Do not allow Infinity, -Infinity, or NaN values in JSON.""" raise ValueError("Invalid JSON value: '%s'" % val) @@ -158,25 +162,54 @@ def log_failure(failure, msg, consumeErrors=True): return failure -def glob_to_regex(glob): +def glob_to_regex(glob: str, word_boundary: bool = False) -> Pattern: """Converts a glob to a compiled regex object. - The regex is anchored at the beginning and end of the string. - Args: - glob (str) + glob: pattern to match + word_boundary: If True, the pattern will be allowed to match at word boundaries + anywhere in the string. Otherwise, the pattern is anchored at the start and + end of the string. Returns: - re.RegexObject + compiled regex pattern """ - res = "" - for c in glob: - if c == "*": - res = res + ".*" - elif c == "?": - res = res + "." + + # Patterns with wildcards must be simplified to avoid performance cliffs + # - The glob `?**?**?` is equivalent to the glob `???*` + # - The glob `???*` is equivalent to the regex `.{3,}` + chunks = [] + for chunk in _WILDCARD_RUN.split(glob): + # No wildcards? 
re.escape() + if not _WILDCARD_RUN.match(chunk): + chunks.append(re.escape(chunk)) + continue + + # Wildcards? Simplify. + qmarks = chunk.count("?") + if "*" in chunk: + chunks.append(".{%d,}" % qmarks) else: - res = res + re.escape(c) + chunks.append(".{%d}" % qmarks) + + res = "".join(chunks) - # \A anchors at start of string, \Z at end of string - return re.compile(r"\A" + res + r"\Z", re.IGNORECASE) + if word_boundary: + res = re_word_boundary(res) + else: + # \A anchors at start of string, \Z at end of string + res = r"\A" + res + r"\Z" + + return re.compile(res, re.IGNORECASE) + + +def re_word_boundary(r: str) -> str: + """ + Adds word boundary characters to the start and end of an + expression to require that the match occur as a whole word, + but do so respecting the fact that strings starting or ending + with non-word characters will change word boundaries. + """ + # we can't use \b as it chokes on unicode. however \W seems to be okay + # as shorthand for [^0-9A-Za-z_]. + return r"(^|\W)%s(\W|$)" % (r,) diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py index 8508b6bd3b53..173789156459 100644 --- a/tests/federation/test_federation_server.py +++ b/tests/federation/test_federation_server.py @@ -74,6 +74,25 @@ def test_block_ip_literals(self): self.assertFalse(server_matches_acl_event("[1:2::]", e)) self.assertTrue(server_matches_acl_event("1:2:3:4", e)) + def test_wildcard_matching(self): + e = _create_acl_event({"allow": ["good*.com"]}) + self.assertTrue( + server_matches_acl_event("good.com", e), + "* matches 0 characters", + ) + self.assertTrue( + server_matches_acl_event("GOOD.COM", e), + "pattern is case-insensitive", + ) + self.assertTrue( + server_matches_acl_event("good.aa.com", e), + "* matches several characters, including '.'", + ) + self.assertFalse( + server_matches_acl_event("ishgood.com", e), + "pattern does not allow prefixes", + ) + class StateQueryTests(unittest.FederatingHomeserverTestCase): diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py index 45906ce72083..a52e89e4074b 100644 --- a/tests/push/test_push_rule_evaluator.py +++ b/tests/push/test_push_rule_evaluator.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Any, Dict + from synapse.api.room_versions import RoomVersions from synapse.events import FrozenEvent from synapse.push import push_rule_evaluator @@ -66,6 +68,170 @@ def test_display_name(self): # A display name with spaces should work fine. self.assertTrue(evaluator.matches(condition, "@user:test", "foo bar")) + def _assert_matches( + self, condition: Dict[str, Any], content: Dict[str, Any], msg=None + ) -> None: + evaluator = self._get_evaluator(content) + self.assertTrue(evaluator.matches(condition, "@user:test", "display_name"), msg) + + def _assert_not_matches( + self, condition: Dict[str, Any], content: Dict[str, Any], msg=None + ) -> None: + evaluator = self._get_evaluator(content) + self.assertFalse( + evaluator.matches(condition, "@user:test", "display_name"), msg + ) + + def test_event_match_body(self): + """Check that event_match conditions on content.body work as expected""" + + # if the key is `content.body`, the pattern matches substrings. 
+ + # non-wildcards should match + condition = { + "kind": "event_match", + "key": "content.body", + "pattern": "foobaz", + } + self._assert_matches( + condition, + {"body": "aaa FoobaZ zzz"}, + "patterns should match and be case-insensitive", + ) + self._assert_not_matches( + condition, + {"body": "aa xFoobaZ yy"}, + "pattern should only match at word boundaries", + ) + self._assert_not_matches( + condition, + {"body": "aa foobazx yy"}, + "pattern should only match at word boundaries", + ) + + # wildcards should match + condition = { + "kind": "event_match", + "key": "content.body", + "pattern": "f?o*baz", + } + + self._assert_matches( + condition, + {"body": "aaa FoobarbaZ zzz"}, + "* should match string and pattern should be case-insensitive", + ) + self._assert_matches( + condition, {"body": "aa foobaz yy"}, "* should match 0 characters" + ) + self._assert_not_matches( + condition, {"body": "aa fobbaz yy"}, "? should not match 0 characters" + ) + self._assert_not_matches( + condition, {"body": "aa fiiobaz yy"}, "? should not match 2 characters" + ) + self._assert_not_matches( + condition, + {"body": "aa xfooxbaz yy"}, + "pattern should only match at word boundaries", + ) + self._assert_not_matches( + condition, + {"body": "aa fooxbazx yy"}, + "pattern should only match at word boundaries", + ) + + # test backslashes + condition = { + "kind": "event_match", + "key": "content.body", + "pattern": r"f\oobaz", + } + self._assert_matches( + condition, + {"body": r"F\oobaz"}, + "backslash should match itself", + ) + condition = { + "kind": "event_match", + "key": "content.body", + "pattern": r"f\?obaz", + } + self._assert_matches( + condition, + {"body": r"F\oobaz"}, + r"? after \ should match any character", + ) + + def test_event_match_non_body(self): + """Check that event_match conditions on other keys work as expected""" + + # if the key is anything other than 'content.body', the pattern must match the + # whole value. + + # non-wildcards should match + condition = { + "kind": "event_match", + "key": "content.value", + "pattern": "foobaz", + } + self._assert_matches( + condition, + {"value": "FoobaZ"}, + "patterns should match and be case-insensitive", + ) + self._assert_not_matches( + condition, + {"value": "xFoobaZ"}, + "pattern should only match at the start/end of the value", + ) + self._assert_not_matches( + condition, + {"value": "FoobaZz"}, + "pattern should only match at the start/end of the value", + ) + + # wildcards should match + condition = { + "kind": "event_match", + "key": "content.value", + "pattern": "f?o*baz", + } + self._assert_matches( + condition, + {"value": "FoobarbaZ"}, + "* should match string and pattern should be case-insensitive", + ) + self._assert_matches( + condition, {"value": "foobaz"}, "* should match 0 characters" + ) + self._assert_not_matches( + condition, {"value": "fobbaz"}, "? should not match 0 characters" + ) + self._assert_not_matches( + condition, {"value": "fiiobaz"}, "? 
should not match 2 characters" + ) + self._assert_not_matches( + condition, + {"value": "xfooxbaz"}, + "pattern should only match at the start/end of the value", + ) + self._assert_not_matches( + condition, + {"value": "fooxbazx"}, + "pattern should only match at the start/end of the value", + ) + self._assert_not_matches( + condition, + {"value": "x\nfooxbaz"}, + "pattern should not match after a newline", + ) + self._assert_not_matches( + condition, + {"value": "fooxbaz\nx"}, + "pattern should not match before a newline", + ) + def test_no_body(self): """Not having a body shouldn't break the evaluator.""" evaluator = self._get_evaluator({}) diff --git a/tests/util/test_glob_to_regex.py b/tests/util/test_glob_to_regex.py new file mode 100644 index 000000000000..220accb92b65 --- /dev/null +++ b/tests/util/test_glob_to_regex.py @@ -0,0 +1,59 @@ +# Copyright 2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from synapse.util import glob_to_regex + +from tests.unittest import TestCase + + +class GlobToRegexTestCase(TestCase): + def test_literal_match(self): + """patterns without wildcards should match""" + pat = glob_to_regex("foobaz") + self.assertTrue( + pat.match("FoobaZ"), "patterns should match and be case-insensitive" + ) + self.assertFalse( + pat.match("x foobaz"), "pattern should not match at word boundaries" + ) + + def test_wildcard_match(self): + pat = glob_to_regex("f?o*baz") + + self.assertTrue( + pat.match("FoobarbaZ"), + "* should match string and pattern should be case-insensitive", + ) + self.assertTrue(pat.match("foobaz"), "* should match 0 characters") + self.assertFalse(pat.match("fooxaz"), "the character after * must match") + self.assertFalse(pat.match("fobbaz"), "? should not match 0 characters") + self.assertFalse(pat.match("fiiobaz"), "? should not match 2 characters") + + def test_multi_wildcard(self): + """patterns with multiple wildcards in a row should match""" + pat = glob_to_regex("**baz") + self.assertTrue(pat.match("agsgsbaz"), "** should match any string") + self.assertTrue(pat.match("baz"), "** should match the empty string") + self.assertEqual(pat.pattern, r"\A.{0,}baz\Z") + + pat = glob_to_regex("*?baz") + self.assertTrue(pat.match("agsgsbaz"), "*? should match any string") + self.assertTrue(pat.match("abaz"), "*? should match a single char") + self.assertFalse(pat.match("baz"), "*? should not match the empty string") + self.assertEqual(pat.pattern, r"\A.{1,}baz\Z") + + pat = glob_to_regex("a?*?*?baz") + self.assertTrue(pat.match("a g baz"), "?*?*? should match 3 chars") + self.assertFalse(pat.match("a..baz"), "?*?*? should not match 2 chars") + self.assertTrue(pat.match("a.gg.baz"), "?*?*? 
should match 4 chars") + self.assertEqual(pat.pattern, r"\Aa.{3,}baz\Z") From 7967b36efe6a033f46cd882d0b31a8c3eb18631c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 11 May 2021 11:02:56 +0100 Subject: [PATCH 123/222] Fix `m.room_key_request` to-device messages (#9961) fixes #9960 --- changelog.d/9961.bugfix | 1 + synapse/api/constants.py | 5 +++- synapse/federation/federation_server.py | 19 -------------- synapse/handlers/devicemessage.py | 33 ++++++++++++++++++++----- 4 files changed, 32 insertions(+), 26 deletions(-) create mode 100644 changelog.d/9961.bugfix diff --git a/changelog.d/9961.bugfix b/changelog.d/9961.bugfix new file mode 100644 index 000000000000..e26d141a5316 --- /dev/null +++ b/changelog.d/9961.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.29.0 which caused `m.room_key_request` to-device messages sent from one user to another to be dropped. diff --git a/synapse/api/constants.py b/synapse/api/constants.py index ab628b2be7fd..3940da5c8880 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -116,9 +116,12 @@ class EventTypes: MSC1772_SPACE_PARENT = "org.matrix.msc1772.space.parent" +class ToDeviceEventTypes: + RoomKeyRequest = "m.room_key_request" + + class EduTypes: Presence = "m.presence" - RoomKeyRequest = "m.room_key_request" class RejectedReason: diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index b729a69203d0..ace30aa45078 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -44,7 +44,6 @@ SynapseError, UnsupportedRoomVersionError, ) -from synapse.api.ratelimiting import Ratelimiter from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.events import EventBase from synapse.federation.federation_base import FederationBase, event_from_pdu_json @@ -865,14 +864,6 @@ def __init__(self, hs: "HomeServer"): # EDU received. self._edu_type_to_instance = {} # type: Dict[str, List[str]] - # A rate limiter for incoming room key requests per origin. - self._room_key_request_rate_limiter = Ratelimiter( - store=hs.get_datastore(), - clock=self.clock, - rate_hz=self.config.rc_key_requests.per_second, - burst_count=self.config.rc_key_requests.burst_count, - ) - def register_edu_handler( self, edu_type: str, handler: Callable[[str, JsonDict], Awaitable[None]] ) -> None: @@ -926,16 +917,6 @@ async def on_edu(self, edu_type: str, origin: str, content: dict) -> None: if not self.config.use_presence and edu_type == EduTypes.Presence: return - # If the incoming room key requests from a particular origin are over - # the limit, drop them. 
- if ( - edu_type == EduTypes.RoomKeyRequest - and not await self._room_key_request_rate_limiter.can_do_action( - None, origin - ) - ): - return - # Check if we have a handler on this instance handler = self.edu_handlers.get(edu_type) if handler: diff --git a/synapse/handlers/devicemessage.py b/synapse/handlers/devicemessage.py index c5d631de07a9..580b941595cf 100644 --- a/synapse/handlers/devicemessage.py +++ b/synapse/handlers/devicemessage.py @@ -15,7 +15,7 @@ import logging from typing import TYPE_CHECKING, Any, Dict -from synapse.api.constants import EduTypes +from synapse.api.constants import ToDeviceEventTypes from synapse.api.errors import SynapseError from synapse.api.ratelimiting import Ratelimiter from synapse.logging.context import run_in_background @@ -79,6 +79,8 @@ def __init__(self, hs: "HomeServer"): ReplicationUserDevicesResyncRestServlet.make_client(hs) ) + # a rate limiter for room key requests. The keys are + # (sending_user_id, sending_device_id). self._ratelimiter = Ratelimiter( store=self.store, clock=hs.get_clock(), @@ -100,12 +102,25 @@ async def on_direct_to_device_edu(self, origin: str, content: JsonDict) -> None: for user_id, by_device in content["messages"].items(): # we use UserID.from_string to catch invalid user ids if not self.is_mine(UserID.from_string(user_id)): - logger.warning("Request for keys for non-local user %s", user_id) + logger.warning("To-device message to non-local user %s", user_id) raise SynapseError(400, "Not a user here") if not by_device: continue + # Ratelimit key requests by the sending user. + if message_type == ToDeviceEventTypes.RoomKeyRequest: + allowed, _ = await self._ratelimiter.can_do_action( + None, (sender_user_id, None) + ) + if not allowed: + logger.info( + "Dropping room_key_request from %s to %s due to rate limit", + sender_user_id, + user_id, + ) + continue + messages_by_device = { device_id: { "content": message_content, @@ -192,13 +207,19 @@ async def send_device_message( for user_id, by_device in messages.items(): # Ratelimit local cross-user key requests by the sending device. if ( - message_type == EduTypes.RoomKeyRequest + message_type == ToDeviceEventTypes.RoomKeyRequest and user_id != sender_user_id - and await self._ratelimiter.can_do_action( + ): + allowed, _ = await self._ratelimiter.can_do_action( requester, (sender_user_id, requester.device_id) ) - ): - continue + if not allowed: + logger.info( + "Dropping room_key_request from %s to %s due to rate limit", + sender_user_id, + user_id, + ) + continue # we use UserID.from_string to catch invalid user ids if self.is_mine(UserID.from_string(user_id)): From b378d98c8f93412bc51f0d4b9c4aa2d1701cf523 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 11 May 2021 11:04:03 +0100 Subject: [PATCH 124/222] Add debug logging for issue #9533 (#9959) Hopefully this will help us track down where to-device messages are getting lost/delayed. --- changelog.d/9959.misc | 1 + .../federation/sender/per_destination_queue.py | 9 +++++++++ synapse/logging/__init__.py | 7 ++++++- synapse/notifier.py | 8 ++++++++ synapse/replication/tcp/client.py | 1 - synapse/storage/databases/main/deviceinbox.py | 18 ++++++++++++++++++ 6 files changed, 42 insertions(+), 2 deletions(-) create mode 100644 changelog.d/9959.misc diff --git a/changelog.d/9959.misc b/changelog.d/9959.misc new file mode 100644 index 000000000000..7231f29d79aa --- /dev/null +++ b/changelog.d/9959.misc @@ -0,0 +1 @@ +Add debug logging for lost/delayed to-device messages. 
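The diffs that follow thread a single dedicated logger through each hop a to-device message takes (queueing for federation, waking sync streams, storing to the inbox), so the investigation can be switched on without raising the log level of the whole package. A rough standalone sketch of the same pattern, under assumed names (`myapp.issue123_debug` and `send_batch` are illustrative, not part of Synapse):

import logging

# One logger, namespaced for the investigation: it can be set to DEBUG on
# its own while the rest of the application stays at INFO.
issue123_logger = logging.getLogger("myapp.issue123_debug")


def send_batch(destination: str, messages: list) -> None:
    if messages:
        # Log a breadcrumb (count + destination) that can be correlated
        # with the matching log line on the receiving side.
        issue123_logger.debug(
            "Sending %i messages to %s", len(messages), destination
        )


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    # Flip on just this logger while chasing the issue.
    issue123_logger.setLevel(logging.DEBUG)
    send_batch("example.org", ["m1", "m2"])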
diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index 3b053ebcfb08..3a2efd56eea9 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -28,6 +28,7 @@ from synapse.events import EventBase from synapse.federation.units import Edu from synapse.handlers.presence import format_user_presence_state +from synapse.logging import issue9533_logger from synapse.logging.opentracing import SynapseTags, set_tag from synapse.metrics import sent_transactions_counter from synapse.metrics.background_process_metrics import run_as_background_process @@ -574,6 +575,14 @@ async def _get_to_device_message_edus(self, limit: int) -> Tuple[List[Edu], int] for content in contents ] + if edus: + issue9533_logger.debug( + "Sending %i to-device messages to %s, up to stream id %i", + len(edus), + self._destination, + stream_id, + ) + return (edus, stream_id) def _start_catching_up(self) -> None: diff --git a/synapse/logging/__init__.py b/synapse/logging/__init__.py index e00969f8b18c..b50a4f95eb3a 100644 --- a/synapse/logging/__init__.py +++ b/synapse/logging/__init__.py @@ -12,8 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -# These are imported to allow for nicer logging configuration files. +import logging + from synapse.logging._remote import RemoteHandler from synapse.logging._terse_json import JsonFormatter, TerseJsonFormatter +# These are imported to allow for nicer logging configuration files. __all__ = ["RemoteHandler", "JsonFormatter", "TerseJsonFormatter"] + +# Debug logger for https://github.com/matrix-org/synapse/issues/9533 etc +issue9533_logger = logging.getLogger("synapse.9533_debug") diff --git a/synapse/notifier.py b/synapse/notifier.py index b9531007e27c..24b4e6649f18 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -38,6 +38,7 @@ from synapse.api.errors import AuthError from synapse.events import EventBase from synapse.handlers.presence import format_user_presence_state +from synapse.logging import issue9533_logger from synapse.logging.context import PreserveLoggingContext from synapse.logging.opentracing import log_kv, start_active_span from synapse.logging.utils import log_function @@ -426,6 +427,13 @@ def on_new_event( for room in rooms: user_streams |= self.room_to_user_streams.get(room, set()) + if stream_key == "to_device_key": + issue9533_logger.debug( + "to-device messages stream id %s, awaking streams for %s", + new_token, + users, + ) + time_now_ms = self.clock.time_msec() for user_stream in user_streams: try: diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 4f3c6a18b6cf..62d780917500 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -51,7 +51,6 @@ logger = logging.getLogger(__name__) - # How long we allow callers to wait for replication updates before timing out. 
_WAIT_FOR_REPLICATION_TIMEOUT_SECONDS = 30 diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index 7c9d1f744eaf..50e7ddd7355b 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -15,6 +15,7 @@ import logging from typing import List, Optional, Tuple +from synapse.logging import issue9533_logger from synapse.logging.opentracing import log_kv, set_tag, trace from synapse.replication.tcp.streams import ToDeviceStream from synapse.storage._base import SQLBaseStore, db_to_json @@ -404,6 +405,13 @@ def add_messages_txn(txn, now_ms, stream_id): ], ) + if remote_messages_by_destination: + issue9533_logger.debug( + "Queued outgoing to-device messages with stream_id %i for %s", + stream_id, + list(remote_messages_by_destination.keys()), + ) + async with self._device_inbox_id_gen.get_next() as stream_id: now_ms = self.clock.time_msec() await self.db_pool.runInteraction( @@ -533,6 +541,16 @@ def _add_messages_to_local_device_inbox_txn( ], ) + issue9533_logger.debug( + "Stored to-device messages with stream_id %i for %s", + stream_id, + [ + (user_id, device_id) + for (user_id, messages_by_device) in local_by_user_then_device.items() + for device_id in messages_by_device.keys() + ], + ) + class DeviceInboxBackgroundUpdateStore(SQLBaseStore): DEVICE_INBOX_STREAM_ID = "device_inbox_stream_drop" From 86fb71431ca562ba536c50620d1e367a9c6d4ac9 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 11 May 2021 13:53:49 +0100 Subject: [PATCH 125/222] 1.33.2 --- CHANGES.md | 16 ++++++++++++++++ changelog.d/9946.misc | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 4 files changed, 23 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/9946.misc diff --git a/CHANGES.md b/CHANGES.md index a41abbefbaf7..7ae0e7b3c1fb 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,19 @@ +Synapse 1.33.2 (2021-05-11) +=========================== + +Due to the security issue highlighted below, server administrators are encouraged to update Synapse. We are not aware of these vulnerabilities being exploited in the wild. + +Security advisory +----------------- + +This release fixes a denial of service attack ([CVE-2021-29471](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29471)) against Synapse's push rules implementation. Server admins are encouraged to upgrade. + +Internal Changes +---------------- + +- Unpin attrs dependency. ([\#9946](https://github.com/matrix-org/synapse/issues/9946)) + + Synapse 1.33.1 (2021-05-06) =========================== diff --git a/changelog.d/9946.misc b/changelog.d/9946.misc deleted file mode 100644 index 142ec5496f76..000000000000 --- a/changelog.d/9946.misc +++ /dev/null @@ -1 +0,0 @@ -Unpin attrs dependency. diff --git a/debian/changelog b/debian/changelog index de50dd14ea80..76b82c172e97 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.33.2) stable; urgency=medium + + * New synapse release 1.33.2. + + -- Synapse Packaging team Tue, 11 May 2021 11:17:59 +0100 + matrix-synapse-py3 (1.33.1) stable; urgency=medium * New synapse release 1.33.1. 
diff --git a/synapse/__init__.py b/synapse/__init__.py index 441cd8b33906..ce822ccb0426 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.33.1" +__version__ = "1.33.2" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From dc6366a9bd370a0f772f376a2053c0ce48cb6607 Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Tue, 11 May 2021 08:03:23 -0500 Subject: [PATCH 126/222] Add config option to hide device names over federation (#9945) Now that cross signing exists there is much less of a need for other people to look at devices and verify them individually. This PR adds a config option to allow you to prevent device display names from being shared with other servers. Signed-off-by: Aaron Raimist --- changelog.d/9945.feature | 1 + docs/sample_config.yaml | 6 ++++++ synapse/config/federation.py | 10 ++++++++++ synapse/storage/databases/main/end_to_end_keys.py | 4 +++- 4 files changed, 20 insertions(+), 1 deletion(-) create mode 100644 changelog.d/9945.feature diff --git a/changelog.d/9945.feature b/changelog.d/9945.feature new file mode 100644 index 000000000000..84308e8cce18 --- /dev/null +++ b/changelog.d/9945.feature @@ -0,0 +1 @@ +Add a config option to allow you to prevent device display names from being shared over federation. Contributed by @aaronraimist. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index f469d6e54f66..7cf222d356cf 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -741,6 +741,12 @@ acme: # #allow_profile_lookup_over_federation: false +# Uncomment to disable device display name lookup over federation. By default, the +# Federation API allows other homeservers to obtain device display names of any user +# on this homeserver. Defaults to 'true'. +# +#allow_device_name_lookup_over_federation: false + ## Caching ## diff --git a/synapse/config/federation.py b/synapse/config/federation.py index 090ba047fa23..cdd7a1ef054e 100644 --- a/synapse/config/federation.py +++ b/synapse/config/federation.py @@ -44,6 +44,10 @@ def read_config(self, config, **kwargs): "allow_profile_lookup_over_federation", True ) + self.allow_device_name_lookup_over_federation = config.get( + "allow_device_name_lookup_over_federation", True + ) + def generate_config_section(self, config_dir_path, server_name, **kwargs): return """\ ## Federation ## @@ -75,6 +79,12 @@ def generate_config_section(self, config_dir_path, server_name, **kwargs): # on this homeserver. Defaults to 'true'. # #allow_profile_lookup_over_federation: false + + # Uncomment to disable device display name lookup over federation. By default, the + # Federation API allows other homeservers to obtain device display names of any user + # on this homeserver. Defaults to 'true'. 
+        #
+        #allow_device_name_lookup_over_federation: false
         """


diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py
index 88afe97c41ac..398d6b6acb85 100644
--- a/synapse/storage/databases/main/end_to_end_keys.py
+++ b/synapse/storage/databases/main/end_to_end_keys.py
@@ -84,7 +84,9 @@ async def get_e2e_device_keys_for_federation_query(
                 if keys:
                     result["keys"] = keys

-                device_display_name = device.display_name
+                device_display_name = None
+                if self.hs.config.allow_device_name_lookup_over_federation:
+                    device_display_name = device.display_name
                 if device_display_name:
                     result["device_display_name"] = device_display_name

From d1473f7362e9b146dbd256076c8e3c7d163e7d94 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Tue, 11 May 2021 14:09:46 +0100
Subject: [PATCH 127/222] Use link to advisory rather than to the CVE repo

---
 CHANGES.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGES.md b/CHANGES.md
index 7ae0e7b3c1fb..93efa3ce5605 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -6,7 +6,7 @@ Due to the security issue highlighted below, server administrators are encourage
 Security advisory
 -----------------

-This release fixes a denial of service attack ([CVE-2021-29471](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-29471)) against Synapse's push rules implementation. Server admins are encouraged to upgrade.
+This release fixes a denial of service attack ([CVE-2021-29471](https://github.com/matrix-org/synapse/security/advisories/GHSA-x345-32rc-8h85)) against Synapse's push rules implementation. Server admins are encouraged to upgrade.

 Internal Changes
 ----------------

From 28c68411028e15858819f2a5896313ce0e71c25b Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Tue, 11 May 2021 10:58:58 -0400
Subject: [PATCH 128/222] Send the `m.room.create` stripped event with invites (support MSC1772). (#9966)

MSC1772 specifies the m.room.create event should be sent as part of the
invite_state. This was done optionally behind an experimental flag, but is
now done by default due to MSC1772 being approved.

---
 UPGRADE.rst | 29 +++++++++++++++++++++++++++++
 changelog.d/9915.feature | 2 +-
 changelog.d/9966.feature | 1 +
 docs/sample_config.yaml | 1 +
 synapse/config/api.py | 6 ++----
 5 files changed, 34 insertions(+), 5 deletions(-)
 create mode 100644 changelog.d/9966.feature

diff --git a/UPGRADE.rst b/UPGRADE.rst
index e921e0c08a5f..606e357b6e39 100644
--- a/UPGRADE.rst
+++ b/UPGRADE.rst
@@ -85,6 +85,35 @@ for example:

       wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
       dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb

+Upgrading to v1.34.0
+====================
+
+`room_invite_state_types` configuration setting
+-----------------------------------------------
+
+The ``room_invite_state_types`` configuration setting has been deprecated and
+replaced with ``room_prejoin_state``. See the `sample configuration file `_.
+
+If you have set ``room_invite_state_types`` to the default value you should simply
+remove it from your configuration file. The default value used to be:
+
+.. code:: yaml
+
+   room_invite_state_types:
+      - "m.room.join_rules"
+      - "m.room.canonical_alias"
+      - "m.room.avatar"
+      - "m.room.encryption"
+      - "m.room.name"
+
+If you have customised this value by adding additional state types, you should
+remove ``room_invite_state_types`` and configure ``additional_event_types`` with
+your customisations.
+ +If you have customised this value by removing state types, you should rename +``room_invite_state_types`` to ``additional_event_types``, and set +``disable_default_event_types`` to ``true``. + Upgrading to v1.33.0 ==================== diff --git a/changelog.d/9915.feature b/changelog.d/9915.feature index 832916cb01af..7b81faabead1 100644 --- a/changelog.d/9915.feature +++ b/changelog.d/9915.feature @@ -1 +1 @@ -Support stable identifiers from [MSC1772](https://github.com/matrix-org/matrix-doc/pull/1772). +Support stable identifiers for [MSC1772](https://github.com/matrix-org/matrix-doc/pull/1772) Spaces. `m.space.child` events will now be taken into account when populating the experimental spaces summary response. Please see `UPGRADE.rst` if you have customised `room_invite_state_types` in your configuration. \ No newline at end of file diff --git a/changelog.d/9966.feature b/changelog.d/9966.feature new file mode 100644 index 000000000000..7b81faabead1 --- /dev/null +++ b/changelog.d/9966.feature @@ -0,0 +1 @@ +Support stable identifiers for [MSC1772](https://github.com/matrix-org/matrix-doc/pull/1772) Spaces. `m.space.child` events will now be taken into account when populating the experimental spaces summary response. Please see `UPGRADE.rst` if you have customised `room_invite_state_types` in your configuration. \ No newline at end of file diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 7cf222d356cf..67ad57b1aa9a 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1521,6 +1521,7 @@ room_prejoin_state: # - m.room.avatar # - m.room.encryption # - m.room.name + # - m.room.create # # Uncomment the following to disable these defaults (so that only the event # types listed in 'additional_event_types' are shared). Defaults to 'false'. diff --git a/synapse/config/api.py b/synapse/config/api.py index 55c038c0c4ee..b18044f9822a 100644 --- a/synapse/config/api.py +++ b/synapse/config/api.py @@ -88,10 +88,6 @@ def _get_prejoin_state_types(self, config: JsonDict) -> Iterable[str]: if not room_prejoin_state_config.get("disable_default_event_types"): yield from _DEFAULT_PREJOIN_STATE_TYPES - if self.spaces_enabled: - # MSC1772 suggests adding m.room.create to the prejoin state - yield EventTypes.Create - yield from room_prejoin_state_config.get("additional_event_types", []) @@ -109,6 +105,8 @@ def _get_prejoin_state_types(self, config: JsonDict) -> Iterable[str]: EventTypes.RoomAvatar, EventTypes.RoomEncryption, EventTypes.Name, + # Per MSC1772. + EventTypes.Create, ] From f4833e0c062e189fcfd8186fc197d1fc1052814a Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 11 May 2021 12:21:43 -0400 Subject: [PATCH 129/222] Support fetching the spaces summary via GET over federation. (#9947) Per changes in MSC2946, the C-S and S-S APIs for spaces summary should use GET requests. Until this is stable, the POST endpoints still exist. This does not switch federation requests to use the GET version yet since it is newly added and already deployed servers might not support it. When switching to the stable endpoint we should switch to GET requests. 
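Until the stable endpoint exists, the GET handlers added below coexist with the POST ones. As a purely hypothetical client-side sketch of what the GET form of the unstable federation endpoint might look like on the wire (the room ID and parameter values are invented; list-valued parameters are sent as repeated keys, matching the servlet's parsing below):

from urllib.parse import quote, urlencode

# The prefix and parameter names come from the servlet added in this patch;
# everything else here is illustrative.
room_id = "!space:example.org"
params = [
    ("suggested_only", "true"),
    ("max_rooms_per_space", "10"),
    ("exclude_rooms", "!a:example.org"),  # repeated key = list value
    ("exclude_rooms", "!b:example.org"),
]
path = "/_matrix/federation/unstable/org.matrix.msc2946/spaces/%s?%s" % (
    quote(room_id, safe=""),
    urlencode(params),
)
print(path)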
---
 changelog.d/9947.feature               | 1 +
 synapse/federation/transport/client.py | 1 +
 synapse/federation/transport/server.py | 26 ++++++++++++++++++++++++++
 synapse/rest/client/v1/room.py         | 1 +
 4 files changed, 29 insertions(+)
 create mode 100644 changelog.d/9947.feature

diff --git a/changelog.d/9947.feature b/changelog.d/9947.feature
new file mode 100644
index 000000000000..ce8874f81062
--- /dev/null
+++ b/changelog.d/9947.feature
@@ -0,0 +1 @@
+Update support for [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946): Spaces Summary.

diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py
index ada322a81e14..497848a2b75b 100644
--- a/synapse/federation/transport/client.py
+++ b/synapse/federation/transport/client.py
@@ -995,6 +995,7 @@ async def get_space_summary(
                 returned per space
             exclude_rooms: a list of any rooms we can skip
         """
+        # TODO When switching to the stable endpoint, use GET instead of POST.
         path = _create_path(
             FEDERATION_UNSTABLE_PREFIX, "/org.matrix.msc2946/spaces/%s", room_id
         )

diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index a3759bdda1d9..e1b746247469 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -1376,6 +1376,32 @@ class FederationSpaceSummaryServlet(BaseFederationServlet):
     PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc2946"
     PATH = "/spaces/(?P<room_id>[^/]*)"

+    async def on_GET(
+        self,
+        origin: str,
+        content: JsonDict,
+        query: Mapping[bytes, Sequence[bytes]],
+        room_id: str,
+    ) -> Tuple[int, JsonDict]:
+        suggested_only = parse_boolean_from_args(query, "suggested_only", default=False)
+        max_rooms_per_space = parse_integer_from_args(query, "max_rooms_per_space")
+
+        exclude_rooms = []
+        if b"exclude_rooms" in query:
+            try:
+                exclude_rooms = [
+                    room_id.decode("ascii") for room_id in query[b"exclude_rooms"]
+                ]
+            except Exception:
+                raise SynapseError(
+                    400, "Bad query parameter for exclude_rooms", Codes.INVALID_PARAM
+                )
+
+        return 200, await self.handler.federation_space_summary(
+            room_id, suggested_only, max_rooms_per_space, exclude_rooms
+        )
+
+    # TODO When switching to the stable endpoint, remove the POST handler.
     async def on_POST(
         self,
         origin: str,

diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index 5cab4d3c7baf..51813cccbe72 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -1020,6 +1020,7 @@ async def on_GET(
             max_rooms_per_space=parse_integer(request, "max_rooms_per_space"),
         )

+    # TODO When switching to the stable endpoint, remove the POST handler.
     async def on_POST(
         self, request: SynapseRequest, room_id: str
     ) -> Tuple[int, JsonDict]:

From 27c375f812725bead5fa41a55aacf316f6e2376c Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Tue, 11 May 2021 12:57:39 -0400
Subject: [PATCH 130/222] Sort child events according to MSC1772 for the spaces summary API. (#9954)

This should help ensure that equivalent results are achieved between
homeservers querying for the summary of a space.

This implements modified MSC1772 rules, according to MSC2946. The difference
is that the origin_server_ts of the m.room.create event is not used as a
tie-breaker since this might not be known if the homeserver is not part of
the room.
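A worked illustration of these rules, as a standalone sketch that mirrors the `_child_events_comparison_key` helper added below, using plain dicts in place of events:

import re

# Orderings may only use printable ASCII (\x20-\x7F), at most 50 characters.
_INVALID_ORDER_CHARS = re.compile(r"[^\x20-\x7F]")


def comparison_key(child: dict):
    order = child.get("order")
    if (
        not isinstance(order, str)
        or len(order) > 50
        or _INVALID_ORDER_CHARS.search(order)
    ):
        order = None
    # Valid orders sort first (lexicographically); the rest fall back to room ID.
    return (order is None, order, child["room_id"])


children = [
    {"room_id": "!c:test"},                  # no order: falls back to room ID
    {"room_id": "!b:test", "order": "aaa"},  # valid order: sorts first
    {"room_id": "!a:test", "order": 42},     # invalid (not a string): ignored
]
print([c["room_id"] for c in sorted(children, key=comparison_key)])
# -> ['!b:test', '!a:test', '!c:test']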
--- changelog.d/9954.feature | 1 + synapse/handlers/space_summary.py | 71 +++++++++++++++++++++++- tests/handlers/test_space_summary.py | 81 ++++++++++++++++++++++++++++ 3 files changed, 151 insertions(+), 2 deletions(-) create mode 100644 changelog.d/9954.feature create mode 100644 tests/handlers/test_space_summary.py diff --git a/changelog.d/9954.feature b/changelog.d/9954.feature new file mode 100644 index 000000000000..ce8874f81062 --- /dev/null +++ b/changelog.d/9954.feature @@ -0,0 +1 @@ +Update support for [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946): Spaces Summary. diff --git a/synapse/handlers/space_summary.py b/synapse/handlers/space_summary.py index 2e997841f166..e35d91832b42 100644 --- a/synapse/handlers/space_summary.py +++ b/synapse/handlers/space_summary.py @@ -14,6 +14,7 @@ import itertools import logging +import re from collections import deque from typing import TYPE_CHECKING, Iterable, List, Optional, Sequence, Set, Tuple, cast @@ -226,6 +227,23 @@ async def _summarize_local_room( suggested_only: bool, max_children: Optional[int], ) -> Tuple[Sequence[JsonDict], Sequence[JsonDict]]: + """ + Generate a room entry and a list of event entries for a given room. + + Args: + requester: The requesting user, or None if this is over federation. + room_id: The room ID to summarize. + suggested_only: True if only suggested children should be returned. + Otherwise, all children are returned. + max_children: The maximum number of children to return for this node. + + Returns: + A tuple of: + An iterable of a single value of the room. + + An iterable of the sorted children events. This may be limited + to a maximum size or may include all children. + """ if not await self._is_room_accessible(room_id, requester): return (), () @@ -357,6 +375,18 @@ async def _build_room_entry(self, room_id: str) -> JsonDict: return room_entry async def _get_child_events(self, room_id: str) -> Iterable[EventBase]: + """ + Get the child events for a given room. + + The returned results are sorted for stability. + + Args: + room_id: The room id to get the children of. + + Returns: + An iterable of sorted child events. + """ + # look for child rooms/spaces. current_state_ids = await self._store.get_current_state_ids(room_id) @@ -370,8 +400,9 @@ async def _get_child_events(self, room_id: str) -> Iterable[EventBase]: ] ) - # filter out any events without a "via" (which implies it has been redacted) - return (e for e in events if _has_valid_via(e)) + # filter out any events without a "via" (which implies it has been redacted), + # and order to ensure we return stable results. + return sorted(filter(_has_valid_via, events), key=_child_events_comparison_key) @attr.s(frozen=True, slots=True) @@ -397,3 +428,39 @@ def _is_suggested_child_event(edge_event: EventBase) -> bool: return True logger.debug("Ignorning not-suggested child %s", edge_event.state_key) return False + + +# Order may only contain characters in the range of \x20 (space) to \x7F (~). +_INVALID_ORDER_CHARS_RE = re.compile(r"[^\x20-\x7F]") + + +def _child_events_comparison_key(child: EventBase) -> Tuple[bool, Optional[str], str]: + """ + Generate a value for comparing two child events for ordering. + + The rules for ordering are supposed to be: + + 1. The 'order' key, if it is valid. + 2. The 'origin_server_ts' of the 'm.room.create' event. + 3. The 'room_id'. + + But we skip step 2 since we may not have any state from the room. + + Args: + child: The event for generating a comparison key. 
+ + Returns: + The comparison key as a tuple of: + False if the ordering is valid. + The ordering field. + The room ID. + """ + order = child.content.get("order") + # If order is not a string or doesn't meet the requirements, ignore it. + if not isinstance(order, str): + order = None + elif len(order) > 50 or _INVALID_ORDER_CHARS_RE.search(order): + order = None + + # Items without an order come last. + return (order is None, order, child.room_id) diff --git a/tests/handlers/test_space_summary.py b/tests/handlers/test_space_summary.py new file mode 100644 index 000000000000..2c5e81531b9e --- /dev/null +++ b/tests/handlers/test_space_summary.py @@ -0,0 +1,81 @@ +# Copyright 2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Any, Optional +from unittest import mock + +from synapse.handlers.space_summary import _child_events_comparison_key + +from tests import unittest + + +def _create_event(room_id: str, order: Optional[Any] = None): + result = mock.Mock() + result.room_id = room_id + result.content = {} + if order is not None: + result.content["order"] = order + return result + + +def _order(*events): + return sorted(events, key=_child_events_comparison_key) + + +class TestSpaceSummarySort(unittest.TestCase): + def test_no_order_last(self): + """An event with no ordering is placed behind those with an ordering.""" + ev1 = _create_event("!abc:test") + ev2 = _create_event("!xyz:test", "xyz") + + self.assertEqual([ev2, ev1], _order(ev1, ev2)) + + def test_order(self): + """The ordering should be used.""" + ev1 = _create_event("!abc:test", "xyz") + ev2 = _create_event("!xyz:test", "abc") + + self.assertEqual([ev2, ev1], _order(ev1, ev2)) + + def test_order_room_id(self): + """Room ID is a tie-breaker for ordering.""" + ev1 = _create_event("!abc:test", "abc") + ev2 = _create_event("!xyz:test", "abc") + + self.assertEqual([ev1, ev2], _order(ev1, ev2)) + + def test_invalid_ordering_type(self): + """Invalid orderings are considered the same as missing.""" + ev1 = _create_event("!abc:test", 1) + ev2 = _create_event("!xyz:test", "xyz") + + self.assertEqual([ev2, ev1], _order(ev1, ev2)) + + ev1 = _create_event("!abc:test", {}) + self.assertEqual([ev2, ev1], _order(ev1, ev2)) + + ev1 = _create_event("!abc:test", []) + self.assertEqual([ev2, ev1], _order(ev1, ev2)) + + ev1 = _create_event("!abc:test", True) + self.assertEqual([ev2, ev1], _order(ev1, ev2)) + + def test_invalid_ordering_value(self): + """Invalid orderings are considered the same as missing.""" + ev1 = _create_event("!abc:test", "foo\n") + ev2 = _create_event("!xyz:test", "xyz") + + self.assertEqual([ev2, ev1], _order(ev1, ev2)) + + ev1 = _create_event("!abc:test", "a" * 51) + self.assertEqual([ev2, ev1], _order(ev1, ev2)) From 63fb220e5f90b63acfa4bdfb61788f2c2d867c86 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 11 May 2021 18:01:11 +0100 Subject: [PATCH 131/222] Tests for to-device messages (#9965) --- changelog.d/9965.bugfix | 
1 + .../rest/client/v2_alpha/test_sendtodevice.py | 201 ++++++++++++++++++ 2 files changed, 202 insertions(+) create mode 100644 changelog.d/9965.bugfix create mode 100644 tests/rest/client/v2_alpha/test_sendtodevice.py diff --git a/changelog.d/9965.bugfix b/changelog.d/9965.bugfix new file mode 100644 index 000000000000..e26d141a5316 --- /dev/null +++ b/changelog.d/9965.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.29.0 which caused `m.room_key_request` to-device messages sent from one user to another to be dropped. diff --git a/tests/rest/client/v2_alpha/test_sendtodevice.py b/tests/rest/client/v2_alpha/test_sendtodevice.py new file mode 100644 index 000000000000..c9c99cc5d7a8 --- /dev/null +++ b/tests/rest/client/v2_alpha/test_sendtodevice.py @@ -0,0 +1,201 @@ +# Copyright 2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.rest import admin +from synapse.rest.client.v1 import login +from synapse.rest.client.v2_alpha import sendtodevice, sync + +from tests.unittest import HomeserverTestCase, override_config + + +class SendToDeviceTestCase(HomeserverTestCase): + servlets = [ + admin.register_servlets, + login.register_servlets, + sendtodevice.register_servlets, + sync.register_servlets, + ] + + def test_user_to_user(self): + """A to-device message from one user to another should get delivered""" + + user1 = self.register_user("u1", "pass") + user1_tok = self.login("u1", "pass", "d1") + + user2 = self.register_user("u2", "pass") + user2_tok = self.login("u2", "pass", "d2") + + # send the message + test_msg = {"foo": "bar"} + chan = self.make_request( + "PUT", + "/_matrix/client/r0/sendToDevice/m.test/1234", + content={"messages": {user2: {"d2": test_msg}}}, + access_token=user1_tok, + ) + self.assertEqual(chan.code, 200, chan.result) + + # check it appears + channel = self.make_request("GET", "/sync", access_token=user2_tok) + self.assertEqual(channel.code, 200, channel.result) + expected_result = { + "events": [ + { + "sender": user1, + "type": "m.test", + "content": test_msg, + } + ] + } + self.assertEqual(channel.json_body["to_device"], expected_result) + + # it should re-appear if we do another sync + channel = self.make_request("GET", "/sync", access_token=user2_tok) + self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.json_body["to_device"], expected_result) + + # it should *not* appear if we do an incremental sync + sync_token = channel.json_body["next_batch"] + channel = self.make_request( + "GET", f"/sync?since={sync_token}", access_token=user2_tok + ) + self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.json_body.get("to_device", {}).get("events", []), []) + + @override_config({"rc_key_requests": {"per_second": 10, "burst_count": 2}}) + def test_local_room_key_request(self): + """m.room_key_request has special-casing; test from local user""" + user1 = self.register_user("u1", "pass") + user1_tok = self.login("u1", "pass", "d1") + + user2 = self.register_user("u2", 
"pass") + user2_tok = self.login("u2", "pass", "d2") + + # send three messages + for i in range(3): + chan = self.make_request( + "PUT", + f"/_matrix/client/r0/sendToDevice/m.room_key_request/{i}", + content={"messages": {user2: {"d2": {"idx": i}}}}, + access_token=user1_tok, + ) + self.assertEqual(chan.code, 200, chan.result) + + # now sync: we should get two of the three + channel = self.make_request("GET", "/sync", access_token=user2_tok) + self.assertEqual(channel.code, 200, channel.result) + msgs = channel.json_body["to_device"]["events"] + self.assertEqual(len(msgs), 2) + for i in range(2): + self.assertEqual( + msgs[i], + {"sender": user1, "type": "m.room_key_request", "content": {"idx": i}}, + ) + sync_token = channel.json_body["next_batch"] + + # ... time passes + self.reactor.advance(1) + + # and we can send more messages + chan = self.make_request( + "PUT", + "/_matrix/client/r0/sendToDevice/m.room_key_request/3", + content={"messages": {user2: {"d2": {"idx": 3}}}}, + access_token=user1_tok, + ) + self.assertEqual(chan.code, 200, chan.result) + + # ... which should arrive + channel = self.make_request( + "GET", f"/sync?since={sync_token}", access_token=user2_tok + ) + self.assertEqual(channel.code, 200, channel.result) + msgs = channel.json_body["to_device"]["events"] + self.assertEqual(len(msgs), 1) + self.assertEqual( + msgs[0], + {"sender": user1, "type": "m.room_key_request", "content": {"idx": 3}}, + ) + + @override_config({"rc_key_requests": {"per_second": 10, "burst_count": 2}}) + def test_remote_room_key_request(self): + """m.room_key_request has special-casing; test from remote user""" + user2 = self.register_user("u2", "pass") + user2_tok = self.login("u2", "pass", "d2") + + federation_registry = self.hs.get_federation_registry() + + # send three messages + for i in range(3): + self.get_success( + federation_registry.on_edu( + "m.direct_to_device", + "remote_server", + { + "sender": "@user:remote_server", + "type": "m.room_key_request", + "messages": {user2: {"d2": {"idx": i}}}, + "message_id": f"{i}", + }, + ) + ) + + # now sync: we should get two of the three + channel = self.make_request("GET", "/sync", access_token=user2_tok) + self.assertEqual(channel.code, 200, channel.result) + msgs = channel.json_body["to_device"]["events"] + self.assertEqual(len(msgs), 2) + for i in range(2): + self.assertEqual( + msgs[i], + { + "sender": "@user:remote_server", + "type": "m.room_key_request", + "content": {"idx": i}, + }, + ) + sync_token = channel.json_body["next_batch"] + + # ... time passes + self.reactor.advance(1) + + # and we can send more messages + self.get_success( + federation_registry.on_edu( + "m.direct_to_device", + "remote_server", + { + "sender": "@user:remote_server", + "type": "m.room_key_request", + "messages": {user2: {"d2": {"idx": 3}}}, + "message_id": "3", + }, + ) + ) + + # ... 
which should arrive + channel = self.make_request( + "GET", f"/sync?since={sync_token}", access_token=user2_tok + ) + self.assertEqual(channel.code, 200, channel.result) + msgs = channel.json_body["to_device"]["events"] + self.assertEqual(len(msgs), 1) + self.assertEqual( + msgs[0], + { + "sender": "@user:remote_server", + "type": "m.room_key_request", + "content": {"idx": 3}, + }, + ) From affaffb0abc3993501ec024e00c286da85e121e9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 12 May 2021 13:17:11 +0100 Subject: [PATCH 132/222] Run cache_joined_hosts_for_event in background (#9951) --- changelog.d/9951.feature | 1 + synapse/handlers/message.py | 45 ++++++++++++++++++++++++++++++++----- 2 files changed, 41 insertions(+), 5 deletions(-) create mode 100644 changelog.d/9951.feature diff --git a/changelog.d/9951.feature b/changelog.d/9951.feature new file mode 100644 index 000000000000..96a0e7f09fbc --- /dev/null +++ b/changelog.d/9951.feature @@ -0,0 +1 @@ +Improve performance of sending events for worker-based deployments using Redis. diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 5afb7fc261b5..9f365eb5ad5a 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -19,6 +19,7 @@ from canonicaljson import encode_canonical_json +from twisted.internet import defer from twisted.internet.interfaces import IDelayedCall from synapse import event_auth @@ -43,14 +44,14 @@ from synapse.events.builder import EventBuilder from synapse.events.snapshot import EventContext from synapse.events.validator import EventValidator -from synapse.logging.context import run_in_background +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.http.send_event import ReplicationSendEventRestServlet from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.storage.state import StateFilter from synapse.types import Requester, RoomAlias, StreamToken, UserID, create_requester -from synapse.util import json_decoder, json_encoder -from synapse.util.async_helpers import Linearizer +from synapse.util import json_decoder, json_encoder, log_failure +from synapse.util.async_helpers import Linearizer, unwrapFirstError from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.metrics import measure_func from synapse.visibility import filter_events_for_client @@ -979,9 +980,43 @@ async def handle_new_client_event( logger.exception("Failed to encode content: %r", event.content) raise - await self.action_generator.handle_push_actions_for_event(event, context) + # We now persist the event (and update the cache in parallel, since we + # don't want to block on it). + result = await make_deferred_yieldable( + defer.gatherResults( + [ + run_in_background( + self._persist_event, + requester=requester, + event=event, + context=context, + ratelimit=ratelimit, + extra_users=extra_users, + ), + run_in_background( + self.cache_joined_hosts_for_event, event, context + ).addErrback(log_failure, "cache_joined_hosts_for_event failed"), + ], + consumeErrors=True, + ) + ).addErrback(unwrapFirstError) + + return result[0] + + async def _persist_event( + self, + requester: Requester, + event: EventBase, + context: EventContext, + ratelimit: bool = True, + extra_users: Optional[List[UserID]] = None, + ) -> EventBase: + """Actually persists the event. 
Should only be called by
+        `handle_new_client_event`, and see its docstring for documentation of
+        the arguments.
+        """

-        await self.cache_joined_hosts_for_event(event, context)
+        await self.action_generator.handle_push_actions_for_event(event, context)

         try:
             # If we're a worker we need to hit out to the master.
From 7562d887e159f404c8d752271310f4432f246656 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 12 May 2021 15:04:51 +0100
Subject: [PATCH 133/222] Change the format of access tokens away from macaroons (#5588)

---
 changelog.d/5588.misc | 1 +
 scripts-dev/dump_macaroon.py | 2 +-
 synapse/handlers/auth.py | 28 +++++++++++----
 synapse/handlers/register.py | 4 +--
 synapse/util/stringutils.py | 20 +++++++++++
 tests/api/test_auth.py | 63 ---------------------------------
 tests/handlers/test_auth.py | 43 +++++++++++-----------
 tests/handlers/test_register.py | 12 +++----
 tests/util/test_stringutils.py | 8 ++++-
 9 files changed, 78 insertions(+), 103 deletions(-)
 create mode 100644 changelog.d/5588.misc

diff --git a/changelog.d/5588.misc b/changelog.d/5588.misc
new file mode 100644
index 000000000000..b8f52a212c6e
--- /dev/null
+++ b/changelog.d/5588.misc
@@ -0,0 +1 @@
+Reduce the length of Synapse's access tokens.

diff --git a/scripts-dev/dump_macaroon.py b/scripts-dev/dump_macaroon.py
index 980b5e709f96..0ca75d3fe14f 100755
--- a/scripts-dev/dump_macaroon.py
+++ b/scripts-dev/dump_macaroon.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python

 import sys

diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py
index 36f2450e2e93..8a6666a4ade6 100644
--- a/synapse/handlers/auth.py
+++ b/synapse/handlers/auth.py
@@ -17,6 +17,7 @@
 import time
 import unicodedata
 import urllib.parse
+from binascii import crc32
 from typing import (
     TYPE_CHECKING,
     Any,
@@ -34,6 +35,7 @@
 import attr
 import bcrypt
 import pymacaroons
+import unpaddedbase64

 from twisted.web.server import Request

@@ -66,6 +68,7 @@
 from synapse.util.async_helpers import maybe_awaitable
 from synapse.util.macaroons import get_value_from_macaroon, satisfy_expiry
 from synapse.util.msisdn import phone_number_to_msisdn
+from synapse.util.stringutils import base62_encode
 from synapse.util.threepids import canonicalise_email

 if TYPE_CHECKING:
@@ -808,10 +811,12 @@ async def get_access_token_for_user_id(
             logger.info(
                 "Logging in user %s as %s%s", user_id, puppets_user_id, fmt_expiry
             )
+            target_user_id_obj = UserID.from_string(puppets_user_id)
         else:
             logger.info(
                 "Logging in user %s on device %s%s", user_id, device_id, fmt_expiry
             )
+            target_user_id_obj = UserID.from_string(user_id)

         if (
             not is_appservice_ghost
@@ -819,7 +824,7 @@
         ):
             await self.auth.check_auth_blocking(user_id)

-        access_token = self.macaroon_gen.generate_access_token(user_id)
+        access_token = self.generate_access_token(target_user_id_obj)
         await self.store.add_access_token_to_user(
             user_id=user_id,
             token=access_token,
@@ -1192,6 +1197,19 @@ async def _check_local_password(self, user_id: str, password: str) -> Optional[s
             return None
         return user_id

+    def generate_access_token(self, for_user: UserID) -> str:
+        """Generates an opaque string, for use as an access token"""
+
+        # we use the following format for access tokens:
+        #    syt_<base64 local part>_<random string>_<base62 crc check>
+
+        b64local = unpaddedbase64.encode_base64(for_user.localpart.encode("utf-8"))
+        random_string = stringutils.random_string(20)
+        base = f"syt_{b64local}_{random_string}"
+
+        crc = base62_encode(crc32(base.encode("ascii")), minwidth=6)
+        
return f"{base}_{crc}" + async def validate_short_term_login_token( self, login_token: str ) -> LoginTokenAttributes: @@ -1585,10 +1603,7 @@ class MacaroonGenerator: hs = attr.ib() - def generate_access_token( - self, user_id: str, extra_caveats: Optional[List[str]] = None - ) -> str: - extra_caveats = extra_caveats or [] + def generate_guest_access_token(self, user_id: str) -> str: macaroon = self._generate_base_macaroon(user_id) macaroon.add_first_party_caveat("type = access") # Include a nonce, to make sure that each login gets a different @@ -1596,8 +1611,7 @@ def generate_access_token( macaroon.add_first_party_caveat( "nonce = %s" % (stringutils.random_string_with_symbols(16),) ) - for caveat in extra_caveats: - macaroon.add_first_party_caveat(caveat) + macaroon.add_first_party_caveat("guest = true") return macaroon.serialize() def generate_short_term_login_token( diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 007fb1284091..4ceef3fab3d9 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -722,9 +722,7 @@ class and RegisterDeviceReplicationServlet. ) if is_guest: assert valid_until_ms is None - access_token = self.macaroon_gen.generate_access_token( - user_id, ["guest = true"] - ) + access_token = self.macaroon_gen.generate_guest_access_token(user_id) else: access_token = await self._auth_handler.get_access_token_for_user_id( user_id, diff --git a/synapse/util/stringutils.py b/synapse/util/stringutils.py index cd82777f80eb..4f25cd1d26ae 100644 --- a/synapse/util/stringutils.py +++ b/synapse/util/stringutils.py @@ -220,3 +220,23 @@ def strtobool(val: str) -> bool: return False else: raise ValueError("invalid truth value %r" % (val,)) + + +_BASE62 = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + + +def base62_encode(num: int, minwidth: int = 1) -> str: + """Encode a number using base62 + + Args: + num: number to be encoded + minwidth: width to pad to, if the number is small + """ + res = "" + while num: + num, rem = divmod(num, 62) + res = _BASE62[rem] + res + + # pad to minimum width + pad = "0" * (minwidth - len(res)) + return pad + res diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index c0ed64f78430..1b0a81575739 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -21,13 +21,11 @@ from synapse.api.errors import ( AuthError, Codes, - InvalidClientCredentialsError, InvalidClientTokenError, MissingClientTokenError, ResourceLimitError, ) from synapse.storage.databases.main.registration import TokenLookupResult -from synapse.types import UserID from tests import unittest from tests.test_utils import simple_async_mock @@ -253,67 +251,6 @@ def test_get_guest_user_from_macaroon(self): self.assertTrue(user_info.is_guest) self.store.get_user_by_id.assert_called_with(user_id) - def test_cannot_use_regular_token_as_guest(self): - USER_ID = "@percy:matrix.org" - self.store.add_access_token_to_user = simple_async_mock(None) - self.store.get_device = simple_async_mock(None) - - token = self.get_success( - self.hs.get_auth_handler().get_access_token_for_user_id( - USER_ID, "DEVICE", valid_until_ms=None - ) - ) - self.store.add_access_token_to_user.assert_called_with( - user_id=USER_ID, - token=token, - device_id="DEVICE", - valid_until_ms=None, - puppets_user_id=None, - ) - - async def get_user(tok): - if token != tok: - return None - return TokenLookupResult( - user_id=USER_ID, - is_guest=False, - token_id=1234, - device_id="DEVICE", - ) - - self.store.get_user_by_access_token = get_user - 
self.store.get_user_by_id = simple_async_mock({"is_guest": False}) - - # check the token works - request = Mock(args={}) - request.args[b"access_token"] = [token.encode("ascii")] - request.requestHeaders.getRawHeaders = mock_getRawHeaders() - requester = self.get_success( - self.auth.get_user_by_req(request, allow_guest=True) - ) - self.assertEqual(UserID.from_string(USER_ID), requester.user) - self.assertFalse(requester.is_guest) - - # add an is_guest caveat - mac = pymacaroons.Macaroon.deserialize(token) - mac.add_first_party_caveat("guest = true") - guest_tok = mac.serialize() - - # the token should *not* work now - request = Mock(args={}) - request.args[b"access_token"] = [guest_tok.encode("ascii")] - request.requestHeaders.getRawHeaders = mock_getRawHeaders() - - cm = self.get_failure( - self.auth.get_user_by_req(request, allow_guest=True), - InvalidClientCredentialsError, - ) - - self.assertEqual(401, cm.value.code) - self.assertEqual("Guest access token used for regular user", cm.value.msg) - - self.store.get_user_by_id.assert_called_with(USER_ID) - def test_blocking_mau(self): self.auth_blocking._limit_usage_by_mau = False self.auth_blocking._max_mau_value = 50 diff --git a/tests/handlers/test_auth.py b/tests/handlers/test_auth.py index fe7e9484fd5b..5f3350e490da 100644 --- a/tests/handlers/test_auth.py +++ b/tests/handlers/test_auth.py @@ -16,12 +16,17 @@ import pymacaroons from synapse.api.errors import AuthError, ResourceLimitError +from synapse.rest import admin from tests import unittest from tests.test_utils import make_awaitable class AuthTestCase(unittest.HomeserverTestCase): + servlets = [ + admin.register_servlets, + ] + def prepare(self, reactor, clock, hs): self.auth_handler = hs.get_auth_handler() self.macaroon_generator = hs.get_macaroon_generator() @@ -35,16 +40,10 @@ def prepare(self, reactor, clock, hs): self.small_number_of_users = 1 self.large_number_of_users = 100 - def test_token_is_a_macaroon(self): - token = self.macaroon_generator.generate_access_token("some_user") - # Check that we can parse the thing with pymacaroons - macaroon = pymacaroons.Macaroon.deserialize(token) - # The most basic of sanity checks - if "some_user" not in macaroon.inspect(): - self.fail("some_user was not in %s" % macaroon.inspect()) + self.user1 = self.register_user("a_user", "pass") def test_macaroon_caveats(self): - token = self.macaroon_generator.generate_access_token("a_user") + token = self.macaroon_generator.generate_guest_access_token("a_user") macaroon = pymacaroons.Macaroon.deserialize(token) def verify_gen(caveat): @@ -59,19 +58,23 @@ def verify_type(caveat): def verify_nonce(caveat): return caveat.startswith("nonce =") + def verify_guest(caveat): + return caveat == "guest = true" + v = pymacaroons.Verifier() v.satisfy_general(verify_gen) v.satisfy_general(verify_user) v.satisfy_general(verify_type) v.satisfy_general(verify_nonce) + v.satisfy_general(verify_guest) v.verify(macaroon, self.hs.config.macaroon_secret_key) def test_short_term_login_token_gives_user_id(self): token = self.macaroon_generator.generate_short_term_login_token( - "a_user", "", 5000 + self.user1, "", 5000 ) res = self.get_success(self.auth_handler.validate_short_term_login_token(token)) - self.assertEqual("a_user", res.user_id) + self.assertEqual(self.user1, res.user_id) self.assertEqual("", res.auth_provider_id) # when we advance the clock, the token should be rejected @@ -83,22 +86,22 @@ def test_short_term_login_token_gives_user_id(self): def test_short_term_login_token_gives_auth_provider(self): 
token = self.macaroon_generator.generate_short_term_login_token( - "a_user", auth_provider_id="my_idp" + self.user1, auth_provider_id="my_idp" ) res = self.get_success(self.auth_handler.validate_short_term_login_token(token)) - self.assertEqual("a_user", res.user_id) + self.assertEqual(self.user1, res.user_id) self.assertEqual("my_idp", res.auth_provider_id) def test_short_term_login_token_cannot_replace_user_id(self): token = self.macaroon_generator.generate_short_term_login_token( - "a_user", "", 5000 + self.user1, "", 5000 ) macaroon = pymacaroons.Macaroon.deserialize(token) res = self.get_success( self.auth_handler.validate_short_term_login_token(macaroon.serialize()) ) - self.assertEqual("a_user", res.user_id) + self.assertEqual(self.user1, res.user_id) # add another "user_id" caveat, which might allow us to override the # user_id. @@ -114,7 +117,7 @@ def test_mau_limits_disabled(self): # Ensure does not throw exception self.get_success( self.auth_handler.get_access_token_for_user_id( - "user_a", device_id=None, valid_until_ms=None + self.user1, device_id=None, valid_until_ms=None ) ) @@ -132,7 +135,7 @@ def test_mau_limits_exceeded_large(self): self.get_failure( self.auth_handler.get_access_token_for_user_id( - "user_a", device_id=None, valid_until_ms=None + self.user1, device_id=None, valid_until_ms=None ), ResourceLimitError, ) @@ -160,7 +163,7 @@ def test_mau_limits_parity(self): # If not in monthly active cohort self.get_failure( self.auth_handler.get_access_token_for_user_id( - "user_a", device_id=None, valid_until_ms=None + self.user1, device_id=None, valid_until_ms=None ), ResourceLimitError, ) @@ -177,7 +180,7 @@ def test_mau_limits_parity(self): ) self.get_success( self.auth_handler.get_access_token_for_user_id( - "user_a", device_id=None, valid_until_ms=None + self.user1, device_id=None, valid_until_ms=None ) ) self.get_success( @@ -195,7 +198,7 @@ def test_mau_limits_not_exceeded(self): # Ensure does not raise exception self.get_success( self.auth_handler.get_access_token_for_user_id( - "user_a", device_id=None, valid_until_ms=None + self.user1, device_id=None, valid_until_ms=None ) ) @@ -210,6 +213,6 @@ def test_mau_limits_not_exceeded(self): def _get_macaroon(self): token = self.macaroon_generator.generate_short_term_login_token( - "user_a", "", 5000 + self.user1, "", 5000 ) return pymacaroons.Macaroon.deserialize(token) diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 608f8f3d3323..bd4319052338 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -48,10 +48,6 @@ def prepare(self, reactor, clock, hs): self.mock_distributor = Mock() self.mock_distributor.declare("registered_user") self.mock_captcha_client = Mock() - self.macaroon_generator = Mock( - generate_access_token=Mock(return_value="secret") - ) - self.hs.get_macaroon_generator = Mock(return_value=self.macaroon_generator) self.handler = self.hs.get_registration_handler() self.store = self.hs.get_datastore() self.lots_of_users = 100 @@ -67,8 +63,8 @@ def test_user_is_created_and_logged_in_if_doesnt_exist(self): self.get_or_create_user(requester, frank.localpart, "Frankie") ) self.assertEquals(result_user_id, user_id) - self.assertTrue(result_token is not None) - self.assertEquals(result_token, "secret") + self.assertIsInstance(result_token, str) + self.assertGreater(len(result_token), 20) def test_if_user_exists(self): store = self.hs.get_datastore() @@ -500,7 +496,7 @@ def check_registration_for_spam( user_id = 
self.get_success(self.handler.register_user(localpart="user")) # Get an access token. - token = self.macaroon_generator.generate_access_token(user_id) + token = "testtok" self.get_success( self.store.add_access_token_to_user( user_id=user_id, token=token, device_id=None, valid_until_ms=None @@ -577,7 +573,7 @@ async def get_or_create_user( user = UserID(localpart, self.hs.hostname) user_id = user.to_string() - token = self.macaroon_generator.generate_access_token(user_id) + token = self.hs.get_auth_handler().generate_access_token(user) if need_register: await self.handler.register_with_store( diff --git a/tests/util/test_stringutils.py b/tests/util/test_stringutils.py index f7fecd9cf301..ad4dd7f0078f 100644 --- a/tests/util/test_stringutils.py +++ b/tests/util/test_stringutils.py @@ -13,7 +13,7 @@ # limitations under the License. from synapse.api.errors import SynapseError -from synapse.util.stringutils import assert_valid_client_secret +from synapse.util.stringutils import assert_valid_client_secret, base62_encode from .. import unittest @@ -45,3 +45,9 @@ def test_client_secret_regex(self): for client_secret in bad: with self.assertRaises(SynapseError): assert_valid_client_secret(client_secret) + + def test_base62_encode(self): + self.assertEqual("0", base62_encode(0)) + self.assertEqual("10", base62_encode(62)) + self.assertEqual("1c", base62_encode(100)) + self.assertEqual("001c", base62_encode(100, minwidth=4)) From a683028d81606708f686b890c0a44f5a20b54798 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 12 May 2021 16:05:28 +0200 Subject: [PATCH 134/222] Correctly ratelimit invites when creating a room (#9968) * Correctly ratelimit invites when creating a room Also allow ratelimiting for more than one action at a time. --- changelog.d/9968.bugfix | 1 + synapse/api/ratelimiting.py | 22 +++++++++--- synapse/handlers/room.py | 27 ++++++++++---- synapse/handlers/room_member.py | 25 +++++++++++++ tests/api/test_ratelimiting.py | 57 ++++++++++++++++++++++++++++++ tests/rest/client/v1/test_rooms.py | 37 +++++++++++++++++++ 6 files changed, 157 insertions(+), 12 deletions(-) create mode 100644 changelog.d/9968.bugfix diff --git a/changelog.d/9968.bugfix b/changelog.d/9968.bugfix new file mode 100644 index 000000000000..39e75f995650 --- /dev/null +++ b/changelog.d/9968.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in v1.27.0 preventing users and appservices exempt from ratelimiting from creating rooms with many invitees. diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py index 2244b8a34062..b9a10283f44a 100644 --- a/synapse/api/ratelimiting.py +++ b/synapse/api/ratelimiting.py @@ -57,6 +57,7 @@ async def can_do_action( rate_hz: Optional[float] = None, burst_count: Optional[int] = None, update: bool = True, + n_actions: int = 1, _time_now_s: Optional[int] = None, ) -> Tuple[bool, float]: """Can the entity (e.g. user or IP address) perform the action? @@ -76,6 +77,9 @@ async def can_do_action( burst_count: How many actions that can be performed before being limited. Overrides the value set during instantiation if set. update: Whether to count this check as performing the action + n_actions: The number of times the user wants to do this action. If the user + cannot do all of the actions, the user's action count is not incremented + at all. _time_now_s: The current time. Optional, defaults to the current time according to self.clock. Only used by tests. 
@@ -124,17 +128,20 @@ async def can_do_action(
         time_delta = time_now_s - time_start
         performed_count = action_count - time_delta * rate_hz
         if performed_count < 0:
-            # Allow, reset back to count 1
-            allowed = True
+            performed_count = 0
             time_start = time_now_s
-            action_count = 1.0
-        elif performed_count > burst_count - 1.0:
+
+        # This check would read more naturally as performed_count + n_actions > burst_count,
+        # but performed_count may be a float carrying floating-point error, and adding
+        # n_actions to it could round the sum up and wrongly trip the limit. Keeping
+        # the float on its own side of the comparison avoids that.
+        if performed_count > burst_count - n_actions:
             # Deny, we have exceeded our burst count
             allowed = False
         else:
             # We haven't reached our limit yet
             allowed = True
-            action_count += 1.0
+            action_count = performed_count + n_actions

         if update:
             self.actions[key] = (action_count, time_start, rate_hz)

@@ -182,6 +189,7 @@ async def ratelimit(
         rate_hz: Optional[float] = None,
         burst_count: Optional[int] = None,
         update: bool = True,
+        n_actions: int = 1,
         _time_now_s: Optional[int] = None,
     ):
         """Checks if an action can be performed. If not, raises a LimitExceededError
@@ -201,6 +209,9 @@ async def ratelimit(
             burst_count: How many actions can be performed before being
                 limited. Overrides the value set during instantiation if set.
             update: Whether to count this check as performing the action
+            n_actions: The number of times the user wants to do this action. If the user
+                cannot do all of the actions, the user's action count is not incremented
+                at all.
             _time_now_s: The current time. Optional, defaults to the current time according
                 to self.clock. Only used by tests.

@@ -216,6 +227,7 @@ async def ratelimit(
             rate_hz=rate_hz,
             burst_count=burst_count,
             update=update,
+            n_actions=n_actions,
             _time_now_s=time_now_s,
         )

diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index fb4823a5cc6e..835d874ceedb 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -32,7 +32,14 @@
     RoomCreationPreset,
     RoomEncryptionAlgorithms,
 )
-from synapse.api.errors import AuthError, Codes, NotFoundError, StoreError, SynapseError
+from synapse.api.errors import (
+    AuthError,
+    Codes,
+    LimitExceededError,
+    NotFoundError,
+    StoreError,
+    SynapseError,
+)
 from synapse.api.filtering import Filter
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion
 from synapse.events import EventBase
@@ -126,10 +133,6 @@ def __init__(self, hs: "HomeServer"):

         self.third_party_event_rules = hs.get_third_party_event_rules()

-        self._invite_burst_count = (
-            hs.config.ratelimiting.rc_invites_per_room.burst_count
-        )
-
     async def upgrade_room(
         self, requester: Requester, old_room_id: str, new_version: RoomVersion
     ) -> str:
@@ -676,8 +679,18 @@ async def create_room(
             invite_3pid_list = []
             invite_list = []

-        if len(invite_list) + len(invite_3pid_list) > self._invite_burst_count:
-            raise SynapseError(400, "Cannot invite so many users at once")
+        if invite_list or invite_3pid_list:
+            try:
+                # If there are invites in the request, see if the ratelimiting settings
+                # allow that number of invites to be sent from the current user.
+                await self.room_member_handler.ratelimit_multiple_invites(
+                    requester,
+                    room_id=None,
+                    n_invites=len(invite_list) + len(invite_3pid_list),
+                    update=False,
+                )
+            except LimitExceededError:
+                raise SynapseError(400, "Cannot invite so many users at once")

         await self.event_creation_handler.assert_accepted_privacy_policy(requester)

diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 20700fc5a8a4..9a092da71597 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -163,6 +163,31 @@ async def _user_left_room(self, target: UserID, room_id: str) -> None:
     async def forget(self, user: UserID, room_id: str) -> None:
         raise NotImplementedError()

+    async def ratelimit_multiple_invites(
+        self,
+        requester: Optional[Requester],
+        room_id: Optional[str],
+        n_invites: int,
+        update: bool = True,
+    ):
+        """Ratelimit more than one invite sent by the given requester in the given room.
+
+        Args:
+            requester: The requester sending the invites.
+            room_id: The room the invites are being sent in.
+            n_invites: The number of invites to ratelimit for.
+            update: Whether to update the ratelimiter's cache.
+
+        Raises:
+            LimitExceededError: The requester can't send that many invites in the room.
+        """
+        await self._invites_per_room_limiter.ratelimit(
+            requester,
+            room_id,
+            update=update,
+            n_actions=n_invites,
+        )
+
     async def ratelimit_invite(
         self,
         requester: Optional[Requester],
diff --git a/tests/api/test_ratelimiting.py b/tests/api/test_ratelimiting.py
index fa96ba07a590..dcf0110c16e0 100644
--- a/tests/api/test_ratelimiting.py
+++ b/tests/api/test_ratelimiting.py
@@ -230,3 +230,60 @@ def test_db_user_override(self):
         # Shouldn't raise
         for _ in range(20):
             self.get_success_or_raise(limiter.ratelimit(requester, _time_now_s=0))
+
+    def test_multiple_actions(self):
+        limiter = Ratelimiter(
+            store=self.hs.get_datastore(), clock=None, rate_hz=0.1, burst_count=3
+        )
+        # Test that 4 actions aren't allowed with a maximum burst of 3.
+        allowed, time_allowed = self.get_success_or_raise(
+            limiter.can_do_action(None, key="test_id", n_actions=4, _time_now_s=0)
+        )
+        self.assertFalse(allowed)
+
+        # Test that 3 actions are allowed with a maximum burst of 3.
+        allowed, time_allowed = self.get_success_or_raise(
+            limiter.can_do_action(None, key="test_id", n_actions=3, _time_now_s=0)
+        )
+        self.assertTrue(allowed)
+        self.assertEquals(10.0, time_allowed)
+
+        # Test that, after doing these 3 actions, we can't do any more actions without
+        # waiting.
+        allowed, time_allowed = self.get_success_or_raise(
+            limiter.can_do_action(None, key="test_id", n_actions=1, _time_now_s=0)
+        )
+        self.assertFalse(allowed)
+        self.assertEquals(10.0, time_allowed)
+
+        # Test that after waiting we can do only 1 action.
+        allowed, time_allowed = self.get_success_or_raise(
+            limiter.can_do_action(
+                None,
+                key="test_id",
+                update=False,
+                n_actions=1,
+                _time_now_s=10,
+            )
+        )
+        self.assertTrue(allowed)
+        # The time allowed is the current time because we could still repeat the action
+        # once.
+        self.assertEquals(10.0, time_allowed)
+
+        allowed, time_allowed = self.get_success_or_raise(
+            limiter.can_do_action(None, key="test_id", n_actions=2, _time_now_s=10)
+        )
+        self.assertFalse(allowed)
+        # The time allowed doesn't change despite allowed being False because, while we
+        # don't allow 2 actions, we could still do 1.
+        self.assertEquals(10.0, time_allowed)
+
+        # Test that after waiting a bit more we can do 2 actions.
+        allowed, time_allowed = self.get_success_or_raise(
+            limiter.can_do_action(None, key="test_id", n_actions=2, _time_now_s=20)
+        )
+        self.assertTrue(allowed)
+        # The time allowed is the current time because we could still repeat the action
+        # once.
+        self.assertEquals(20.0, time_allowed)
diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py
index a3694f3d0200..7c4bdcdfdd45 100644
--- a/tests/rest/client/v1/test_rooms.py
+++ b/tests/rest/client/v1/test_rooms.py
@@ -463,6 +463,43 @@ def test_post_room_invitees_invalid_mxid(self):
         )
         self.assertEquals(400, channel.code)

+    @unittest.override_config({"rc_invites": {"per_room": {"burst_count": 3}}})
+    def test_post_room_invitees_ratelimit(self):
+        """Test that invites sent when creating a room are correctly ratelimited by a
+        RateLimiter, and that the limit is skipped when the requester is exempt from
+        ratelimiting.
+        """
+
+        # Build the request's content. We use local MXIDs because invites over federation
+        # are more difficult to mock.
+        content = json.dumps(
+            {
+                "invite": [
+                    "@alice1:red",
+                    "@alice2:red",
+                    "@alice3:red",
+                    "@alice4:red",
+                ]
+            }
+        ).encode("utf8")
+
+        # Test that the invites are correctly ratelimited.
+        channel = self.make_request("POST", "/createRoom", content)
+        self.assertEqual(400, channel.code)
+        self.assertEqual(
+            "Cannot invite so many users at once",
+            channel.json_body["error"],
+        )
+
+        # Add the current user to the ratelimit overrides, exempting them from
+        # ratelimiting entirely.
+        self.get_success(
+            self.hs.get_datastore().set_ratelimit_for_user(self.user_id, 0, 0)
+        )
+
+        # Test that the invites aren't ratelimited anymore.
+        channel = self.make_request("POST", "/createRoom", content)
+        self.assertEqual(200, channel.code)
+

 class RoomTopicTestCase(RoomBase):
     """ Tests /rooms/$room_id/topic REST events.
""" From 47806b0869da4adf84a978e4898ec1b4f5985af5 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 12 May 2021 16:59:46 +0100 Subject: [PATCH 135/222] 1.34.0rc1 --- CHANGES.md | 61 ++++++++++++++++++++++++++++++++++++++++ changelog.d/5588.misc | 1 - changelog.d/9881.feature | 1 - changelog.d/9882.misc | 1 - changelog.d/9885.misc | 1 - changelog.d/9886.misc | 1 - changelog.d/9889.feature | 1 - changelog.d/9889.removal | 1 - changelog.d/9895.bugfix | 1 - changelog.d/9896.bugfix | 1 - changelog.d/9896.misc | 1 - changelog.d/9902.feature | 1 - changelog.d/9904.misc | 1 - changelog.d/9905.feature | 1 - changelog.d/9910.bugfix | 1 - changelog.d/9910.feature | 1 - changelog.d/9911.doc | 1 - changelog.d/9913.docker | 1 - changelog.d/9915.feature | 1 - changelog.d/9916.feature | 1 - changelog.d/9928.bugfix | 1 - changelog.d/9930.bugfix | 1 - changelog.d/9931.misc | 1 - changelog.d/9932.misc | 1 - changelog.d/9935.feature | 1 - changelog.d/9945.feature | 1 - changelog.d/9947.feature | 1 - changelog.d/9950.feature | 1 - changelog.d/9951.feature | 1 - changelog.d/9954.feature | 1 - changelog.d/9959.misc | 1 - changelog.d/9961.bugfix | 1 - changelog.d/9965.bugfix | 1 - changelog.d/9966.feature | 1 - changelog.d/9968.bugfix | 1 - synapse/__init__.py | 2 +- 36 files changed, 62 insertions(+), 35 deletions(-) delete mode 100644 changelog.d/5588.misc delete mode 100644 changelog.d/9881.feature delete mode 100644 changelog.d/9882.misc delete mode 100644 changelog.d/9885.misc delete mode 100644 changelog.d/9886.misc delete mode 100644 changelog.d/9889.feature delete mode 100644 changelog.d/9889.removal delete mode 100644 changelog.d/9895.bugfix delete mode 100644 changelog.d/9896.bugfix delete mode 100644 changelog.d/9896.misc delete mode 100644 changelog.d/9902.feature delete mode 100644 changelog.d/9904.misc delete mode 100644 changelog.d/9905.feature delete mode 100644 changelog.d/9910.bugfix delete mode 100644 changelog.d/9910.feature delete mode 100644 changelog.d/9911.doc delete mode 100644 changelog.d/9913.docker delete mode 100644 changelog.d/9915.feature delete mode 100644 changelog.d/9916.feature delete mode 100644 changelog.d/9928.bugfix delete mode 100644 changelog.d/9930.bugfix delete mode 100644 changelog.d/9931.misc delete mode 100644 changelog.d/9932.misc delete mode 100644 changelog.d/9935.feature delete mode 100644 changelog.d/9945.feature delete mode 100644 changelog.d/9947.feature delete mode 100644 changelog.d/9950.feature delete mode 100644 changelog.d/9951.feature delete mode 100644 changelog.d/9954.feature delete mode 100644 changelog.d/9959.misc delete mode 100644 changelog.d/9961.bugfix delete mode 100644 changelog.d/9965.bugfix delete mode 100644 changelog.d/9966.feature delete mode 100644 changelog.d/9968.bugfix diff --git a/CHANGES.md b/CHANGES.md index 93efa3ce5605..ddc1f13a313f 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,64 @@ +Synapse 1.34.0rc1 (2021-05-12) +============================== + +Features +-------- + +- Add experimental option to track memory usage of the caches. ([\#9881](https://github.com/matrix-org/synapse/issues/9881)) +- Add support for `DELETE /_synapse/admin/v1/rooms/`. ([\#9889](https://github.com/matrix-org/synapse/issues/9889)) +- Add limits to how often Synapse will GC, ensuring that large servers do not end up GC thrashing if `gc_thresholds` has not been correctly set. ([\#9902](https://github.com/matrix-org/synapse/issues/9902)) +- Improve performance of sending events for worker-based deployments using Redis. 
([\#9905](https://github.com/matrix-org/synapse/issues/9905), [\#9950](https://github.com/matrix-org/synapse/issues/9950), [\#9951](https://github.com/matrix-org/synapse/issues/9951))
+- Improve performance after joining a large room when presence is enabled. ([\#9910](https://github.com/matrix-org/synapse/issues/9910), [\#9916](https://github.com/matrix-org/synapse/issues/9916))
+- Support stable identifiers for [MSC1772](https://github.com/matrix-org/matrix-doc/pull/1772) Spaces. `m.space.child` events will now be taken into account when populating the experimental spaces summary response. Please see `UPGRADE.rst` if you have customised `room_invite_state_types` in your configuration. ([\#9915](https://github.com/matrix-org/synapse/issues/9915), [\#9966](https://github.com/matrix-org/synapse/issues/9966))
+- Improve performance of backfilling in large rooms. ([\#9935](https://github.com/matrix-org/synapse/issues/9935))
+- Add a config option to allow you to prevent device display names from being shared over federation. Contributed by @aaronraimist. ([\#9945](https://github.com/matrix-org/synapse/issues/9945))
+- Update support for [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946): Spaces Summary. ([\#9947](https://github.com/matrix-org/synapse/issues/9947), [\#9954](https://github.com/matrix-org/synapse/issues/9954))
+
+
+Bugfixes
+--------
+
+- Fix a bug introduced in v1.32.0 where the associated connection was improperly logged for SQL logging statements. ([\#9895](https://github.com/matrix-org/synapse/issues/9895))
+- Correct the type hint for the `user_may_create_room_alias` method of spam checkers. It is provided a `RoomAlias`, not a `str`. ([\#9896](https://github.com/matrix-org/synapse/issues/9896))
+- Fix bug where user directory could get out of sync if room visibility and membership changed in quick succession. ([\#9910](https://github.com/matrix-org/synapse/issues/9910))
+- Include the `origin_server_ts` property in the experimental [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946) support to allow clients to properly sort rooms. ([\#9928](https://github.com/matrix-org/synapse/issues/9928))
+- Fix bugs introduced in v1.23.0 which made the PostgreSQL port script fail when run with a newly-created SQLite database. ([\#9930](https://github.com/matrix-org/synapse/issues/9930))
+- Fix a bug introduced in Synapse 1.29.0 which caused `m.room_key_request` to-device messages sent from one user to another to be dropped. ([\#9961](https://github.com/matrix-org/synapse/issues/9961), [\#9965](https://github.com/matrix-org/synapse/issues/9965))
+- Fix a bug introduced in v1.27.0 preventing users and appservices exempt from ratelimiting from creating rooms with many invitees. ([\#9968](https://github.com/matrix-org/synapse/issues/9968))
+
+
+Updates to the Docker image
+---------------------------
+
+- Added startup_delay to docker healthcheck to reduce waiting time for coming online, updated readme for extra options, contributed by @Maquis196. ([\#9913](https://github.com/matrix-org/synapse/issues/9913))
+
+
+Improved Documentation
+----------------------
+
+- Add `port` argument to the Postgres database sample config section. ([\#9911](https://github.com/matrix-org/synapse/issues/9911))
+
+
+Deprecations and Removals
+-------------------------
+
+- Mark as deprecated `POST /_synapse/admin/v1/rooms/<room_id>/delete`. ([\#9889](https://github.com/matrix-org/synapse/issues/9889))
+
+
+Internal Changes
+----------------
+
+- Reduce the length of Synapse's access tokens.
([\#5588](https://github.com/matrix-org/synapse/issues/5588))
+- Export jemalloc stats to Prometheus if it is being used. ([\#9882](https://github.com/matrix-org/synapse/issues/9882))
+- Add type hints to presence handler. ([\#9885](https://github.com/matrix-org/synapse/issues/9885))
+- Reduce memory usage of the LRU caches. ([\#9886](https://github.com/matrix-org/synapse/issues/9886))
+- Add type hints to the `synapse.handlers` module. ([\#9896](https://github.com/matrix-org/synapse/issues/9896))
+- Time response time for external cache requests. ([\#9904](https://github.com/matrix-org/synapse/issues/9904))
+- Minor fixes to the `make_full_schema.sh` script. ([\#9931](https://github.com/matrix-org/synapse/issues/9931))
+- Move database schema files into a common directory. ([\#9932](https://github.com/matrix-org/synapse/issues/9932))
+- Add debug logging for lost/delayed to-device messages. ([\#9959](https://github.com/matrix-org/synapse/issues/9959))
+
+
 Synapse 1.33.2 (2021-05-11)
 ===========================

diff --git a/changelog.d/5588.misc b/changelog.d/5588.misc
deleted file mode 100644
index b8f52a212c6e..000000000000
--- a/changelog.d/5588.misc
+++ /dev/null
@@ -1 +0,0 @@
-Reduce the length of Synapse's access tokens.
diff --git a/changelog.d/9881.feature b/changelog.d/9881.feature
deleted file mode 100644
index 088a517e0255..000000000000
--- a/changelog.d/9881.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add experimental option to track memory usage of the caches.
diff --git a/changelog.d/9882.misc b/changelog.d/9882.misc
deleted file mode 100644
index facfa31f38ad..000000000000
--- a/changelog.d/9882.misc
+++ /dev/null
@@ -1 +0,0 @@
-Export jemalloc stats to Prometheus if it is being used.
diff --git a/changelog.d/9885.misc b/changelog.d/9885.misc
deleted file mode 100644
index 492fccea46e3..000000000000
--- a/changelog.d/9885.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add type hints to presence handler.
diff --git a/changelog.d/9886.misc b/changelog.d/9886.misc
deleted file mode 100644
index 8ff869e6598d..000000000000
--- a/changelog.d/9886.misc
+++ /dev/null
@@ -1 +0,0 @@
-Reduce memory usage of the LRU caches.
diff --git a/changelog.d/9889.feature b/changelog.d/9889.feature
deleted file mode 100644
index 74d46f222ee1..000000000000
--- a/changelog.d/9889.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add support for `DELETE /_synapse/admin/v1/rooms/<room_id>`.
\ No newline at end of file
diff --git a/changelog.d/9889.removal b/changelog.d/9889.removal
deleted file mode 100644
index 398b9e129b48..000000000000
--- a/changelog.d/9889.removal
+++ /dev/null
@@ -1 +0,0 @@
-Mark as deprecated `POST /_synapse/admin/v1/rooms/<room_id>/delete`.
\ No newline at end of file
diff --git a/changelog.d/9895.bugfix b/changelog.d/9895.bugfix
deleted file mode 100644
index 1053f975bf18..000000000000
--- a/changelog.d/9895.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug introduced in v1.32.0 where the associated connection was improperly logged for SQL logging statements.
diff --git a/changelog.d/9896.bugfix b/changelog.d/9896.bugfix
deleted file mode 100644
index 07a8e87f9f98..000000000000
--- a/changelog.d/9896.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Correct the type hint for the `user_may_create_room_alias` method of spam checkers. It is provided a `RoomAlias`, not a `str`.
diff --git a/changelog.d/9896.misc b/changelog.d/9896.misc
deleted file mode 100644
index e41c7d1f02b4..000000000000
--- a/changelog.d/9896.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add type hints to the `synapse.handlers` module.
diff --git a/changelog.d/9902.feature b/changelog.d/9902.feature deleted file mode 100644 index 4d9f324d4e29..000000000000 --- a/changelog.d/9902.feature +++ /dev/null @@ -1 +0,0 @@ -Add limits to how often Synapse will GC, ensuring that large servers do not end up GC thrashing if `gc_thresholds` has not been correctly set. diff --git a/changelog.d/9904.misc b/changelog.d/9904.misc deleted file mode 100644 index 3db1e625aed6..000000000000 --- a/changelog.d/9904.misc +++ /dev/null @@ -1 +0,0 @@ -Time response time for external cache requests. diff --git a/changelog.d/9905.feature b/changelog.d/9905.feature deleted file mode 100644 index 96a0e7f09fbc..000000000000 --- a/changelog.d/9905.feature +++ /dev/null @@ -1 +0,0 @@ -Improve performance of sending events for worker-based deployments using Redis. diff --git a/changelog.d/9910.bugfix b/changelog.d/9910.bugfix deleted file mode 100644 index 06d523fd46a8..000000000000 --- a/changelog.d/9910.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where user directory could get out of sync if room visibility and membership changed in quick succession. diff --git a/changelog.d/9910.feature b/changelog.d/9910.feature deleted file mode 100644 index 54165cce18af..000000000000 --- a/changelog.d/9910.feature +++ /dev/null @@ -1 +0,0 @@ -Improve performance after joining a large room when presence is enabled. diff --git a/changelog.d/9911.doc b/changelog.d/9911.doc deleted file mode 100644 index f7fd9f1ba9f2..000000000000 --- a/changelog.d/9911.doc +++ /dev/null @@ -1 +0,0 @@ -Add `port` argument to the Postgres database sample config section. \ No newline at end of file diff --git a/changelog.d/9913.docker b/changelog.d/9913.docker deleted file mode 100644 index 93835e14cba0..000000000000 --- a/changelog.d/9913.docker +++ /dev/null @@ -1 +0,0 @@ -Added startup_delay to docker healthcheck to reduce waiting time for coming online, updated readme for extra options, contributed by @Maquis196. diff --git a/changelog.d/9915.feature b/changelog.d/9915.feature deleted file mode 100644 index 7b81faabead1..000000000000 --- a/changelog.d/9915.feature +++ /dev/null @@ -1 +0,0 @@ -Support stable identifiers for [MSC1772](https://github.com/matrix-org/matrix-doc/pull/1772) Spaces. `m.space.child` events will now be taken into account when populating the experimental spaces summary response. Please see `UPGRADE.rst` if you have customised `room_invite_state_types` in your configuration. \ No newline at end of file diff --git a/changelog.d/9916.feature b/changelog.d/9916.feature deleted file mode 100644 index 54165cce18af..000000000000 --- a/changelog.d/9916.feature +++ /dev/null @@ -1 +0,0 @@ -Improve performance after joining a large room when presence is enabled. diff --git a/changelog.d/9928.bugfix b/changelog.d/9928.bugfix deleted file mode 100644 index 7b74cd9fb650..000000000000 --- a/changelog.d/9928.bugfix +++ /dev/null @@ -1 +0,0 @@ -Include the `origin_server_ts` property in the experimental [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946) support to allow clients to properly sort rooms. diff --git a/changelog.d/9930.bugfix b/changelog.d/9930.bugfix deleted file mode 100644 index 9b22ed4458bb..000000000000 --- a/changelog.d/9930.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bugs introduced in v1.23.0 which made the PostgreSQL port script fail when run with a newly-created SQLite database. 
diff --git a/changelog.d/9931.misc b/changelog.d/9931.misc deleted file mode 100644 index 326adc7f3cde..000000000000 --- a/changelog.d/9931.misc +++ /dev/null @@ -1 +0,0 @@ -Minor fixes to the `make_full_schema.sh` script. diff --git a/changelog.d/9932.misc b/changelog.d/9932.misc deleted file mode 100644 index 9e16a3617320..000000000000 --- a/changelog.d/9932.misc +++ /dev/null @@ -1 +0,0 @@ -Move database schema files into a common directory. diff --git a/changelog.d/9935.feature b/changelog.d/9935.feature deleted file mode 100644 index eeda5bf50e23..000000000000 --- a/changelog.d/9935.feature +++ /dev/null @@ -1 +0,0 @@ -Improve performance of backfilling in large rooms. diff --git a/changelog.d/9945.feature b/changelog.d/9945.feature deleted file mode 100644 index 84308e8cce18..000000000000 --- a/changelog.d/9945.feature +++ /dev/null @@ -1 +0,0 @@ -Add a config option to allow you to prevent device display names from being shared over federation. Contributed by @aaronraimist. diff --git a/changelog.d/9947.feature b/changelog.d/9947.feature deleted file mode 100644 index ce8874f81062..000000000000 --- a/changelog.d/9947.feature +++ /dev/null @@ -1 +0,0 @@ -Update support for [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946): Spaces Summary. diff --git a/changelog.d/9950.feature b/changelog.d/9950.feature deleted file mode 100644 index 96a0e7f09fbc..000000000000 --- a/changelog.d/9950.feature +++ /dev/null @@ -1 +0,0 @@ -Improve performance of sending events for worker-based deployments using Redis. diff --git a/changelog.d/9951.feature b/changelog.d/9951.feature deleted file mode 100644 index 96a0e7f09fbc..000000000000 --- a/changelog.d/9951.feature +++ /dev/null @@ -1 +0,0 @@ -Improve performance of sending events for worker-based deployments using Redis. diff --git a/changelog.d/9954.feature b/changelog.d/9954.feature deleted file mode 100644 index ce8874f81062..000000000000 --- a/changelog.d/9954.feature +++ /dev/null @@ -1 +0,0 @@ -Update support for [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946): Spaces Summary. diff --git a/changelog.d/9959.misc b/changelog.d/9959.misc deleted file mode 100644 index 7231f29d79aa..000000000000 --- a/changelog.d/9959.misc +++ /dev/null @@ -1 +0,0 @@ -Add debug logging for lost/delayed to-device messages. diff --git a/changelog.d/9961.bugfix b/changelog.d/9961.bugfix deleted file mode 100644 index e26d141a5316..000000000000 --- a/changelog.d/9961.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.29.0 which caused `m.room_key_request` to-device messages sent from one user to another to be dropped. diff --git a/changelog.d/9965.bugfix b/changelog.d/9965.bugfix deleted file mode 100644 index e26d141a5316..000000000000 --- a/changelog.d/9965.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.29.0 which caused `m.room_key_request` to-device messages sent from one user to another to be dropped. diff --git a/changelog.d/9966.feature b/changelog.d/9966.feature deleted file mode 100644 index 7b81faabead1..000000000000 --- a/changelog.d/9966.feature +++ /dev/null @@ -1 +0,0 @@ -Support stable identifiers for [MSC1772](https://github.com/matrix-org/matrix-doc/pull/1772) Spaces. `m.space.child` events will now be taken into account when populating the experimental spaces summary response. Please see `UPGRADE.rst` if you have customised `room_invite_state_types` in your configuration. 
\ No newline at end of file diff --git a/changelog.d/9968.bugfix b/changelog.d/9968.bugfix deleted file mode 100644 index 39e75f995650..000000000000 --- a/changelog.d/9968.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in v1.27.0 preventing users and appservices exempt from ratelimiting from creating rooms with many invitees. diff --git a/synapse/__init__.py b/synapse/__init__.py index ce822ccb0426..15d54a1ceb81 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.33.2" +__version__ = "1.34.0rc1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 91143bb24ee69df71f935fc8062b11508f6c4d76 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 12 May 2021 17:04:00 +0100 Subject: [PATCH 136/222] Refer and link to the upgrade notes rather than to the file name --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index ddc1f13a313f..e6c455033931 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -9,7 +9,7 @@ Features - Add limits to how often Synapse will GC, ensuring that large servers do not end up GC thrashing if `gc_thresholds` has not been correctly set. ([\#9902](https://github.com/matrix-org/synapse/issues/9902)) - Improve performance of sending events for worker-based deployments using Redis. ([\#9905](https://github.com/matrix-org/synapse/issues/9905), [\#9950](https://github.com/matrix-org/synapse/issues/9950), [\#9951](https://github.com/matrix-org/synapse/issues/9951)) - Improve performance after joining a large room when presence is enabled. ([\#9910](https://github.com/matrix-org/synapse/issues/9910), [\#9916](https://github.com/matrix-org/synapse/issues/9916)) -- Support stable identifiers for [MSC1772](https://github.com/matrix-org/matrix-doc/pull/1772) Spaces. `m.space.child` events will now be taken into account when populating the experimental spaces summary response. Please see `UPGRADE.rst` if you have customised `room_invite_state_types` in your configuration. ([\#9915](https://github.com/matrix-org/synapse/issues/9915), [\#9966](https://github.com/matrix-org/synapse/issues/9966)) +- Support stable identifiers for [MSC1772](https://github.com/matrix-org/matrix-doc/pull/1772) Spaces. `m.space.child` events will now be taken into account when populating the experimental spaces summary response. Please see [the upgrade notes](https://github.com/matrix-org/synapse/blob/master/UPGRADE.rst#upgrading-to-v1340) if you have customised `room_invite_state_types` in your configuration. ([\#9915](https://github.com/matrix-org/synapse/issues/9915), [\#9966](https://github.com/matrix-org/synapse/issues/9966)) - Improve performance of backfilling in large rooms. ([\#9935](https://github.com/matrix-org/synapse/issues/9935)) - Add a config option to allow you to prevent device display names from being shared over federation. Contributed by @aaronraimist. ([\#9945](https://github.com/matrix-org/synapse/issues/9945)) - Update support for [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946): Spaces Summary. 
([\#9947](https://github.com/matrix-org/synapse/issues/9947), [\#9954](https://github.com/matrix-org/synapse/issues/9954))

From 451f25172afc0ce46e416c73fa703c5edf279d54 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Wed, 12 May 2021 17:10:42 +0100
Subject: [PATCH 137/222] Incorporate changes from review

---
 CHANGES.md | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/CHANGES.md b/CHANGES.md
index e6c455033931..2ceae0ac8c2b 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,6 +1,10 @@
 Synapse 1.34.0rc1 (2021-05-12)
 ==============================

+This release deprecates the `room_invite_state_types` configuration setting. See the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.34.0/UPGRADE.rst#upgrading-to-v1340) for instructions on updating your configuration file to use the new `room_prejoin_state` setting.
+
+This release also deprecates the `POST /_synapse/admin/v1/rooms/<room_id>/delete` admin API route. Server administrators are encouraged to update their scripts to use the new `DELETE /_synapse/admin/v1/rooms/<room_id>` route instead.
+
 Features
 --------

@@ -9,7 +13,7 @@ Features
 - Add limits to how often Synapse will GC, ensuring that large servers do not end up GC thrashing if `gc_thresholds` has not been correctly set. ([\#9902](https://github.com/matrix-org/synapse/issues/9902))
 - Improve performance of sending events for worker-based deployments using Redis. ([\#9905](https://github.com/matrix-org/synapse/issues/9905), [\#9950](https://github.com/matrix-org/synapse/issues/9950), [\#9951](https://github.com/matrix-org/synapse/issues/9951))
 - Improve performance after joining a large room when presence is enabled. ([\#9910](https://github.com/matrix-org/synapse/issues/9910), [\#9916](https://github.com/matrix-org/synapse/issues/9916))
-- Support stable identifiers for [MSC1772](https://github.com/matrix-org/matrix-doc/pull/1772) Spaces. `m.space.child` events will now be taken into account when populating the experimental spaces summary response. Please see [the upgrade notes](https://github.com/matrix-org/synapse/blob/master/UPGRADE.rst#upgrading-to-v1340) if you have customised `room_invite_state_types` in your configuration. ([\#9915](https://github.com/matrix-org/synapse/issues/9915), [\#9966](https://github.com/matrix-org/synapse/issues/9966))
+- Support stable identifiers for [MSC1772](https://github.com/matrix-org/matrix-doc/pull/1772) Spaces. `m.space.child` events will now be taken into account when populating the experimental spaces summary response. Please see [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.34.0/UPGRADE.rst#upgrading-to-v1340) if you have customised `room_invite_state_types` in your configuration. ([\#9915](https://github.com/matrix-org/synapse/issues/9915), [\#9966](https://github.com/matrix-org/synapse/issues/9966))
 - Improve performance of backfilling in large rooms. ([\#9935](https://github.com/matrix-org/synapse/issues/9935))
 - Add a config option to allow you to prevent device display names from being shared over federation. Contributed by @aaronraimist. ([\#9945](https://github.com/matrix-org/synapse/issues/9945))
 - Update support for [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946): Spaces Summary.
([\#9947](https://github.com/matrix-org/synapse/issues/9947), [\#9954](https://github.com/matrix-org/synapse/issues/9954)) @@ -30,7 +34,7 @@ Bugfixes Updates to the Docker image --------------------------- -- Added startup_delay to docker healthcheck to reduce waiting time for coming online, updated readme for extra options, contributed by @Maquis196. ([\#9913](https://github.com/matrix-org/synapse/issues/9913)) +- Add `startup_delay` to docker healthcheck to reduce waiting time for coming online and update the documentation with extra options. Contributed by @Maquis196. ([\#9913](https://github.com/matrix-org/synapse/issues/9913)) Improved Documentation From d19bccdbecfeee3e59666748db7fd971cd7978d2 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 13 May 2021 14:37:20 -0400 Subject: [PATCH 138/222] Update SSO mapping providers documentation about unique IDs. (#9980) --- changelog.d/9980.doc | 1 + docs/sso_mapping_providers.md | 18 +++++++++++------- 2 files changed, 12 insertions(+), 7 deletions(-) create mode 100644 changelog.d/9980.doc diff --git a/changelog.d/9980.doc b/changelog.d/9980.doc new file mode 100644 index 000000000000..d30ed0601da6 --- /dev/null +++ b/changelog.d/9980.doc @@ -0,0 +1 @@ +Clarify documentation around SSO mapping providers generating unique IDs and localparts. diff --git a/docs/sso_mapping_providers.md b/docs/sso_mapping_providers.md index 50020d1a4ad1..6db2dc8be5b9 100644 --- a/docs/sso_mapping_providers.md +++ b/docs/sso_mapping_providers.md @@ -67,8 +67,8 @@ A custom mapping provider must specify the following methods: - Arguments: - `userinfo` - A `authlib.oidc.core.claims.UserInfo` object to extract user information from. - - This method must return a string, which is the unique identifier for the - user. Commonly the ``sub`` claim of the response. + - This method must return a string, which is the unique, immutable identifier + for the user. Commonly the `sub` claim of the response. * `map_user_attributes(self, userinfo, token, failures)` - This method must be async. - Arguments: @@ -87,7 +87,9 @@ A custom mapping provider must specify the following methods: `localpart` value, such as `john.doe1`. - Returns a dictionary with two keys: - `localpart`: A string, used to generate the Matrix ID. If this is - `None`, the user is prompted to pick their own username. + `None`, the user is prompted to pick their own username. This is only used + during a user's first login. Once a localpart has been associated with a + remote user ID (see `get_remote_user_id`) it cannot be updated. - `displayname`: An optional string, the display name for the user. * `get_extra_attributes(self, userinfo, token)` - This method must be async. @@ -153,8 +155,8 @@ A custom mapping provider must specify the following methods: information from. - `client_redirect_url` - A string, the URL that the client will be redirected to. - - This method must return a string, which is the unique identifier for the - user. Commonly the ``uid`` claim of the response. + - This method must return a string, which is the unique, immutable identifier + for the user. Commonly the `uid` claim of the response. * `saml_response_to_user_attributes(self, saml_response, failures, client_redirect_url)` - Arguments: - `saml_response` - A `saml2.response.AuthnResponse` object to extract user @@ -172,8 +174,10 @@ A custom mapping provider must specify the following methods: redirected to. - This method must return a dictionary, which will then be used by Synapse to build a new user. 
The following keys are allowed:
-   * `mxid_localpart` - The mxid localpart of the new user. If this is
-     `None`, the user is prompted to pick their own username.
+   * `mxid_localpart` - A string, the mxid localpart of the new user. If this is
+     `None`, the user is prompted to pick their own username. This is only used
+     during a user's first login. Once a localpart has been associated with a
+     remote user ID (see `get_remote_user_id`) it cannot be updated.
    * `displayname` - The displayname of the new user. If not provided, will
      default to the value of `mxid_localpart`.
    * `emails` - A list of emails for the new user. If not provided, will

From 976216959b3a216a3403b953338f92852c45645c Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Fri, 14 May 2021 09:21:00 +0100
Subject: [PATCH 139/222] Update minimum supported version in postgres.md
 (#9988)

---
 changelog.d/9988.doc | 1 +
 docs/postgres.md     | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/9988.doc

diff --git a/changelog.d/9988.doc b/changelog.d/9988.doc
new file mode 100644
index 000000000000..87cda376a678
--- /dev/null
+++ b/changelog.d/9988.doc
@@ -0,0 +1 @@
+Fix outdated minimum PostgreSQL version in postgres.md.
diff --git a/docs/postgres.md b/docs/postgres.md
index 680685d04ef4..b99fad8a6e24 100644
--- a/docs/postgres.md
+++ b/docs/postgres.md
@@ -1,6 +1,6 @@
 # Using Postgres

-Postgres version 9.5 or later is known to work.
+Synapse supports PostgreSQL versions 9.6 or later.

 ## Install postgres client libraries

From c14f99be461d8ac9a36ad548e8e463feeda6394c Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Fri, 14 May 2021 10:51:08 +0100
Subject: [PATCH 140/222] Support enabling opentracing by user (#9978)

Add a config option which allows enabling opentracing by user id, eg for
debugging requests made by a test user.

---
 changelog.d/9978.feature |  1 +
 docs/opentracing.md      | 10 +++++-----
 docs/sample_config.yaml  | 20 ++++++++++++++------
 synapse/api/auth.py      |  5 +++++
 synapse/config/tracer.py | 37 +++++++++++++++++++++++++++++++------
 5 files changed, 56 insertions(+), 17 deletions(-)
 create mode 100644 changelog.d/9978.feature

diff --git a/changelog.d/9978.feature b/changelog.d/9978.feature
new file mode 100644
index 000000000000..851adb9f6ea8
--- /dev/null
+++ b/changelog.d/9978.feature
@@ -0,0 +1 @@
+Add a configuration option which allows enabling opentracing by user id.
diff --git a/docs/opentracing.md b/docs/opentracing.md
index 4c7a56a5d7f3..f91362f11221 100644
--- a/docs/opentracing.md
+++ b/docs/opentracing.md
@@ -42,17 +42,17 @@ To receive OpenTracing spans, start up a Jaeger server. This can be done
 using docker like so:

 ```sh
-docker run -d --name jaeger
+docker run -d --name jaeger \
   -p 6831:6831/udp \
   -p 6832:6832/udp \
   -p 5778:5778 \
   -p 16686:16686 \
   -p 14268:14268 \
-  jaegertracing/all-in-one:1.13
+  jaegertracing/all-in-one:1
 ```

 Latest documentation is probably at
-<https://www.jaegertracing.io/docs/1.13/getting-started/>
+https://www.jaegertracing.io/docs/latest/getting-started.

 ## Enable OpenTracing in Synapse
@@ -62,7 +62,7 @@ as shown in the [sample config](./sample_config.yaml). For example:

 ```yaml
 opentracing:
-    tracer_enabled: true
+    enabled: true
     homeserver_whitelist:
       - "mytrustedhomeserver.org"
       - "*.myotherhomeservers.com"
@@ -90,4 +90,4 @@ to two problems, namely:
 ## Configuring Jaeger

 Sampling strategies can be set as in this document:
-<https://www.jaegertracing.io/docs/1.13/sampling/>
+<https://www.jaegertracing.io/docs/latest/sampling/>.
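Combining the whitelist shown above with the option this patch introduces, a homeserver configuration might plausibly look like the following; the server and user names are illustrative, and the option itself is documented in the sample config below.

```yaml
opentracing:
  enabled: true
  homeserver_whitelist:
    - "mytrustedhomeserver.org"
  # Always trace requests from these users, even when the sampler would
  # otherwise drop the trace.
  force_tracing_for_users:
    - "@test-user:mytrustedhomeserver.org"
```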
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 67ad57b1aa9a..2952f2ba3201 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -2845,7 +2845,8 @@ opentracing: #enabled: true # The list of homeservers we wish to send and receive span contexts and span baggage. - # See docs/opentracing.rst + # See docs/opentracing.rst. + # # This is a list of regexes which are matched against the server_name of the # homeserver. # @@ -2854,19 +2855,26 @@ opentracing: #homeserver_whitelist: # - ".*" + # A list of the matrix IDs of users whose requests will always be traced, + # even if the tracing system would otherwise drop the traces due to + # probabilistic sampling. + # + # By default, the list is empty. + # + #force_tracing_for_users: + # - "@user1:server_name" + # - "@user2:server_name" + # Jaeger can be configured to sample traces at different rates. # All configuration options provided by Jaeger can be set here. - # Jaeger's configuration mostly related to trace sampling which + # Jaeger's configuration is mostly related to trace sampling which # is documented here: - # https://www.jaegertracing.io/docs/1.13/sampling/. + # https://www.jaegertracing.io/docs/latest/sampling/. # #jaeger_config: # sampler: # type: const # param: 1 - - # Logging whether spans were started and reported - # # logging: # false diff --git a/synapse/api/auth.py b/synapse/api/auth.py index efc926d0941d..458306eba5ec 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -87,6 +87,7 @@ def __init__(self, hs: "HomeServer"): ) self._track_appservice_user_ips = hs.config.track_appservice_user_ips self._macaroon_secret_key = hs.config.macaroon_secret_key + self._force_tracing_for_users = hs.config.tracing.force_tracing_for_users async def check_from_context( self, room_version: str, event, context, do_sig_check=True @@ -208,6 +209,8 @@ async def get_user_by_req( opentracing.set_tag("authenticated_entity", user_id) opentracing.set_tag("user_id", user_id) opentracing.set_tag("appservice_id", app_service.id) + if user_id in self._force_tracing_for_users: + opentracing.set_tag(opentracing.tags.SAMPLING_PRIORITY, 1) return requester @@ -260,6 +263,8 @@ async def get_user_by_req( opentracing.set_tag("user_id", user_info.user_id) if device_id: opentracing.set_tag("device_id", device_id) + if user_info.token_owner in self._force_tracing_for_users: + opentracing.set_tag(opentracing.tags.SAMPLING_PRIORITY, 1) return requester except KeyError: diff --git a/synapse/config/tracer.py b/synapse/config/tracer.py index db22b5b19fb6..d0ea17261f87 100644 --- a/synapse/config/tracer.py +++ b/synapse/config/tracer.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from typing import Set + from synapse.python_dependencies import DependencyException, check_requirements from ._base import Config, ConfigError @@ -32,6 +34,8 @@ def read_config(self, config, **kwargs): {"sampler": {"type": "const", "param": 1}, "logging": False}, ) + self.force_tracing_for_users: Set[str] = set() + if not self.opentracer_enabled: return @@ -48,6 +52,19 @@ def read_config(self, config, **kwargs): if not isinstance(self.opentracer_whitelist, list): raise ConfigError("Tracer homeserver_whitelist config is malformed") + force_tracing_for_users = opentracing_config.get("force_tracing_for_users", []) + if not isinstance(force_tracing_for_users, list): + raise ConfigError( + "Expected a list", ("opentracing", "force_tracing_for_users") + ) + for i, u in enumerate(force_tracing_for_users): + if not isinstance(u, str): + raise ConfigError( + "Expected a string", + ("opentracing", "force_tracing_for_users", f"index {i}"), + ) + self.force_tracing_for_users.add(u) + def generate_config_section(cls, **kwargs): return """\ ## Opentracing ## @@ -64,7 +81,8 @@ def generate_config_section(cls, **kwargs): #enabled: true # The list of homeservers we wish to send and receive span contexts and span baggage. - # See docs/opentracing.rst + # See docs/opentracing.rst. + # # This is a list of regexes which are matched against the server_name of the # homeserver. # @@ -73,19 +91,26 @@ def generate_config_section(cls, **kwargs): #homeserver_whitelist: # - ".*" + # A list of the matrix IDs of users whose requests will always be traced, + # even if the tracing system would otherwise drop the traces due to + # probabilistic sampling. + # + # By default, the list is empty. + # + #force_tracing_for_users: + # - "@user1:server_name" + # - "@user2:server_name" + # Jaeger can be configured to sample traces at different rates. # All configuration options provided by Jaeger can be set here. - # Jaeger's configuration mostly related to trace sampling which + # Jaeger's configuration is mostly related to trace sampling which # is documented here: - # https://www.jaegertracing.io/docs/1.13/sampling/. + # https://www.jaegertracing.io/docs/latest/sampling/. # #jaeger_config: # sampler: # type: const # param: 1 - - # Logging whether spans were started and reported - # # logging: # false """ From 498084228b89d30462df0a5adfcc737fdc21d314 Mon Sep 17 00:00:00 2001 From: Dan Callahan Date: Fri, 14 May 2021 10:58:46 +0100 Subject: [PATCH 141/222] Use Python's secrets module instead of random (#9984) Functionally identical, but more obviously cryptographically secure. ...Explicit is better than implicit? Avoids needing to know that SystemRandom() implies a CSPRNG, and complies with the big scary red box on the documentation for random: > Warning: > The pseudo-random generators of this module should not be used for > security purposes. For security or cryptographic uses, see the > secrets module. https://docs.python.org/3/library/random.html Signed-off-by: Dan Callahan --- changelog.d/9984.misc | 1 + synapse/util/stringutils.py | 19 +++++++++++-------- 2 files changed, 12 insertions(+), 8 deletions(-) create mode 100644 changelog.d/9984.misc diff --git a/changelog.d/9984.misc b/changelog.d/9984.misc new file mode 100644 index 000000000000..97bd747f2653 --- /dev/null +++ b/changelog.d/9984.misc @@ -0,0 +1 @@ +Simplify a few helper functions. 
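As a minimal sketch of the swap the stringutils diff below performs: the two snippets generate equivalent tokens, but only the second makes the cryptographic intent explicit.

```python
import random
import secrets
import string

# Old approach: SystemRandom *is* a CSPRNG, but the reader has to know that.
rand = random.SystemRandom()
old_token = "".join(rand.choice(string.ascii_letters) for _ in range(20))

# New approach: `secrets` is the module documented for security-sensitive
# randomness, so no background knowledge is required.
new_token = "".join(secrets.choice(string.ascii_letters) for _ in range(20))
```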
diff --git a/synapse/util/stringutils.py b/synapse/util/stringutils.py index 4f25cd1d26ae..40cd51a8cac8 100644 --- a/synapse/util/stringutils.py +++ b/synapse/util/stringutils.py @@ -13,8 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. import itertools -import random import re +import secrets import string from collections.abc import Iterable from typing import Optional, Tuple @@ -35,18 +35,21 @@ # MXC_REGEX = re.compile("^mxc://([^/]+)/([^/#?]+)$") -# random_string and random_string_with_symbols are used for a range of things, -# some cryptographically important, some less so. We use SystemRandom to make sure -# we get cryptographically-secure randoms. -rand = random.SystemRandom() - def random_string(length: int) -> str: - return "".join(rand.choice(string.ascii_letters) for _ in range(length)) + """Generate a cryptographically secure string of random letters. + + Drawn from the characters: `a-z` and `A-Z` + """ + return "".join(secrets.choice(string.ascii_letters) for _ in range(length)) def random_string_with_symbols(length: int) -> str: - return "".join(rand.choice(_string_with_symbols) for _ in range(length)) + """Generate a cryptographically secure string of random letters/numbers/symbols. + + Drawn from the characters: `a-z`, `A-Z`, `0-9`, and `.,;:^&*-_+=#~@` + """ + return "".join(secrets.choice(_string_with_symbols) for _ in range(length)) def is_ascii(s: bytes) -> bool: From bd918d874f4eb459b9c2f9af6e8e994b6b19d264 Mon Sep 17 00:00:00 2001 From: Dan Callahan Date: Fri, 14 May 2021 10:58:52 +0100 Subject: [PATCH 142/222] Simplify exception handling in is_ascii. (#9985) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We can get away with just catching UnicodeError here. ⋮ +-- ValueError | +-- UnicodeError | +-- UnicodeDecodeError | +-- UnicodeEncodeError | +-- UnicodeTranslateError ⋮ https://docs.python.org/3/library/exceptions.html#exception-hierarchy Signed-off-by: Dan Callahan --- changelog.d/9985.misc | 1 + synapse/util/stringutils.py | 4 +--- 2 files changed, 2 insertions(+), 3 deletions(-) create mode 100644 changelog.d/9985.misc diff --git a/changelog.d/9985.misc b/changelog.d/9985.misc new file mode 100644 index 000000000000..97bd747f2653 --- /dev/null +++ b/changelog.d/9985.misc @@ -0,0 +1 @@ +Simplify a few helper functions. diff --git a/synapse/util/stringutils.py b/synapse/util/stringutils.py index 40cd51a8cac8..f02943219120 100644 --- a/synapse/util/stringutils.py +++ b/synapse/util/stringutils.py @@ -55,9 +55,7 @@ def random_string_with_symbols(length: int) -> str: def is_ascii(s: bytes) -> bool: try: s.decode("ascii").encode("ascii") - except UnicodeDecodeError: - return False - except UnicodeEncodeError: + except UnicodeError: return False return True From ebdef256b36ec67a8aac9721eea8971dd5cf361f Mon Sep 17 00:00:00 2001 From: Dan Callahan Date: Fri, 14 May 2021 10:58:57 +0100 Subject: [PATCH 143/222] Remove superfluous call to bool() (#9986) Our strtobool already returns a bool, so no need to re-cast here Signed-off-by: Dan Callahan --- changelog.d/9986.misc | 1 + synapse/config/registration.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/9986.misc diff --git a/changelog.d/9986.misc b/changelog.d/9986.misc new file mode 100644 index 000000000000..97bd747f2653 --- /dev/null +++ b/changelog.d/9986.misc @@ -0,0 +1 @@ +Simplify a few helper functions. 
diff --git a/synapse/config/registration.py b/synapse/config/registration.py index e6f52b4f40ea..d9dc55a0c3c5 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -349,4 +349,4 @@ def add_arguments(parser): def read_arguments(self, args): if args.enable_registration is not None: - self.enable_registration = bool(strtobool(str(args.enable_registration))) + self.enable_registration = strtobool(str(args.enable_registration)) From 52ed9655edf8849d68a178e1c76040c79824a353 Mon Sep 17 00:00:00 2001 From: Dan Callahan Date: Fri, 14 May 2021 10:59:10 +0100 Subject: [PATCH 144/222] Remove unnecessary SystemRandom from SQLBaseStore (#9987) It's not obvious that instances of SQLBaseStore each need their own instances of random.SystemRandom(); let's just use random directly. Introduced by 52839886d664576831462e033b88e5aba4c019e3 Signed-off-by: Dan Callahan --- changelog.d/9987.misc | 1 + synapse/storage/_base.py | 2 -- synapse/storage/databases/main/registration.py | 3 ++- 3 files changed, 3 insertions(+), 3 deletions(-) create mode 100644 changelog.d/9987.misc diff --git a/changelog.d/9987.misc b/changelog.d/9987.misc new file mode 100644 index 000000000000..02c088e3e6c2 --- /dev/null +++ b/changelog.d/9987.misc @@ -0,0 +1 @@ +Remove unnecessary property from SQLBaseStore. diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 3d98d3f5f822..0623da9aa196 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -14,7 +14,6 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -import random from abc import ABCMeta from typing import TYPE_CHECKING, Any, Collection, Iterable, Optional, Union @@ -44,7 +43,6 @@ def __init__(self, database: DatabasePool, db_conn: Connection, hs: "HomeServer" self._clock = hs.get_clock() self.database_engine = database.engine self.db_pool = database - self.rand = random.SystemRandom() def process_replication_rows( self, diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 6e5ee557d2ef..e5c5cf8ff065 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging +import random import re from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union @@ -997,7 +998,7 @@ def set_expiration_date_for_user_txn(self, txn, user_id, use_delta=False): expiration_ts = now_ms + self._account_validity_period if use_delta: - expiration_ts = self.rand.randrange( + expiration_ts = random.randrange( expiration_ts - self._account_validity_startup_job_max_delta, expiration_ts, ) From 5090f26b636bf4439575767a2272d033fb33b2d5 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 14 May 2021 11:12:36 +0100 Subject: [PATCH 145/222] Minor `@cachedList` enhancements (#9975) - use a tuple rather than a list for the iterable that is passed into the wrapped function, for performance - test that we can pass an iterable and that keys are correctly deduped. 
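A sketch of the caller-visible behaviour described above; `ThingStore`, `get_thing` and `get_things` are hypothetical names, not Synapse code.

```python
from typing import Dict, Iterable

from synapse.storage._base import SQLBaseStore
from synapse.util.caches.descriptors import cached, cachedList


class ThingStore(SQLBaseStore):
    @cached()
    async def get_thing(self, key: str) -> str:
        ...

    @cachedList(cached_method_name="get_thing", list_name="keys")
    async def get_things(self, keys: Iterable[str]) -> Dict[str, str]:
        # With this change, `keys` arrives as a deduplicated tuple of the
        # cache misses (e.g. ("a", "b")), never a list, so callers may pass
        # any iterable, including a single-use generator.
        ...
```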
--- changelog.d/9975.misc | 1 + synapse/storage/databases/main/devices.py | 2 +- .../storage/databases/main/end_to_end_keys.py | 4 ++-- .../databases/main/user_erasure_store.py | 13 +++++-------- synapse/util/caches/descriptors.py | 14 ++++++++------ tests/util/caches/test_descriptors.py | 17 ++++++++++++++--- 6 files changed, 31 insertions(+), 20 deletions(-) create mode 100644 changelog.d/9975.misc diff --git a/changelog.d/9975.misc b/changelog.d/9975.misc new file mode 100644 index 000000000000..28b1e40c2b99 --- /dev/null +++ b/changelog.d/9975.misc @@ -0,0 +1 @@ +Minor enhancements to the `@cachedList` descriptor. diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index c9346de31673..a1f98b7e3854 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -665,7 +665,7 @@ async def get_device_list_last_stream_id_for_remote( cached_method_name="get_device_list_last_stream_id_for_remote", list_name="user_ids", ) - async def get_device_list_last_stream_id_for_remotes(self, user_ids: str): + async def get_device_list_last_stream_id_for_remotes(self, user_ids: Iterable[str]): rows = await self.db_pool.simple_select_many_batch( table="device_lists_remote_extremeties", column="user_id", diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 398d6b6acb85..9ba5778a8826 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -473,7 +473,7 @@ def _get_bare_e2e_cross_signing_keys(self, user_id): num_args=1, ) async def _get_bare_e2e_cross_signing_keys_bulk( - self, user_ids: List[str] + self, user_ids: Iterable[str] ) -> Dict[str, Dict[str, dict]]: """Returns the cross-signing keys for a set of users. The output of this function should be passed to _get_e2e_cross_signing_signatures_txn if @@ -497,7 +497,7 @@ async def _get_bare_e2e_cross_signing_keys_bulk( def _get_bare_e2e_cross_signing_keys_bulk_txn( self, txn: Connection, - user_ids: List[str], + user_ids: Iterable[str], ) -> Dict[str, Dict[str, dict]]: """Returns the cross-signing keys for a set of users. The output of this function should be passed to _get_e2e_cross_signing_signatures_txn if diff --git a/synapse/storage/databases/main/user_erasure_store.py b/synapse/storage/databases/main/user_erasure_store.py index acf6b2fb643d..1ecdd40c38d2 100644 --- a/synapse/storage/databases/main/user_erasure_store.py +++ b/synapse/storage/databases/main/user_erasure_store.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Dict, Iterable + from synapse.storage._base import SQLBaseStore from synapse.util.caches.descriptors import cached, cachedList @@ -37,21 +39,16 @@ async def is_user_erased(self, user_id: str) -> bool: return bool(result) @cachedList(cached_method_name="is_user_erased", list_name="user_ids") - async def are_users_erased(self, user_ids): + async def are_users_erased(self, user_ids: Iterable[str]) -> Dict[str, bool]: """ Checks which users in a list have requested erasure Args: - user_ids (iterable[str]): full user id to check + user_ids: full user ids to check Returns: - dict[str, bool]: - for each user, whether the user has requested erasure. + for each user, whether the user has requested erasure. 
""" - # this serves the dual purpose of (a) making sure we can do len and - # iterate it multiple times, and (b) avoiding duplicates. - user_ids = tuple(set(user_ids)) - rows = await self.db_pool.simple_select_many_batch( table="erased_users", column="user_id", diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index ac4a078b260b..3a4d027095bb 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -322,8 +322,8 @@ def _wrapped(*args, **kwargs): class DeferredCacheListDescriptor(_CacheDescriptorBase): """Wraps an existing cache to support bulk fetching of keys. - Given a list of keys it looks in the cache to find any hits, then passes - the list of missing keys to the wrapped function. + Given an iterable of keys it looks in the cache to find any hits, then passes + the tuple of missing keys to the wrapped function. Once wrapped, the function returns a Deferred which resolves to the list of results. @@ -437,7 +437,9 @@ def errback(f): return f args_to_call = dict(arg_dict) - args_to_call[self.list_name] = list(missing) + # copy the missing set before sending it to the callee, to guard against + # modification. + args_to_call[self.list_name] = tuple(missing) cached_defers.append( defer.maybeDeferred( @@ -522,14 +524,14 @@ def cachedList( Used to do batch lookups for an already created cache. A single argument is specified as a list that is iterated through to lookup keys in the - original cache. A new list consisting of the keys that weren't in the cache - get passed to the original function, the result of which is stored in the + original cache. A new tuple consisting of the (deduplicated) keys that weren't in + the cache gets passed to the original function, the result of which is stored in the cache. Args: cached_method_name: The name of the single-item lookup method. This is only used to find the cache to use. - list_name: The name of the argument that is the list to use to + list_name: The name of the argument that is the iterable to use to do batch lookups in the cache. num_args: Number of arguments to use as the key in the cache (including list_name). Defaults to all named parameters. 
diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 178ac8a68cc7..bbbc276697e2 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -666,18 +666,20 @@ async def list_fn(self, args1, arg2): with LoggingContext("c1") as c1: obj = Cls() obj.mock.return_value = {10: "fish", 20: "chips"} + + # start the lookup off d1 = obj.list_fn([10, 20], 2) self.assertEqual(current_context(), SENTINEL_CONTEXT) r = yield d1 self.assertEqual(current_context(), c1) - obj.mock.assert_called_once_with([10, 20], 2) + obj.mock.assert_called_once_with((10, 20), 2) self.assertEqual(r, {10: "fish", 20: "chips"}) obj.mock.reset_mock() # a call with different params should call the mock again obj.mock.return_value = {30: "peas"} r = yield obj.list_fn([20, 30], 2) - obj.mock.assert_called_once_with([30], 2) + obj.mock.assert_called_once_with((30,), 2) self.assertEqual(r, {20: "chips", 30: "peas"}) obj.mock.reset_mock() @@ -692,6 +694,15 @@ async def list_fn(self, args1, arg2): obj.mock.assert_not_called() self.assertEqual(r, {10: "fish", 20: "chips", 30: "peas"}) + # we should also be able to use a (single-use) iterable, and should + # deduplicate the keys + obj.mock.reset_mock() + obj.mock.return_value = {40: "gravy"} + iterable = (x for x in [10, 40, 40]) + r = yield obj.list_fn(iterable, 2) + obj.mock.assert_called_once_with((40,), 2) + self.assertEqual(r, {10: "fish", 40: "gravy"}) + @defer.inlineCallbacks def test_invalidate(self): """Make sure that invalidation callbacks are called.""" @@ -717,7 +728,7 @@ async def list_fn(self, args1, arg2): # cache miss obj.mock.return_value = {10: "fish", 20: "chips"} r1 = yield obj.list_fn([10, 20], 2, on_invalidate=invalidate0) - obj.mock.assert_called_once_with([10, 20], 2) + obj.mock.assert_called_once_with((10, 20), 2) self.assertEqual(r1, {10: "fish", 20: "chips"}) obj.mock.reset_mock() From 6482075c95957ad980d9c1323f9f982e6f7aaff4 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 14 May 2021 11:46:35 +0100 Subject: [PATCH 146/222] Run `black` on the scripts (#9981) Turns out these scripts weren't getting linted. --- changelog.d/9981.misc | 1 + scripts-dev/build_debian_packages | 105 +++++++++++++++++++----------- scripts-dev/lint.sh | 18 ++++- scripts/export_signing_key | 13 +++- scripts/generate_config | 18 ++--- scripts/hash_password | 6 +- scripts/synapse_port_db | 46 +++++++------ tox.ini | 10 +++ 8 files changed, 141 insertions(+), 76 deletions(-) create mode 100644 changelog.d/9981.misc diff --git a/changelog.d/9981.misc b/changelog.d/9981.misc new file mode 100644 index 000000000000..677c9b4cbdc1 --- /dev/null +++ b/changelog.d/9981.misc @@ -0,0 +1 @@ +Run `black` on files in the `scripts` directory. 
diff --git a/scripts-dev/build_debian_packages b/scripts-dev/build_debian_packages index 07d018db9985..546724f89fea 100755 --- a/scripts-dev/build_debian_packages +++ b/scripts-dev/build_debian_packages @@ -21,18 +21,18 @@ DISTS = ( "debian:buster", "debian:bullseye", "debian:sid", - "ubuntu:bionic", # 18.04 LTS (our EOL forced by Py36 on 2021-12-23) - "ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14) - "ubuntu:groovy", # 20.10 (EOL 2021-07-07) + "ubuntu:bionic", # 18.04 LTS (our EOL forced by Py36 on 2021-12-23) + "ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14) + "ubuntu:groovy", # 20.10 (EOL 2021-07-07) "ubuntu:hirsute", # 21.04 (EOL 2022-01-05) ) -DESC = '''\ +DESC = """\ Builds .debs for synapse, using a Docker image for the build environment. By default, builds for all known distributions, but a list of distributions can be passed on the commandline for debugging. -''' +""" class Builder(object): @@ -46,7 +46,7 @@ class Builder(object): """Build deb for a single distribution""" if self._failed: - print("not building %s due to earlier failure" % (dist, )) + print("not building %s due to earlier failure" % (dist,)) raise Exception("failed") try: @@ -68,48 +68,65 @@ class Builder(object): # we tend to get source packages which are full of debs. (We could hack # around that with more magic in the build_debian.sh script, but that # doesn't solve the problem for natively-run dpkg-buildpakage). - debsdir = os.path.join(projdir, '../debs') + debsdir = os.path.join(projdir, "../debs") os.makedirs(debsdir, exist_ok=True) if self.redirect_stdout: - logfile = os.path.join(debsdir, "%s.buildlog" % (tag, )) + logfile = os.path.join(debsdir, "%s.buildlog" % (tag,)) print("building %s: directing output to %s" % (dist, logfile)) stdout = open(logfile, "w") else: stdout = None # first build a docker image for the build environment - subprocess.check_call([ - "docker", "build", - "--tag", "dh-venv-builder:" + tag, - "--build-arg", "distro=" + dist, - "-f", "docker/Dockerfile-dhvirtualenv", - "docker", - ], stdout=stdout, stderr=subprocess.STDOUT) + subprocess.check_call( + [ + "docker", + "build", + "--tag", + "dh-venv-builder:" + tag, + "--build-arg", + "distro=" + dist, + "-f", + "docker/Dockerfile-dhvirtualenv", + "docker", + ], + stdout=stdout, + stderr=subprocess.STDOUT, + ) container_name = "synapse_build_" + tag with self._lock: self.active_containers.add(container_name) # then run the build itself - subprocess.check_call([ - "docker", "run", - "--rm", - "--name", container_name, - "--volume=" + projdir + ":/synapse/source:ro", - "--volume=" + debsdir + ":/debs", - "-e", "TARGET_USERID=%i" % (os.getuid(), ), - "-e", "TARGET_GROUPID=%i" % (os.getgid(), ), - "-e", "DEB_BUILD_OPTIONS=%s" % ("nocheck" if skip_tests else ""), - "dh-venv-builder:" + tag, - ], stdout=stdout, stderr=subprocess.STDOUT) + subprocess.check_call( + [ + "docker", + "run", + "--rm", + "--name", + container_name, + "--volume=" + projdir + ":/synapse/source:ro", + "--volume=" + debsdir + ":/debs", + "-e", + "TARGET_USERID=%i" % (os.getuid(),), + "-e", + "TARGET_GROUPID=%i" % (os.getgid(),), + "-e", + "DEB_BUILD_OPTIONS=%s" % ("nocheck" if skip_tests else ""), + "dh-venv-builder:" + tag, + ], + stdout=stdout, + stderr=subprocess.STDOUT, + ) with self._lock: self.active_containers.remove(container_name) if stdout is not None: stdout.close() - print("Completed build of %s" % (dist, )) + print("Completed build of %s" % (dist,)) def kill_containers(self): with self._lock: @@ -117,9 +134,14 @@ class 
Builder(object): for c in active: print("killing container %s" % (c,)) - subprocess.run([ - "docker", "kill", c, - ], stdout=subprocess.DEVNULL) + subprocess.run( + [ + "docker", + "kill", + c, + ], + stdout=subprocess.DEVNULL, + ) with self._lock: self.active_containers.remove(c) @@ -130,31 +152,38 @@ def run_builds(dists, jobs=1, skip_tests=False): def sig(signum, _frame): print("Caught SIGINT") builder.kill_containers() + signal.signal(signal.SIGINT, sig) with ThreadPoolExecutor(max_workers=jobs) as e: res = e.map(lambda dist: builder.run_build(dist, skip_tests), dists) # make sure we consume the iterable so that exceptions are raised. - for r in res: + for _ in res: pass -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser( description=DESC, ) parser.add_argument( - '-j', '--jobs', type=int, default=1, - help='specify the number of builds to run in parallel', + "-j", + "--jobs", + type=int, + default=1, + help="specify the number of builds to run in parallel", ) parser.add_argument( - '--no-check', action='store_true', - help='skip running tests after building', + "--no-check", + action="store_true", + help="skip running tests after building", ) parser.add_argument( - 'dist', nargs='*', default=DISTS, - help='a list of distributions to build for. Default: %(default)s', + "dist", + nargs="*", + default=DISTS, + help="a list of distributions to build for. Default: %(default)s", ) args = parser.parse_args() run_builds(dists=args.dist, jobs=args.jobs, skip_tests=args.no_check) diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh index 9761e975943c..869eb2372d51 100755 --- a/scripts-dev/lint.sh +++ b/scripts-dev/lint.sh @@ -80,8 +80,22 @@ else # then lint everything! if [[ -z ${files+x} ]]; then # Lint all source code files and directories - # Note: this list aims the mirror the one in tox.ini - files=("synapse" "docker" "tests" "scripts-dev" "scripts" "contrib" "synctl" "setup.py" "synmark" "stubs" ".buildkite") + # Note: this list aims to mirror the one in tox.ini + files=( + "synapse" "docker" "tests" + # annoyingly, black doesn't find these so we have to list them + "scripts/export_signing_key" + "scripts/generate_config" + "scripts/generate_log_config" + "scripts/hash_password" + "scripts/register_new_matrix_user" + "scripts/synapse_port_db" + "scripts-dev" + "scripts-dev/build_debian_packages" + "scripts-dev/sign_json" + "scripts-dev/update_database" + "contrib" "synctl" "setup.py" "synmark" "stubs" ".buildkite" + ) fi fi diff --git a/scripts/export_signing_key b/scripts/export_signing_key index 0ed167ea855c..bf0139bd64b8 100755 --- a/scripts/export_signing_key +++ b/scripts/export_signing_key @@ -30,7 +30,11 @@ def exit(status: int = 0, message: Optional[str] = None): def format_plain(public_key: nacl.signing.VerifyKey): print( "%s:%s %s" - % (public_key.alg, public_key.version, encode_verify_key_base64(public_key),) + % ( + public_key.alg, + public_key.version, + encode_verify_key_base64(public_key), + ) ) @@ -50,7 +54,10 @@ if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( - "key_file", nargs="+", type=argparse.FileType("r"), help="The key file to read", + "key_file", + nargs="+", + type=argparse.FileType("r"), + help="The key file to read", ) parser.add_argument( @@ -63,7 +70,7 @@ if __name__ == "__main__": parser.add_argument( "--expiry-ts", type=int, - default=int(time.time() * 1000) + 6*3600000, + default=int(time.time() * 1000) + 6 * 3600000, help=( "The expiry time to use for -x, in milliseconds since 
1970. The default " "is (now+6h)." diff --git a/scripts/generate_config b/scripts/generate_config index 771cbf8d95ad..931b40c045f3 100755 --- a/scripts/generate_config +++ b/scripts/generate_config @@ -11,23 +11,22 @@ if __name__ == "__main__": parser.add_argument( "--config-dir", default="CONFDIR", - help="The path where the config files are kept. Used to create filenames for " - "things like the log config and the signing key. Default: %(default)s", + "things like the log config and the signing key. Default: %(default)s", ) parser.add_argument( "--data-dir", default="DATADIR", help="The path where the data files are kept. Used to create filenames for " - "things like the database and media store. Default: %(default)s", + "things like the database and media store. Default: %(default)s", ) parser.add_argument( "--server-name", default="SERVERNAME", help="The server name. Used to initialise the server_name config param, but also " - "used in the names of some of the config files. Default: %(default)s", + "used in the names of some of the config files. Default: %(default)s", ) parser.add_argument( @@ -41,21 +40,22 @@ if __name__ == "__main__": "--generate-secrets", action="store_true", help="Enable generation of new secrets for things like the macaroon_secret_key." - "By default, these parameters will be left unset." + "By default, these parameters will be left unset.", ) parser.add_argument( - "-o", "--output-file", - type=argparse.FileType('w'), + "-o", + "--output-file", + type=argparse.FileType("w"), default=sys.stdout, help="File to write the configuration to. Default: stdout", ) parser.add_argument( "--header-file", - type=argparse.FileType('r'), + type=argparse.FileType("r"), help="File from which to read a header, which will be printed before the " - "generated config.", + "generated config.", ) args = parser.parse_args() diff --git a/scripts/hash_password b/scripts/hash_password index a30767f758a1..1d6fb0d70022 100755 --- a/scripts/hash_password +++ b/scripts/hash_password @@ -41,7 +41,7 @@ if __name__ == "__main__": parser.add_argument( "-c", "--config", - type=argparse.FileType('r'), + type=argparse.FileType("r"), help=( "Path to server config file. " "Used to read in bcrypt_rounds and password_pepper." @@ -72,8 +72,8 @@ if __name__ == "__main__": pw = unicodedata.normalize("NFKC", password) hashed = bcrypt.hashpw( - pw.encode('utf8') + password_pepper.encode("utf8"), + pw.encode("utf8") + password_pepper.encode("utf8"), bcrypt.gensalt(bcrypt_rounds), - ).decode('ascii') + ).decode("ascii") print(hashed) diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index 5fb5bb35f77d..7c7645c05a63 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -294,8 +294,7 @@ class Porter(object): return table, already_ported, total_to_port, forward_chunk, backward_chunk async def get_table_constraints(self) -> Dict[str, Set[str]]: - """Returns a map of tables that have foreign key constraints to tables they depend on. - """ + """Returns a map of tables that have foreign key constraints to tables they depend on.""" def _get_constraints(txn): # We can pull the information about foreign key constraints out from @@ -504,7 +503,9 @@ class Porter(object): return def build_db_store( - self, db_config: DatabaseConnectionConfig, allow_outdated_version: bool = False, + self, + db_config: DatabaseConnectionConfig, + allow_outdated_version: bool = False, ): """Builds and returns a database store using the provided configuration. 
@@ -740,7 +741,7 @@ class Porter(object): return col outrows = [] - for i, row in enumerate(rows): + for row in rows: try: outrows.append( tuple(conv(j, col) for j, col in enumerate(row) if j > 0) @@ -890,8 +891,7 @@ class Porter(object): await self.postgres_store.db_pool.runInteraction("setup_user_id_seq", r) async def _setup_events_stream_seqs(self) -> None: - """Set the event stream sequences to the correct values. - """ + """Set the event stream sequences to the correct values.""" # We get called before we've ported the events table, so we need to # fetch the current positions from the SQLite store. @@ -920,12 +920,14 @@ class Porter(object): ) await self.postgres_store.db_pool.runInteraction( - "_setup_events_stream_seqs", _setup_events_stream_seqs_set_pos, + "_setup_events_stream_seqs", + _setup_events_stream_seqs_set_pos, ) - async def _setup_sequence(self, sequence_name: str, stream_id_tables: Iterable[str]) -> None: - """Set a sequence to the correct value. - """ + async def _setup_sequence( + self, sequence_name: str, stream_id_tables: Iterable[str] + ) -> None: + """Set a sequence to the correct value.""" current_stream_ids = [] for stream_id_table in stream_id_tables: max_stream_id = await self.sqlite_store.db_pool.simple_select_one_onecol( @@ -939,14 +941,19 @@ class Porter(object): next_id = max(current_stream_ids) + 1 def r(txn): - sql = "ALTER SEQUENCE %s RESTART WITH" % (sequence_name, ) - txn.execute(sql + " %s", (next_id, )) + sql = "ALTER SEQUENCE %s RESTART WITH" % (sequence_name,) + txn.execute(sql + " %s", (next_id,)) - await self.postgres_store.db_pool.runInteraction("_setup_%s" % (sequence_name,), r) + await self.postgres_store.db_pool.runInteraction( + "_setup_%s" % (sequence_name,), r + ) async def _setup_auth_chain_sequence(self) -> None: curr_chain_id = await self.sqlite_store.db_pool.simple_select_one_onecol( - table="event_auth_chains", keyvalues={}, retcol="MAX(chain_id)", allow_none=True + table="event_auth_chains", + keyvalues={}, + retcol="MAX(chain_id)", + allow_none=True, ) def r(txn): @@ -968,8 +975,7 @@ class Porter(object): class Progress(object): - """Used to report progress of the port - """ + """Used to report progress of the port""" def __init__(self): self.tables = {} @@ -994,8 +1000,7 @@ class Progress(object): class CursesProgress(Progress): - """Reports progress to a curses window - """ + """Reports progress to a curses window""" def __init__(self, stdscr): self.stdscr = stdscr @@ -1020,7 +1025,7 @@ class CursesProgress(Progress): self.total_processed = 0 self.total_remaining = 0 - for table, data in self.tables.items(): + for data in self.tables.values(): self.total_processed += data["num_done"] - data["start"] self.total_remaining += data["total"] - data["num_done"] @@ -1111,8 +1116,7 @@ class CursesProgress(Progress): class TerminalProgress(Progress): - """Just prints progress to the terminal - """ + """Just prints progress to the terminal""" def update(self, table, num_done): super(TerminalProgress, self).update(table, num_done) diff --git a/tox.ini b/tox.ini index ecd609271d1d..da77d124fc0e 100644 --- a/tox.ini +++ b/tox.ini @@ -34,7 +34,17 @@ lint_targets = synapse tests scripts + # annoyingly, black doesn't find these so we have to list them + scripts/export_signing_key + scripts/generate_config + scripts/generate_log_config + scripts/hash_password + scripts/register_new_matrix_user + scripts/synapse_port_db scripts-dev + scripts-dev/build_debian_packages + scripts-dev/sign_json + scripts-dev/update_database stubs contrib synctl From 
66609122260ad151359b9c0028634094cf51b5c5 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 14 May 2021 13:14:48 +0100 Subject: [PATCH 147/222] Update postgres docs (#9989) --- changelog.d/9988.doc | 2 +- changelog.d/9989.doc | 1 + docs/postgres.md | 198 +++++++++++++++++++++---------------------- 3 files changed, 98 insertions(+), 103 deletions(-) create mode 100644 changelog.d/9989.doc diff --git a/changelog.d/9988.doc b/changelog.d/9988.doc index 87cda376a678..25338c44c312 100644 --- a/changelog.d/9988.doc +++ b/changelog.d/9988.doc @@ -1 +1 @@ -Fix outdated minimum PostgreSQL version in postgres.md. +Updates to the PostgreSQL documentation (`postgres.md`). diff --git a/changelog.d/9989.doc b/changelog.d/9989.doc new file mode 100644 index 000000000000..25338c44c312 --- /dev/null +++ b/changelog.d/9989.doc @@ -0,0 +1 @@ +Updates to the PostgreSQL documentation (`postgres.md`). diff --git a/docs/postgres.md b/docs/postgres.md index b99fad8a6e24..f83155e52a30 100644 --- a/docs/postgres.md +++ b/docs/postgres.md @@ -33,28 +33,15 @@ Assuming your PostgreSQL database user is called `postgres`, first authenticate # Or, if your system uses sudo to get administrative rights sudo -u postgres bash -Then, create a user ``synapse_user`` with: +Then, create a postgres user and a database with: + # this will prompt for a password for the new user createuser --pwprompt synapse_user -Before you can authenticate with the `synapse_user`, you must create a -database that it can access. To create a database, first connect to the -database with your database user: + createdb --encoding=UTF8 --locale=C --template=template0 --owner=synapse_user synapse - su - postgres # Or: sudo -u postgres bash - psql - -and then run: - - CREATE DATABASE synapse - ENCODING 'UTF8' - LC_COLLATE='C' - LC_CTYPE='C' - template=template0 - OWNER synapse_user; - -This would create an appropriate database named `synapse` owned by the -`synapse_user` user (which must already have been created as above). +The above will create a user called `synapse_user`, and a database called +`synapse`. Note that the PostgreSQL database *must* have the correct encoding set (as shown above), otherwise it will not be able to store UTF8 strings. @@ -63,79 +50,6 @@ You may need to enable password authentication so `synapse_user` can connect to the database. See . -If you get an error along the lines of `FATAL: Ident authentication failed for -user "synapse_user"`, you may need to use an authentication method other than -`ident`: - -* If the `synapse_user` user has a password, add the password to the `database:` - section of `homeserver.yaml`. Then add the following to `pg_hba.conf`: - - ``` - host synapse synapse_user ::1/128 md5 # or `scram-sha-256` instead of `md5` if you use that - ``` - -* If the `synapse_user` user does not have a password, then a password doesn't - have to be added to `homeserver.yaml`. But the following does need to be added - to `pg_hba.conf`: - - ``` - host synapse synapse_user ::1/128 trust - ``` - -Note that line order matters in `pg_hba.conf`, so make sure that if you do add a -new line, it is inserted before: - -``` -host all all ::1/128 ident -``` - -### Fixing incorrect `COLLATE` or `CTYPE` - -Synapse will refuse to set up a new database if it has the wrong values of -`COLLATE` and `CTYPE` set, and will log warnings on existing databases. 
Using -different locales can cause issues if the locale library is updated from -underneath the database, or if a different version of the locale is used on any -replicas. - -The safest way to fix the issue is to take a dump and recreate the database with -the correct `COLLATE` and `CTYPE` parameters (as shown above). It is also possible to change the -parameters on a live database and run a `REINDEX` on the entire database, -however extreme care must be taken to avoid database corruption. - -Note that the above may fail with an error about duplicate rows if corruption -has already occurred, and such duplicate rows will need to be manually removed. - - -## Fixing inconsistent sequences error - -Synapse uses Postgres sequences to generate IDs for various tables. A sequence -and associated table can get out of sync if, for example, Synapse has been -downgraded and then upgraded again. - -To fix the issue shut down Synapse (including any and all workers) and run the -SQL command included in the error message. Once done Synapse should start -successfully. - - -## Tuning Postgres - -The default settings should be fine for most deployments. For larger -scale deployments tuning some of the settings is recommended, details of -which can be found at -. - -In particular, we've found tuning the following values helpful for -performance: - -- `shared_buffers` -- `effective_cache_size` -- `work_mem` -- `maintenance_work_mem` -- `autovacuum_work_mem` - -Note that the appropriate values for those fields depend on the amount -of free memory the database host has available. - ## Synapse config When you are ready to start using PostgreSQL, edit the `database` @@ -165,18 +79,42 @@ may block for an extended period while it waits for a response from the database server. Example values might be: ```yaml -# seconds of inactivity after which TCP should send a keepalive message to the server -keepalives_idle: 10 +database: + args: + # ... as above + + # seconds of inactivity after which TCP should send a keepalive message to the server + keepalives_idle: 10 -# the number of seconds after which a TCP keepalive message that is not -# acknowledged by the server should be retransmitted -keepalives_interval: 10 + # the number of seconds after which a TCP keepalive message that is not + # acknowledged by the server should be retransmitted + keepalives_interval: 10 -# the number of TCP keepalives that can be lost before the client's connection -# to the server is considered dead -keepalives_count: 3 + # the number of TCP keepalives that can be lost before the client's connection + # to the server is considered dead + keepalives_count: 3 ``` +## Tuning Postgres + +The default settings should be fine for most deployments. For larger +scale deployments tuning some of the settings is recommended, details of +which can be found at +. + +In particular, we've found tuning the following values helpful for +performance: + +- `shared_buffers` +- `effective_cache_size` +- `work_mem` +- `maintenance_work_mem` +- `autovacuum_work_mem` + +Note that the appropriate values for those fields depend on the amount +of free memory the database host has available. + + ## Porting from SQLite ### Overview @@ -185,9 +123,8 @@ The script `synapse_port_db` allows porting an existing synapse server backed by SQLite to using PostgreSQL. This is done in as a two phase process: -1. Copy the existing SQLite database to a separate location (while the - server is down) and running the port script against that offline - database. +1. 
Copy the existing SQLite database to a separate location (while the
-   server is down) and running the port script against that offline
-   database.
+1. Copy the existing SQLite database to a separate location and run
+   the port script against that offline database.
 2. Shut down the server. Rerun the port script to port any data that has
    come in since taking the first snapshot. Restart server against the
    PostgreSQL database.
@@ -245,3 +182,60 @@ PostgreSQL database configuration file `homeserver-postgres.yaml`:
     ./synctl start
 
 Synapse should now be running against PostgreSQL.
+
+
+## Troubleshooting
+
+### Alternative auth methods
+
+If you get an error along the lines of `FATAL: Ident authentication failed for
+user "synapse_user"`, you may need to use an authentication method other than
+`ident`:
+
+* If the `synapse_user` user has a password, add the password to the `database:`
+  section of `homeserver.yaml`. Then add the following to `pg_hba.conf`:
+
+  ```
+  host    synapse     synapse_user    ::1/128     md5  # or `scram-sha-256` instead of `md5` if you use that
+  ```
+
+* If the `synapse_user` user does not have a password, then a password doesn't
+  have to be added to `homeserver.yaml`. But the following does need to be added
+  to `pg_hba.conf`:
+
+  ```
+  host    synapse     synapse_user    ::1/128     trust
+  ```
+
+Note that line order matters in `pg_hba.conf`, so make sure that if you do add a
+new line, it is inserted before:
+
+```
+host    all         all             ::1/128     ident
+```
+
+### Fixing incorrect `COLLATE` or `CTYPE`
+
+Synapse will refuse to set up a new database if it has the wrong values of
+`COLLATE` and `CTYPE` set, and will log warnings on existing databases. Using
+different locales can cause issues if the locale library is updated from
+underneath the database, or if a different version of the locale is used on any
+replicas.
+
+The safest way to fix the issue is to dump the database and recreate it with
+the correct locale parameter (as shown above). It is also possible to change the
+parameters on a live database and run a `REINDEX` on the entire database,
+however extreme care must be taken to avoid database corruption.
+
+Note that the above may fail with an error about duplicate rows if corruption
+has already occurred, and such duplicate rows will need to be manually removed.
+
+### Fixing inconsistent sequences error
+
+Synapse uses Postgres sequences to generate IDs for various tables. A sequence
+and associated table can get out of sync if, for example, Synapse has been
+downgraded and then upgraded again.
+
+To fix the issue shut down Synapse (including any and all workers) and run the
+SQL command included in the error message. Once done Synapse should start
+successfully.

From afb6dcf806d5a290d8cbd2c911c6a712ae3cf391 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Mon, 17 May 2021 12:33:38 +0200
Subject: [PATCH 148/222] Split multipart email sending into a dedicated
 handler (#9977)

Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
---
 changelog.d/9977.misc                |  1 +
 synapse/handlers/account_validity.py | 55 +++-------------
 synapse/handlers/send_email.py       | 98 ++++++++++++++++++++++++++++
 synapse/push/mailer.py               | 53 +++-------------
 synapse/server.py                    |  5 ++
 5 files changed, 122 insertions(+), 90 deletions(-)
 create mode 100644 changelog.d/9977.misc
 create mode 100644 synapse/handlers/send_email.py

diff --git a/changelog.d/9977.misc b/changelog.d/9977.misc
new file mode 100644
index 000000000000..093dffc6beac
--- /dev/null
+++ b/changelog.d/9977.misc
@@ -0,0 +1 @@
+Split multipart email sending into a dedicated handler.
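Before the file-by-file diff, here is a rough standalone sketch of what this commit factors out. The `build_multipart` helper below is hypothetical (the actual extraction is the `SendEmailHandler` shown further down), but it captures the MIME structure that both `account_validity.py` and `mailer.py` were assembling by hand:

```python
import email.utils
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText


def build_multipart(
    subject: str, from_addr: str, to_addr: str, html: str, text: str
) -> MIMEMultipart:
    """Assemble the multipart/alternative message both call sites duplicated."""
    msg = MIMEMultipart("alternative")
    msg["Subject"] = subject
    msg["From"] = from_addr
    msg["To"] = to_addr
    msg["Date"] = email.utils.formatdate()
    msg["Message-ID"] = email.utils.make_msgid()
    # attach plain text first: clients render the last alternative they support
    msg.attach(MIMEText(text, "plain", "utf8"))
    msg.attach(MIMEText(html, "html", "utf8"))
    return msg
```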
diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py index 5b927f10b3a9..d752cf34f0cc 100644 --- a/synapse/handlers/account_validity.py +++ b/synapse/handlers/account_validity.py @@ -15,12 +15,9 @@ import email.mime.multipart import email.utils import logging -from email.mime.multipart import MIMEMultipart -from email.mime.text import MIMEText from typing import TYPE_CHECKING, List, Optional, Tuple from synapse.api.errors import StoreError, SynapseError -from synapse.logging.context import make_deferred_yieldable from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.types import UserID from synapse.util import stringutils @@ -36,9 +33,11 @@ def __init__(self, hs: "HomeServer"): self.hs = hs self.config = hs.config self.store = self.hs.get_datastore() - self.sendmail = self.hs.get_sendmail() + self.send_email_handler = self.hs.get_send_email_handler() self.clock = self.hs.get_clock() + self._app_name = self.hs.config.email_app_name + self._account_validity_enabled = ( hs.config.account_validity.account_validity_enabled ) @@ -63,23 +62,10 @@ def __init__(self, hs: "HomeServer"): self._template_text = ( hs.config.account_validity.account_validity_template_text ) - account_validity_renew_email_subject = ( + self._renew_email_subject = ( hs.config.account_validity.account_validity_renew_email_subject ) - try: - app_name = hs.config.email_app_name - - self._subject = account_validity_renew_email_subject % {"app": app_name} - - self._from_string = hs.config.email_notif_from % {"app": app_name} - except Exception: - # If substitution failed, fall back to the bare strings. - self._subject = account_validity_renew_email_subject - self._from_string = hs.config.email_notif_from - - self._raw_from = email.utils.parseaddr(self._from_string)[1] - # Check the renewal emails to send and send them every 30min. 
if hs.config.run_background_tasks: self.clock.looping_call(self._send_renewal_emails, 30 * 60 * 1000) @@ -159,38 +145,17 @@ async def _send_renewal_email(self, user_id: str, expiration_ts: int) -> None: } html_text = self._template_html.render(**template_vars) - html_part = MIMEText(html_text, "html", "utf8") - plain_text = self._template_text.render(**template_vars) - text_part = MIMEText(plain_text, "plain", "utf8") for address in addresses: raw_to = email.utils.parseaddr(address)[1] - multipart_msg = MIMEMultipart("alternative") - multipart_msg["Subject"] = self._subject - multipart_msg["From"] = self._from_string - multipart_msg["To"] = address - multipart_msg["Date"] = email.utils.formatdate() - multipart_msg["Message-ID"] = email.utils.make_msgid() - multipart_msg.attach(text_part) - multipart_msg.attach(html_part) - - logger.info("Sending renewal email to %s", address) - - await make_deferred_yieldable( - self.sendmail( - self.hs.config.email_smtp_host, - self._raw_from, - raw_to, - multipart_msg.as_string().encode("utf8"), - reactor=self.hs.get_reactor(), - port=self.hs.config.email_smtp_port, - requireAuthentication=self.hs.config.email_smtp_user is not None, - username=self.hs.config.email_smtp_user, - password=self.hs.config.email_smtp_pass, - requireTransportSecurity=self.hs.config.require_transport_security, - ) + await self.send_email_handler.send_email( + email_address=raw_to, + subject=self._renew_email_subject, + app_name=self._app_name, + html=html_text, + text=plain_text, ) await self.store.set_renewal_mail_status(user_id=user_id, email_sent=True) diff --git a/synapse/handlers/send_email.py b/synapse/handlers/send_email.py new file mode 100644 index 000000000000..e9f6aef06f01 --- /dev/null +++ b/synapse/handlers/send_email.py @@ -0,0 +1,98 @@ +# Copyright 2021 The Matrix.org C.I.C. Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import email.utils +import logging +from email.mime.multipart import MIMEMultipart +from email.mime.text import MIMEText +from typing import TYPE_CHECKING + +from synapse.logging.context import make_deferred_yieldable + +if TYPE_CHECKING: + from synapse.server import HomeServer + +logger = logging.getLogger(__name__) + + +class SendEmailHandler: + def __init__(self, hs: "HomeServer"): + self.hs = hs + + self._sendmail = hs.get_sendmail() + self._reactor = hs.get_reactor() + + self._from = hs.config.email.email_notif_from + self._smtp_host = hs.config.email.email_smtp_host + self._smtp_port = hs.config.email.email_smtp_port + self._smtp_user = hs.config.email.email_smtp_user + self._smtp_pass = hs.config.email.email_smtp_pass + self._require_transport_security = hs.config.email.require_transport_security + + async def send_email( + self, + email_address: str, + subject: str, + app_name: str, + html: str, + text: str, + ) -> None: + """Send a multipart email with the given information. + + Args: + email_address: The address to send the email to. + subject: The email's subject. + app_name: The app name to include in the From header. 
+ html: The HTML content to include in the email. + text: The plain text content to include in the email. + """ + try: + from_string = self._from % {"app": app_name} + except (KeyError, TypeError): + from_string = self._from + + raw_from = email.utils.parseaddr(from_string)[1] + raw_to = email.utils.parseaddr(email_address)[1] + + if raw_to == "": + raise RuntimeError("Invalid 'to' address") + + html_part = MIMEText(html, "html", "utf8") + text_part = MIMEText(text, "plain", "utf8") + + multipart_msg = MIMEMultipart("alternative") + multipart_msg["Subject"] = subject + multipart_msg["From"] = from_string + multipart_msg["To"] = email_address + multipart_msg["Date"] = email.utils.formatdate() + multipart_msg["Message-ID"] = email.utils.make_msgid() + multipart_msg.attach(text_part) + multipart_msg.attach(html_part) + + logger.info("Sending email to %s" % email_address) + + await make_deferred_yieldable( + self._sendmail( + self._smtp_host, + raw_from, + raw_to, + multipart_msg.as_string().encode("utf8"), + reactor=self._reactor, + port=self._smtp_port, + requireAuthentication=self._smtp_user is not None, + username=self._smtp_user, + password=self._smtp_pass, + requireTransportSecurity=self._require_transport_security, + ) + ) diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index c4b43b0d3fc8..5f9ea5003a82 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -12,12 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -import email.mime.multipart -import email.utils import logging import urllib.parse -from email.mime.multipart import MIMEMultipart -from email.mime.text import MIMEText from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, TypeVar import bleach @@ -27,7 +23,6 @@ from synapse.api.errors import StoreError from synapse.config.emailconfig import EmailSubjectConfig from synapse.events import EventBase -from synapse.logging.context import make_deferred_yieldable from synapse.push.presentable_names import ( calculate_room_name, descriptor_from_member_events, @@ -108,7 +103,7 @@ def __init__( self.template_html = template_html self.template_text = template_text - self.sendmail = self.hs.get_sendmail() + self.send_email_handler = hs.get_send_email_handler() self.store = self.hs.get_datastore() self.state_store = self.hs.get_storage().state self.macaroon_gen = self.hs.get_macaroon_generator() @@ -310,17 +305,6 @@ async def send_email( self, email_address: str, subject: str, extra_template_vars: Dict[str, Any] ) -> None: """Send an email with the given information and template text""" - try: - from_string = self.hs.config.email_notif_from % {"app": self.app_name} - except TypeError: - from_string = self.hs.config.email_notif_from - - raw_from = email.utils.parseaddr(from_string)[1] - raw_to = email.utils.parseaddr(email_address)[1] - - if raw_to == "": - raise RuntimeError("Invalid 'to' address") - template_vars = { "app_name": self.app_name, "server_name": self.hs.config.server.server_name, @@ -329,35 +313,14 @@ async def send_email( template_vars.update(extra_template_vars) html_text = self.template_html.render(**template_vars) - html_part = MIMEText(html_text, "html", "utf8") - plain_text = self.template_text.render(**template_vars) - text_part = MIMEText(plain_text, "plain", "utf8") - - multipart_msg = MIMEMultipart("alternative") - multipart_msg["Subject"] = subject - multipart_msg["From"] = from_string - multipart_msg["To"] = email_address - multipart_msg["Date"] = 
email.utils.formatdate() - multipart_msg["Message-ID"] = email.utils.make_msgid() - multipart_msg.attach(text_part) - multipart_msg.attach(html_part) - - logger.info("Sending email to %s" % email_address) - - await make_deferred_yieldable( - self.sendmail( - self.hs.config.email_smtp_host, - raw_from, - raw_to, - multipart_msg.as_string().encode("utf8"), - reactor=self.hs.get_reactor(), - port=self.hs.config.email_smtp_port, - requireAuthentication=self.hs.config.email_smtp_user is not None, - username=self.hs.config.email_smtp_user, - password=self.hs.config.email_smtp_pass, - requireTransportSecurity=self.hs.config.require_transport_security, - ) + + await self.send_email_handler.send_email( + email_address=email_address, + subject=subject, + app_name=self.app_name, + html=html_text, + text=plain_text, ) async def _get_room_vars( diff --git a/synapse/server.py b/synapse/server.py index 2337d2d9b450..fec0024c89a5 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -104,6 +104,7 @@ from synapse.handlers.room_member import RoomMemberHandler, RoomMemberMasterHandler from synapse.handlers.room_member_worker import RoomMemberWorkerHandler from synapse.handlers.search import SearchHandler +from synapse.handlers.send_email import SendEmailHandler from synapse.handlers.set_password import SetPasswordHandler from synapse.handlers.space_summary import SpaceSummaryHandler from synapse.handlers.sso import SsoHandler @@ -549,6 +550,10 @@ def get_deactivate_account_handler(self) -> DeactivateAccountHandler: def get_search_handler(self) -> SearchHandler: return SearchHandler(self) + @cache_in_self + def get_send_email_handler(self) -> SendEmailHandler: + return SendEmailHandler(self) + @cache_in_self def get_set_password_handler(self) -> SetPasswordHandler: return SetPasswordHandler(self) From afb6dcf806d5a290d8cbd2c911c6a712ae3cf391 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 17 May 2021 11:34:39 +0100 Subject: [PATCH 149/222] 1.34.0 --- CHANGES.md | 11 +++++++++-- debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 3 files changed, 16 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 2ceae0ac8c2b..1e3fd130fd11 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,10 +1,17 @@ -Synapse 1.34.0rc1 (2021-05-12) -============================== +Synapse 1.34.0 (2021-05-17) +=========================== This release deprecates the `room_invite_state_types` configuration setting. See the [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.34.0/UPGRADE.rst#upgrading-to-v1340) for instructions on updating your configuration file to use the new `room_prejoin_state` setting. This release also deprecates the `POST /_synapse/admin/v1/rooms//delete` admin API route. Server administrators are encouraged to update their scripts to use the new `DELETE /_synapse/admin/v1/rooms/` route instead. + +No significant changes. + + +Synapse 1.34.0rc1 (2021-05-12) +============================== + Features -------- diff --git a/debian/changelog b/debian/changelog index 76b82c172e97..bf99ae772c9e 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.34.0) stable; urgency=medium + + * New synapse release 1.34.0. + + -- Synapse Packaging team Mon, 17 May 2021 11:34:18 +0100 + matrix-synapse-py3 (1.33.2) stable; urgency=medium * New synapse release 1.33.2. 
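Stepping back to the `SendEmailHandler` introduced in the previous commit: from a caller's perspective, the refactor reduces every send to a single call. A call-shape sketch only (the address, subject, and bodies are invented; the keyword arguments are those of the new `send_email` signature):

```python
async def send_renewal_notice(hs) -> None:
    # `hs` is a HomeServer instance; all values below are examples only.
    await hs.get_send_email_handler().send_email(
        email_address="alice@example.com",
        subject="Renew your account",
        app_name="Matrix",
        html="<p>Your account is about to expire.</p>",
        text="Your account is about to expire.",
    )
```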
diff --git a/synapse/__init__.py b/synapse/__init__.py index 15d54a1ceb81..7498a6016f0b 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.34.0rc1" +__version__ = "1.34.0" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 8dde0bf8b3faa75763d6b0f0fb9413f3b8691067 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 17 May 2021 11:50:08 +0100 Subject: [PATCH 150/222] Update UPGRADE.rst --- UPGRADE.rst | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/UPGRADE.rst b/UPGRADE.rst index 606e357b6e39..9f61aad4120d 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -88,7 +88,7 @@ for example: Upgrading to v1.34.0 ==================== -`room_invite_state_types` configuration setting +``room_invite_state_types`` configuration setting ----------------------------------------------- The ``room_invite_state_types`` configuration setting has been deprecated and @@ -106,13 +106,10 @@ remove it from your configuration file. The default value used to be: - "m.room.encryption" - "m.room.name" -If you have customised this value by adding addition state types, you should -remove ``room_invite_state_types`` and configure ``additional_event_types`` with -your customisations. +If you have customised this value, you should remove ``room_invite_state_types`` and +configure ``room_prejoin_state`` instead. + -If you have customised this value by removing state types, you should rename -``room_invite_state_types`` to ``additional_event_types``, and set -``disable_default_event_types`` to ``true``. Upgrading to v1.33.0 ==================== From 13b0673b5a0bceafbcfce1407544c2421fd69210 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 17 May 2021 12:00:28 +0100 Subject: [PATCH 151/222] Changelog --- CHANGES.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 1e3fd130fd11..709436da978c 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -6,7 +6,7 @@ This release deprecates the `room_invite_state_types` configuration setting. See This release also deprecates the `POST /_synapse/admin/v1/rooms//delete` admin API route. Server administrators are encouraged to update their scripts to use the new `DELETE /_synapse/admin/v1/rooms/` route instead. -No significant changes. +No significant changes since v1.34.0rc1. Synapse 1.34.0rc1 (2021-05-12) @@ -181,7 +181,7 @@ Synapse 1.32.1 (2021-04-21) =========================== This release fixes [a regression](https://github.com/matrix-org/synapse/issues/9853) -in Synapse 1.32.0 that caused connected Prometheus instances to become unstable. +in Synapse 1.32.0 that caused connected Prometheus instances to become unstable. However, as this release is still subject to the `LoggingContext` change in 1.32.0, it is recommended to remain on or downgrade to 1.31.0. @@ -197,11 +197,11 @@ Synapse 1.32.0 (2021-04-20) **Note:** This release introduces [a regression](https://github.com/matrix-org/synapse/issues/9853) that can overwhelm connected Prometheus instances. This issue was not present in -1.32.0rc1. If affected, it is recommended to downgrade to 1.31.0 in the meantime, and +1.32.0rc1. If affected, it is recommended to downgrade to 1.31.0 in the meantime, and follow [these instructions](https://github.com/matrix-org/synapse/pull/9854#issuecomment-823472183) to clean up any excess writeahead logs. 
-**Note:** This release also mistakenly included a change that may affected Synapse +**Note:** This release also mistakenly included a change that may affected Synapse modules that import `synapse.logging.context.LoggingContext`, such as [synapse-s3-storage-provider](https://github.com/matrix-org/synapse-s3-storage-provider). This will be fixed in a later Synapse version. @@ -212,8 +212,8 @@ This release removes the deprecated `GET /_synapse/admin/v1/users/` adm This release requires Application Services to use type `m.login.application_service` when registering users via the `/_matrix/client/r0/register` endpoint to comply with the spec. Please ensure your Application Services are up to date. -If you are using the `packages.matrix.org` Debian repository for Synapse packages, -note that we have recently updated the expiry date on the gpg signing key. If you see an +If you are using the `packages.matrix.org` Debian repository for Synapse packages, +note that we have recently updated the expiry date on the gpg signing key. If you see an error similar to `The following signatures were invalid: EXPKEYSIG F473DD4473365DE1`, you will need to get a fresh copy of the keys. You can do so with: From 9752849e2b41968613ca244e86311d805bdd27df Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 17 May 2021 09:01:19 -0400 Subject: [PATCH 152/222] Clarify comments in the space summary handler. (#9974) --- changelog.d/9974.misc | 1 + synapse/handlers/space_summary.py | 51 ++++++++++++++++++++++++++++--- 2 files changed, 47 insertions(+), 5 deletions(-) create mode 100644 changelog.d/9974.misc diff --git a/changelog.d/9974.misc b/changelog.d/9974.misc new file mode 100644 index 000000000000..9ddee2618e21 --- /dev/null +++ b/changelog.d/9974.misc @@ -0,0 +1 @@ +Update comments in the space summary handler. diff --git a/synapse/handlers/space_summary.py b/synapse/handlers/space_summary.py index e35d91832b42..953356f34d90 100644 --- a/synapse/handlers/space_summary.py +++ b/synapse/handlers/space_summary.py @@ -32,7 +32,6 @@ logger = logging.getLogger(__name__) # number of rooms to return. We'll stop once we hit this limit. -# TODO: allow clients to reduce this with a request param. MAX_ROOMS = 50 # max number of events to return per room. @@ -231,11 +230,15 @@ async def _summarize_local_room( Generate a room entry and a list of event entries for a given room. Args: - requester: The requesting user, or None if this is over federation. + requester: + The user requesting the summary, if it is a local request. None + if this is a federation request. room_id: The room ID to summarize. suggested_only: True if only suggested children should be returned. Otherwise, all children are returned. - max_children: The maximum number of children to return for this node. + max_children: + The maximum number of children rooms to include. This is capped + to a server-set limit. Returns: A tuple of: @@ -278,6 +281,26 @@ async def _summarize_remote_room( max_children: Optional[int], exclude_rooms: Iterable[str], ) -> Tuple[Sequence[JsonDict], Sequence[JsonDict]]: + """ + Request room entries and a list of event entries for a given room by querying a remote server. + + Args: + room: The room to summarize. + suggested_only: True if only suggested children should be returned. + Otherwise, all children are returned. + max_children: + The maximum number of children rooms to include. This is capped + to a server-set limit. + exclude_rooms: + Rooms IDs which do not need to be summarized. 
+ + Returns: + A tuple of: + An iterable of rooms. + + An iterable of the sorted children events. This may be limited + to a maximum size or may include all children. + """ room_id = room.room_id logger.info("Requesting summary for %s via %s", room_id, room.via) @@ -310,8 +333,26 @@ async def _summarize_remote_room( ) async def _is_room_accessible(self, room_id: str, requester: Optional[str]) -> bool: - # if we have an authenticated requesting user, first check if they are in the - # room + """ + Calculate whether the room should be shown in the spaces summary. + + It should be included if: + + * The requester is joined or invited to the room. + * The history visibility is set to world readable. + + Args: + room_id: The room ID to summarize. + requester: + The user requesting the summary, if it is a local request. None + if this is a federation request. + + Returns: + True if the room should be included in the spaces summary. + """ + + # if we have an authenticated requesting user, first check if they are able to view + # stripped state in the room. if requester: try: await self._auth.check_user_in_room(room_id, requester) From 206a7b5f12fd3d88ec24a1f53ce75e5b701faed8 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 17 May 2021 09:59:17 -0400 Subject: [PATCH 153/222] Fix the allowed range of valid ordering characters for spaces. (#10002) \x7F was meant to be \0x7E (~) this was originally incorrect in MSC1772. --- changelog.d/10002.bugfix | 1 + synapse/handlers/space_summary.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/10002.bugfix diff --git a/changelog.d/10002.bugfix b/changelog.d/10002.bugfix new file mode 100644 index 000000000000..1fabdad22eee --- /dev/null +++ b/changelog.d/10002.bugfix @@ -0,0 +1 @@ +Fix a validation bug introduced in v1.34.0 in the ordering of spaces in the space summary API. diff --git a/synapse/handlers/space_summary.py b/synapse/handlers/space_summary.py index 953356f34d90..eb80a5ad67db 100644 --- a/synapse/handlers/space_summary.py +++ b/synapse/handlers/space_summary.py @@ -471,8 +471,8 @@ def _is_suggested_child_event(edge_event: EventBase) -> bool: return False -# Order may only contain characters in the range of \x20 (space) to \x7F (~). -_INVALID_ORDER_CHARS_RE = re.compile(r"[^\x20-\x7F]") +# Order may only contain characters in the range of \x20 (space) to \x7E (~) inclusive. 
+_INVALID_ORDER_CHARS_RE = re.compile(r"[^\x20-\x7E]") def _child_events_comparison_key(child: EventBase) -> Tuple[bool, Optional[str], str]: From 4d6e5a5e995590efe44855d10dcd2a89b841dae8 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 18 May 2021 14:13:45 +0100 Subject: [PATCH 154/222] Use a database table to hold the users that should have full presence sent to them, instead of something in-memory (#9823) --- changelog.d/9823.misc | 1 + docs/presence_router_module.md | 6 +- synapse/handlers/presence.py | 136 ++++++-- synapse/module_api/__init__.py | 63 ++-- synapse/replication/http/presence.py | 11 +- synapse/rest/admin/server_notice_servlet.py | 8 +- synapse/storage/databases/main/presence.py | 58 +++- .../59/13users_to_send_full_presence_to.sql | 34 ++ tests/events/test_presence_router.py | 15 +- tests/module_api/test_api.py | 303 +++++++++++++----- .../test_sharded_event_persister.py | 2 +- 11 files changed, 479 insertions(+), 158 deletions(-) create mode 100644 changelog.d/9823.misc create mode 100644 synapse/storage/schema/main/delta/59/13users_to_send_full_presence_to.sql diff --git a/changelog.d/9823.misc b/changelog.d/9823.misc new file mode 100644 index 000000000000..bf924ab68cc4 --- /dev/null +++ b/changelog.d/9823.misc @@ -0,0 +1 @@ +Allow sending full presence to users via workers other than the one that called `ModuleApi.send_local_online_presence_to`. \ No newline at end of file diff --git a/docs/presence_router_module.md b/docs/presence_router_module.md index d6566d978d06..d2844915dffe 100644 --- a/docs/presence_router_module.md +++ b/docs/presence_router_module.md @@ -28,7 +28,11 @@ async def ModuleApi.send_local_online_presence_to(users: Iterable[str]) -> None which can be given a list of local or remote MXIDs to broadcast known, online user presence to (for those users that the receiving user is considered interested in). It does not include state for users who are currently offline, and it can only be -called on workers that support sending federation. +called on workers that support sending federation. Additionally, this method must +only be called from the process that has been configured to write to the +the [presence stream](https://github.com/matrix-org/synapse/blob/master/docs/workers.md#stream-writers). +By default, this is the main process, but another worker can be configured to do +so. ### Module structure diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 6fd1f34289f8..f5a049d75461 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -222,9 +222,21 @@ async def current_state_for_users( @abc.abstractmethod async def set_state( - self, target_user: UserID, state: JsonDict, ignore_status_msg: bool = False + self, + target_user: UserID, + state: JsonDict, + ignore_status_msg: bool = False, + force_notify: bool = False, ) -> None: - """Set the presence state of the user. """ + """Set the presence state of the user. + + Args: + target_user: The ID of the user to set the presence state of. + state: The presence state as a JSON dictionary. + ignore_status_msg: True to ignore the "status_msg" field of the `state` dict. + If False, the user's current status will be updated. + force_notify: Whether to force notification of the update to clients. 
+ """ @abc.abstractmethod async def bump_presence_active_time(self, user: UserID): @@ -296,6 +308,51 @@ async def maybe_send_presence_to_interested_destinations( for destinations, states in hosts_and_states: self._federation.send_presence_to_destinations(states, destinations) + async def send_full_presence_to_users(self, user_ids: Collection[str]): + """ + Adds to the list of users who should receive a full snapshot of presence + upon their next sync. Note that this only works for local users. + + Then, grabs the current presence state for a given set of users and adds it + to the top of the presence stream. + + Args: + user_ids: The IDs of the local users to send full presence to. + """ + # Retrieve one of the users from the given set + if not user_ids: + raise Exception( + "send_full_presence_to_users must be called with at least one user" + ) + user_id = next(iter(user_ids)) + + # Mark all users as receiving full presence on their next sync + await self.store.add_users_to_send_full_presence_to(user_ids) + + # Add a new entry to the presence stream. Since we use stream tokens to determine whether a + # local user should receive a full snapshot of presence when they sync, we need to bump the + # presence stream so that subsequent syncs with no presence activity in between won't result + # in the client receiving multiple full snapshots of presence. + # + # If we bump the stream ID, then the user will get a higher stream token next sync, and thus + # correctly won't receive a second snapshot. + + # Get the current presence state for one of the users (defaults to offline if not found) + current_presence_state = await self.get_state(UserID.from_string(user_id)) + + # Convert the UserPresenceState object into a serializable dict + state = { + "presence": current_presence_state.state, + "status_message": current_presence_state.status_msg, + } + + # Copy the presence state to the tip of the presence stream. + + # We set force_notify=True here so that this presence update is guaranteed to + # increment the presence stream ID (which resending the current user's presence + # otherwise would not do). + await self.set_state(UserID.from_string(user_id), state, force_notify=True) + class _NullContextManager(ContextManager[None]): """A context manager which does nothing.""" @@ -480,8 +537,17 @@ async def set_state( target_user: UserID, state: JsonDict, ignore_status_msg: bool = False, + force_notify: bool = False, ) -> None: - """Set the presence state of the user.""" + """Set the presence state of the user. + + Args: + target_user: The ID of the user to set the presence state of. + state: The presence state as a JSON dictionary. + ignore_status_msg: True to ignore the "status_msg" field of the `state` dict. + If False, the user's current status will be updated. + force_notify: Whether to force notification of the update to clients. + """ presence = state["presence"] valid_presence = ( @@ -508,6 +574,7 @@ async def set_state( user_id=user_id, state=state, ignore_status_msg=ignore_status_msg, + force_notify=force_notify, ) async def bump_presence_active_time(self, user: UserID) -> None: @@ -677,13 +744,19 @@ async def _persist_unpersisted_changes(self) -> None: [self.user_to_current_state[user_id] for user_id in unpersisted] ) - async def _update_states(self, new_states: Iterable[UserPresenceState]) -> None: + async def _update_states( + self, new_states: Iterable[UserPresenceState], force_notify: bool = False + ) -> None: """Updates presence of users. Sets the appropriate timeouts. 
Pokes the notifier and federation if and only if the changed presence state should be sent to clients/servers. Args: new_states: The new user presence state updates to process. + force_notify: Whether to force notifying clients of this presence state update, + even if it doesn't change the state of a user's presence (e.g online -> online). + This is currently used to bump the max presence stream ID without changing any + user's presence (see PresenceHandler.add_users_to_send_full_presence_to). """ now = self.clock.time_msec() @@ -720,6 +793,9 @@ async def _update_states(self, new_states: Iterable[UserPresenceState]) -> None: now=now, ) + if force_notify: + should_notify = True + self.user_to_current_state[user_id] = new_state if should_notify: @@ -1058,9 +1134,21 @@ async def incoming_presence(self, origin: str, content: JsonDict) -> None: await self._update_states(updates) async def set_state( - self, target_user: UserID, state: JsonDict, ignore_status_msg: bool = False + self, + target_user: UserID, + state: JsonDict, + ignore_status_msg: bool = False, + force_notify: bool = False, ) -> None: - """Set the presence state of the user.""" + """Set the presence state of the user. + + Args: + target_user: The ID of the user to set the presence state of. + state: The presence state as a JSON dictionary. + ignore_status_msg: True to ignore the "status_msg" field of the `state` dict. + If False, the user's current status will be updated. + force_notify: Whether to force notification of the update to clients. + """ status_msg = state.get("status_msg", None) presence = state["presence"] @@ -1091,7 +1179,9 @@ async def set_state( ): new_fields["last_active_ts"] = self.clock.time_msec() - await self._update_states([prev_state.copy_and_replace(**new_fields)]) + await self._update_states( + [prev_state.copy_and_replace(**new_fields)], force_notify=force_notify + ) async def is_visible(self, observed_user: UserID, observer_user: UserID) -> bool: """Returns whether a user can see another user's presence.""" @@ -1389,11 +1479,10 @@ def __init__(self, hs: "HomeServer"): # # Presence -> Notifier -> PresenceEventSource -> Presence # - # Same with get_module_api, get_presence_router + # Same with get_presence_router: # # AuthHandler -> Notifier -> PresenceEventSource -> ModuleApi -> AuthHandler self.get_presence_handler = hs.get_presence_handler - self.get_module_api = hs.get_module_api self.get_presence_router = hs.get_presence_router self.clock = hs.get_clock() self.store = hs.get_datastore() @@ -1424,16 +1513,21 @@ async def get_new_events( stream_change_cache = self.store.presence_stream_cache with Measure(self.clock, "presence.get_new_events"): - if user_id in self.get_module_api()._send_full_presence_to_local_users: - # This user has been specified by a module to receive all current, online - # user presence. Removing from_key and setting include_offline to false - # will do effectively this. - from_key = None - include_offline = False - if from_key is not None: from_key = int(from_key) + # Check if this user should receive all current, online user presence. We only + # bother to do this if from_key is set, as otherwise the user will receive all + # user presence anyways. + if await self.store.should_user_receive_full_presence_with_token( + user_id, from_key + ): + # This user has been specified by a module to receive all current, online + # user presence. Removing from_key and setting include_offline to false + # will do effectively this. 
+ from_key = None + include_offline = False + max_token = self.store.get_current_presence_token() if from_key == max_token: # This is necessary as due to the way stream ID generators work @@ -1467,12 +1561,6 @@ async def get_new_events( user_id, include_offline, from_key ) - # Remove the user from the list of users to receive all presence - if user_id in self.get_module_api()._send_full_presence_to_local_users: - self.get_module_api()._send_full_presence_to_local_users.remove( - user_id - ) - return presence_updates, max_token # Make mypy happy. users_interested_in should now be a set @@ -1522,10 +1610,6 @@ async def get_new_events( ) presence_updates = list(users_to_state.values()) - # Remove the user from the list of users to receive all presence - if user_id in self.get_module_api()._send_full_presence_to_local_users: - self.get_module_api()._send_full_presence_to_local_users.remove(user_id) - if not include_offline: # Filter out offline presence states presence_updates = self._filter_offline_presence_state(presence_updates) diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index a1a2b9aeccd3..cecdc96bf517 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -56,14 +56,6 @@ def __init__(self, hs, auth_handler): self._http_client = hs.get_simple_http_client() # type: SimpleHttpClient self._public_room_list_manager = PublicRoomListManager(hs) - # The next time these users sync, they will receive the current presence - # state of all local users. Users are added by send_local_online_presence_to, - # and removed after a successful sync. - # - # We make this a private variable to deter modules from accessing it directly, - # though other classes in Synapse will still do so. - self._send_full_presence_to_local_users = set() - @property def http_client(self): """Allows making outbound HTTP requests to remote resources. @@ -405,39 +397,44 @@ async def send_local_online_presence_to(self, users: Iterable[str]) -> None: Updates to remote users will be sent immediately, whereas local users will receive them on their next sync attempt. - Note that this method can only be run on the main or federation_sender worker - processes. + Note that this method can only be run on the process that is configured to write to the + presence stream. By default this is the main process. """ - if not self._hs.should_send_federation(): + if self._hs._instance_name not in self._hs.config.worker.writers.presence: raise Exception( "send_local_online_presence_to can only be run " - "on processes that send federation", + "on the process that is configured to write to the " + "presence stream (by default this is the main process)", ) + local_users = set() + remote_users = set() for user in users: if self._hs.is_mine_id(user): - # Modify SyncHandler._generate_sync_entry_for_presence to call - # presence_source.get_new_events with an empty `from_key` if - # that user's ID were in a list modified by ModuleApi somewhere. - # That user would then get all presence state on next incremental sync. - - # Force a presence initial_sync for this user next time - self._send_full_presence_to_local_users.add(user) + local_users.add(user) else: - # Retrieve presence state for currently online users that this user - # is considered interested in - presence_events, _ = await self._presence_stream.get_new_events( - UserID.from_string(user), from_key=None, include_offline=False - ) - - # Send to remote destinations. 
- - # We pull out the presence handler here to break a cyclic - # dependency between the presence router and module API. - presence_handler = self._hs.get_presence_handler() - await presence_handler.maybe_send_presence_to_interested_destinations( - presence_events - ) + remote_users.add(user) + + # We pull out the presence handler here to break a cyclic + # dependency between the presence router and module API. + presence_handler = self._hs.get_presence_handler() + + if local_users: + # Force a presence initial_sync for these users next time they sync. + await presence_handler.send_full_presence_to_users(local_users) + + for user in remote_users: + # Retrieve presence state for currently online users that this user + # is considered interested in. + presence_events, _ = await self._presence_stream.get_new_events( + UserID.from_string(user), from_key=None, include_offline=False + ) + + # Send to remote destinations. + destination = UserID.from_string(user).domain + presence_handler.get_federation_queue().send_presence_to_destinations( + presence_events, destination + ) class PublicRoomListManager: diff --git a/synapse/replication/http/presence.py b/synapse/replication/http/presence.py index f25307620d55..bb0024795349 100644 --- a/synapse/replication/http/presence.py +++ b/synapse/replication/http/presence.py @@ -73,6 +73,7 @@ class ReplicationPresenceSetState(ReplicationEndpoint): { "state": { ... }, "ignore_status_msg": false, + "force_notify": false } 200 OK @@ -91,17 +92,23 @@ def __init__(self, hs: "HomeServer"): self._presence_handler = hs.get_presence_handler() @staticmethod - async def _serialize_payload(user_id, state, ignore_status_msg=False): + async def _serialize_payload( + user_id, state, ignore_status_msg=False, force_notify=False + ): return { "state": state, "ignore_status_msg": ignore_status_msg, + "force_notify": force_notify, } async def _handle_request(self, request, user_id): content = parse_json_object_from_request(request) await self._presence_handler.set_state( - UserID.from_string(user_id), content["state"], content["ignore_status_msg"] + UserID.from_string(user_id), + content["state"], + content["ignore_status_msg"], + content["force_notify"], ) return ( diff --git a/synapse/rest/admin/server_notice_servlet.py b/synapse/rest/admin/server_notice_servlet.py index cc3ab5854b0b..b5e4c474efc8 100644 --- a/synapse/rest/admin/server_notice_servlet.py +++ b/synapse/rest/admin/server_notice_servlet.py @@ -54,7 +54,6 @@ def __init__(self, hs: "HomeServer"): self.hs = hs self.auth = hs.get_auth() self.txns = HttpTransactionCache(hs) - self.snm = hs.get_server_notices_manager() def register(self, json_resource: HttpServer): PATTERN = "/send_server_notice" @@ -77,7 +76,10 @@ async def on_POST( event_type = body.get("type", EventTypes.Message) state_key = body.get("state_key") - if not self.snm.is_enabled(): + # We grab the server notices manager here as its initialisation has a check for worker processes, + # but worker processes still need to initialise SendServerNoticeServlet (as it is part of the + # admin api). 
+ if not self.hs.get_server_notices_manager().is_enabled(): raise SynapseError(400, "Server notices are not enabled on this server") user_id = body["user_id"] @@ -85,7 +87,7 @@ async def on_POST( if not self.hs.is_mine_id(user_id): raise SynapseError(400, "Server notices can only be sent to local users") - event = await self.snm.send_notice( + event = await self.hs.get_server_notices_manager().send_notice( user_id=body["user_id"], type=event_type, state_key=state_key, diff --git a/synapse/storage/databases/main/presence.py b/synapse/storage/databases/main/presence.py index db22fab23ea0..669a2af8845a 100644 --- a/synapse/storage/databases/main/presence.py +++ b/synapse/storage/databases/main/presence.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import TYPE_CHECKING, Dict, List, Tuple +from typing import TYPE_CHECKING, Dict, Iterable, List, Tuple from synapse.api.presence import PresenceState, UserPresenceState from synapse.replication.tcp.streams import PresenceStream @@ -57,6 +57,7 @@ def __init__( db_conn, "presence_stream", "stream_id" ) + self.hs = hs self._presence_on_startup = self._get_active_presence(db_conn) presence_cache_prefill, min_presence_val = self.db_pool.get_cache_dict( @@ -210,6 +211,61 @@ async def get_presence_for_users(self, user_ids): return {row["user_id"]: UserPresenceState(**row) for row in rows} + async def should_user_receive_full_presence_with_token( + self, + user_id: str, + from_token: int, + ) -> bool: + """Check whether the given user should receive full presence using the stream token + they're updating from. + + Args: + user_id: The ID of the user to check. + from_token: The stream token included in their /sync token. + + Returns: + True if the user should have full presence sent to them, False otherwise. + """ + + def _should_user_receive_full_presence_with_token_txn(txn): + sql = """ + SELECT 1 FROM users_to_send_full_presence_to + WHERE user_id = ? + AND presence_stream_id >= ? + """ + txn.execute(sql, (user_id, from_token)) + return bool(txn.fetchone()) + + return await self.db_pool.runInteraction( + "should_user_receive_full_presence_with_token", + _should_user_receive_full_presence_with_token_txn, + ) + + async def add_users_to_send_full_presence_to(self, user_ids: Iterable[str]): + """Adds to the list of users who should receive a full snapshot of presence + upon their next sync. + + Args: + user_ids: An iterable of user IDs. + """ + # Add user entries to the table, updating the presence_stream_id column if the user already + # exists in the table. + await self.db_pool.simple_upsert_many( + table="users_to_send_full_presence_to", + key_names=("user_id",), + key_values=[(user_id,) for user_id in user_ids], + value_names=("presence_stream_id",), + # We save the current presence stream ID token along with the user ID entry so + # that when a user /sync's, even if they syncing multiple times across separate + # devices at different times, each device will receive full presence once - when + # the presence stream ID in their sync token is less than the one in the table + # for their user ID. 
+ value_values=( + (self._presence_id_gen.get_current_token(),) for _ in user_ids + ), + desc="add_users_to_send_full_presence_to", + ) + async def get_presence_for_all_users( self, include_offline: bool = True, diff --git a/synapse/storage/schema/main/delta/59/13users_to_send_full_presence_to.sql b/synapse/storage/schema/main/delta/59/13users_to_send_full_presence_to.sql new file mode 100644 index 000000000000..07b0f53ecfa1 --- /dev/null +++ b/synapse/storage/schema/main/delta/59/13users_to_send_full_presence_to.sql @@ -0,0 +1,34 @@ +/* Copyright 2021 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Add a table that keeps track of a list of users who should, upon their next +-- sync request, receive presence for all currently online users that they are +-- "interested" in. + +-- The motivation for a DB table over an in-memory list is so that this list +-- can be added to and retrieved from by any worker. Specifically, we don't +-- want to duplicate work across multiple sync workers. + +CREATE TABLE IF NOT EXISTS users_to_send_full_presence_to( + -- The user ID to send full presence to. + user_id TEXT PRIMARY KEY, + -- A presence stream ID token - the current presence stream token when the row was last upserted. + -- If a user calls /sync and this token is part of the update they're to receive, we also include + -- full user presence in the response. + -- This allows multiple devices for a user to receive full presence whenever they next call /sync. + presence_stream_id BIGINT, + FOREIGN KEY (user_id) + REFERENCES users (name) +); \ No newline at end of file diff --git a/tests/events/test_presence_router.py b/tests/events/test_presence_router.py index 01d257307c6b..875b0d0a114b 100644 --- a/tests/events/test_presence_router.py +++ b/tests/events/test_presence_router.py @@ -302,11 +302,18 @@ def test_send_local_online_presence_to_with_module(self): ) # Check that the expected presence updates were sent - expected_users = [ + # We explicitly compare using sets as we expect that calling + # module_api.send_local_online_presence_to will create a presence + # update that is a duplicate of the specified user's current presence. + # These are sent to clients and will be picked up below, thus we use a + # set to deduplicate. We're just interested that non-offline updates were + # sent out for each user ID. 
+ expected_users = { self.other_user_id, self.presence_receiving_user_one_id, self.presence_receiving_user_two_id, - ] + } + found_users = set() calls = ( self.hs.get_federation_transport_client().send_transaction.call_args_list @@ -326,12 +333,12 @@ def test_send_local_online_presence_to_with_module(self): # EDUs can contain multiple presence updates for presence_update in edu["content"]["push"]: # Check for presence updates that contain the user IDs we're after - expected_users.remove(presence_update["user_id"]) + found_users.add(presence_update["user_id"]) # Ensure that no offline states are being sent out self.assertNotEqual(presence_update["presence"], "offline") - self.assertEqual(len(expected_users), 0) + self.assertEqual(found_users, expected_users) def send_presence_update( diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index 742ad14b8c3a..2c68b9a13c85 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -13,6 +13,8 @@ # limitations under the License. from unittest.mock import Mock +from twisted.internet import defer + from synapse.api.constants import EduTypes from synapse.events import EventBase from synapse.federation.units import Transaction @@ -22,11 +24,13 @@ from synapse.types import create_requester from tests.events.test_presence_router import send_presence_update, sync_presence +from tests.replication._base import BaseMultiWorkerStreamTestCase from tests.test_utils.event_injection import inject_member_event -from tests.unittest import FederatingHomeserverTestCase, override_config +from tests.unittest import HomeserverTestCase, override_config +from tests.utils import USE_POSTGRES_FOR_TESTS -class ModuleApiTestCase(FederatingHomeserverTestCase): +class ModuleApiTestCase(HomeserverTestCase): servlets = [ admin.register_servlets, login.register_servlets, @@ -217,97 +221,16 @@ def test_public_rooms(self): ) self.assertFalse(is_in_public_rooms) - # The ability to send federation is required by send_local_online_presence_to. 
- @override_config({"send_federation": True}) def test_send_local_online_presence_to(self): - """Tests that send_local_presence_to_users sends local online presence to local users.""" - # Create a user who will send presence updates - self.presence_receiver_id = self.register_user("presence_receiver", "monkey") - self.presence_receiver_tok = self.login("presence_receiver", "monkey") - - # And another user that will send presence updates out - self.presence_sender_id = self.register_user("presence_sender", "monkey") - self.presence_sender_tok = self.login("presence_sender", "monkey") - - # Put them in a room together so they will receive each other's presence updates - room_id = self.helper.create_room_as( - self.presence_receiver_id, - tok=self.presence_receiver_tok, - ) - self.helper.join(room_id, self.presence_sender_id, tok=self.presence_sender_tok) - - # Presence sender comes online - send_presence_update( - self, - self.presence_sender_id, - self.presence_sender_tok, - "online", - "I'm online!", - ) - - # Presence receiver should have received it - presence_updates, sync_token = sync_presence(self, self.presence_receiver_id) - self.assertEqual(len(presence_updates), 1) - - presence_update = presence_updates[0] # type: UserPresenceState - self.assertEqual(presence_update.user_id, self.presence_sender_id) - self.assertEqual(presence_update.state, "online") - - # Syncing again should result in no presence updates - presence_updates, sync_token = sync_presence( - self, self.presence_receiver_id, sync_token - ) - self.assertEqual(len(presence_updates), 0) - - # Trigger sending local online presence - self.get_success( - self.module_api.send_local_online_presence_to( - [ - self.presence_receiver_id, - ] - ) - ) - - # Presence receiver should have received online presence again - presence_updates, sync_token = sync_presence( - self, self.presence_receiver_id, sync_token - ) - self.assertEqual(len(presence_updates), 1) - - presence_update = presence_updates[0] # type: UserPresenceState - self.assertEqual(presence_update.user_id, self.presence_sender_id) - self.assertEqual(presence_update.state, "online") - - # Presence sender goes offline - send_presence_update( - self, - self.presence_sender_id, - self.presence_sender_tok, - "offline", - "I slink back into the darkness.", - ) - - # Trigger sending local online presence - self.get_success( - self.module_api.send_local_online_presence_to( - [ - self.presence_receiver_id, - ] - ) - ) - - # Presence receiver should *not* have received offline state - presence_updates, sync_token = sync_presence( - self, self.presence_receiver_id, sync_token - ) - self.assertEqual(len(presence_updates), 0) + # Test sending local online presence to users from the main process + _test_sending_local_online_presence_to_local_user(self, test_with_workers=False) @override_config({"send_federation": True}) def test_send_local_online_presence_to_federation(self): """Tests that send_local_presence_to_users sends local online presence to remote users.""" # Create a user who will send presence updates - self.presence_sender_id = self.register_user("presence_sender", "monkey") - self.presence_sender_tok = self.login("presence_sender", "monkey") + self.presence_sender_id = self.register_user("presence_sender1", "monkey") + self.presence_sender_tok = self.login("presence_sender1", "monkey") # And a room they're a part of room_id = self.helper.create_room_as( @@ -374,3 +297,209 @@ def test_send_local_online_presence_to_federation(self): found_update = True 
self.assertTrue(found_update) + + +class ModuleApiWorkerTestCase(BaseMultiWorkerStreamTestCase): + """For testing ModuleApi functionality in a multi-worker setup""" + + # Testing stream ID replication from the main to worker processes requires postgres + # (due to needing `MultiWriterIdGenerator`). + if not USE_POSTGRES_FOR_TESTS: + skip = "Requires Postgres" + + servlets = [ + admin.register_servlets, + login.register_servlets, + room.register_servlets, + presence.register_servlets, + ] + + def default_config(self): + conf = super().default_config() + conf["redis"] = {"enabled": "true"} + conf["stream_writers"] = {"presence": ["presence_writer"]} + conf["instance_map"] = { + "presence_writer": {"host": "testserv", "port": 1001}, + } + return conf + + def prepare(self, reactor, clock, homeserver): + self.module_api = homeserver.get_module_api() + self.sync_handler = homeserver.get_sync_handler() + + def test_send_local_online_presence_to_workers(self): + # Test sending local online presence to users from a worker process + _test_sending_local_online_presence_to_local_user(self, test_with_workers=True) + + +def _test_sending_local_online_presence_to_local_user( + test_case: HomeserverTestCase, test_with_workers: bool = False +): + """Tests that send_local_presence_to_users sends local online presence to local users. + + This simultaneously tests two different usecases: + * Testing that this method works when either called from a worker or the main process. + - We test this by calling this method from both a TestCase that runs in monolith mode, and one that + runs with a main and generic_worker. + * Testing that multiple devices syncing simultaneously will all receive a snapshot of local, + online presence - but only once per device. + + Args: + test_with_workers: If True, this method will call ModuleApi.send_local_online_presence_to on a + worker process. The test users will still sync with the main process. The purpose of testing + with a worker is to check whether a Synapse module running on a worker can inform other workers/ + the main process that they should include additional presence when a user next syncs. 
+ """ + if test_with_workers: + # Create a worker process to make module_api calls against + worker_hs = test_case.make_worker_hs( + "synapse.app.generic_worker", {"worker_name": "presence_writer"} + ) + + # Create a user who will send presence updates + test_case.presence_receiver_id = test_case.register_user( + "presence_receiver1", "monkey" + ) + test_case.presence_receiver_tok = test_case.login("presence_receiver1", "monkey") + + # And another user that will send presence updates out + test_case.presence_sender_id = test_case.register_user("presence_sender2", "monkey") + test_case.presence_sender_tok = test_case.login("presence_sender2", "monkey") + + # Put them in a room together so they will receive each other's presence updates + room_id = test_case.helper.create_room_as( + test_case.presence_receiver_id, + tok=test_case.presence_receiver_tok, + ) + test_case.helper.join( + room_id, test_case.presence_sender_id, tok=test_case.presence_sender_tok + ) + + # Presence sender comes online + send_presence_update( + test_case, + test_case.presence_sender_id, + test_case.presence_sender_tok, + "online", + "I'm online!", + ) + + # Presence receiver should have received it + presence_updates, sync_token = sync_presence( + test_case, test_case.presence_receiver_id + ) + test_case.assertEqual(len(presence_updates), 1) + + presence_update = presence_updates[0] # type: UserPresenceState + test_case.assertEqual(presence_update.user_id, test_case.presence_sender_id) + test_case.assertEqual(presence_update.state, "online") + + if test_with_workers: + # Replicate the current sync presence token from the main process to the worker process. + # We need to do this so that the worker process knows the current presence stream ID to + # insert into the database when we call ModuleApi.send_local_online_presence_to. + test_case.replicate() + + # Syncing again should result in no presence updates + presence_updates, sync_token = sync_presence( + test_case, test_case.presence_receiver_id, sync_token + ) + test_case.assertEqual(len(presence_updates), 0) + + # We do an (initial) sync with a second "device" now, getting a new sync token. + # We'll use this in a moment. + _, sync_token_second_device = sync_presence( + test_case, test_case.presence_receiver_id + ) + + # Determine on which process (main or worker) to call ModuleApi.send_local_online_presence_to on + if test_with_workers: + module_api_to_use = worker_hs.get_module_api() + else: + module_api_to_use = test_case.module_api + + # Trigger sending local online presence. We expect this information + # to be saved to the database where all processes can access it. + # Note that we're syncing via the master. + d = module_api_to_use.send_local_online_presence_to( + [ + test_case.presence_receiver_id, + ] + ) + d = defer.ensureDeferred(d) + + if test_with_workers: + # In order for the required presence_set_state replication request to occur between the + # worker and main process, we need to pump the reactor. Otherwise, the coordinator that + # reads the request on the main process won't do so, and the request will time out. + while not d.called: + test_case.reactor.advance(0.1) + + test_case.get_success(d) + + # The presence receiver should have received online presence again. 
+ presence_updates, sync_token = sync_presence( + test_case, test_case.presence_receiver_id, sync_token + ) + test_case.assertEqual(len(presence_updates), 1) + + presence_update = presence_updates[0] # type: UserPresenceState + test_case.assertEqual(presence_update.user_id, test_case.presence_sender_id) + test_case.assertEqual(presence_update.state, "online") + + # We attempt to sync with the second sync token we received above - just to check that + # multiple syncing devices will each receive the necessary online presence. + presence_updates, sync_token_second_device = sync_presence( + test_case, test_case.presence_receiver_id, sync_token_second_device + ) + test_case.assertEqual(len(presence_updates), 1) + + presence_update = presence_updates[0] # type: UserPresenceState + test_case.assertEqual(presence_update.user_id, test_case.presence_sender_id) + test_case.assertEqual(presence_update.state, "online") + + # However, if we now sync with either "device", we won't receive another burst of online presence + # until the API is called again sometime in the future + presence_updates, sync_token = sync_presence( + test_case, test_case.presence_receiver_id, sync_token + ) + + # Now we check that we don't receive *offline* updates using ModuleApi.send_local_online_presence_to. + + # Presence sender goes offline + send_presence_update( + test_case, + test_case.presence_sender_id, + test_case.presence_sender_tok, + "offline", + "I slink back into the darkness.", + ) + + # Presence receiver should have received the updated, offline state + presence_updates, sync_token = sync_presence( + test_case, test_case.presence_receiver_id, sync_token + ) + test_case.assertEqual(len(presence_updates), 1) + + # Now trigger sending local online presence. + d = module_api_to_use.send_local_online_presence_to( + [ + test_case.presence_receiver_id, + ] + ) + d = defer.ensureDeferred(d) + + if test_with_workers: + # In order for the required presence_set_state replication request to occur between the + # worker and main process, we need to pump the reactor. Otherwise, the coordinator that + # reads the request on the main process won't do so, and the request will time out. + while not d.called: + test_case.reactor.advance(0.1) + + test_case.get_success(d) + + # Presence receiver should *not* have received offline state + presence_updates, sync_token = sync_presence( + test_case, test_case.presence_receiver_id, sync_token + ) + test_case.assertEqual(len(presence_updates), 0) diff --git a/tests/replication/test_sharded_event_persister.py b/tests/replication/test_sharded_event_persister.py index d739eb6b17f0..5eca5c165d0a 100644 --- a/tests/replication/test_sharded_event_persister.py +++ b/tests/replication/test_sharded_event_persister.py @@ -30,7 +30,7 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase): """Checks event persisting sharding works""" # Event persister sharding requires postgres (due to needing - # `MutliWriterIdGenerator`). + # `MultiWriterIdGenerator`). if not USE_POSTGRES_FOR_TESTS: skip = "Requires Postgres" From ac6bfcd52f03e9574324978f83a281cf35f4ea89 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 18 May 2021 12:17:04 -0400 Subject: [PATCH 155/222] Refactor checking restricted join rules (#10007) To be more consistent with similar code. The check now automatically raises an AuthError instead of passing back a boolean. It also absorbs some shared logic between callers. 
--- changelog.d/10007.feature | 1 + synapse/handlers/event_auth.py | 51 ++++++++++++++++++++++----------- synapse/handlers/federation.py | 29 ++++++------------- synapse/handlers/room_member.py | 20 ++++--------- 4 files changed, 50 insertions(+), 51 deletions(-) create mode 100644 changelog.d/10007.feature diff --git a/changelog.d/10007.feature b/changelog.d/10007.feature new file mode 100644 index 000000000000..2c655350c04f --- /dev/null +++ b/changelog.d/10007.feature @@ -0,0 +1 @@ +Experimental support to allow a user who could join a restricted room to view it in the spaces summary. diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py index eff639f40760..5b2fe103e742 100644 --- a/synapse/handlers/event_auth.py +++ b/synapse/handlers/event_auth.py @@ -11,10 +11,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional -from synapse.api.constants import EventTypes, JoinRules +from synapse.api.constants import EventTypes, JoinRules, Membership +from synapse.api.errors import AuthError from synapse.api.room_versions import RoomVersion +from synapse.events import EventBase from synapse.types import StateMap if TYPE_CHECKING: @@ -29,44 +31,58 @@ class EventAuthHandler: def __init__(self, hs: "HomeServer"): self._store = hs.get_datastore() - async def can_join_without_invite( - self, state_ids: StateMap[str], room_version: RoomVersion, user_id: str - ) -> bool: + async def check_restricted_join_rules( + self, + state_ids: StateMap[str], + room_version: RoomVersion, + user_id: str, + prev_member_event: Optional[EventBase], + ) -> None: """ - Check whether a user can join a room without an invite. + Check whether a user can join a room without an invite due to restricted join rules. When joining a room with restricted joined rules (as defined in MSC3083), - the membership of spaces must be checked during join. + the membership of spaces must be checked during a room join. Args: state_ids: The state of the room as it currently is. room_version: The room version of the room being joined. user_id: The user joining the room. + prev_member_event: The current membership event for this user. - Returns: - True if the user can join the room, false otherwise. + Raises: + AuthError if the user cannot join the room. """ + # If the member is invited or currently joined, then nothing to do. + if prev_member_event and ( + prev_member_event.membership in (Membership.JOIN, Membership.INVITE) + ): + return + # This only applies to room versions which support the new join rule. if not room_version.msc3083_join_rules: - return True + return # If there's no join rule, then it defaults to invite (so this doesn't apply). join_rules_event_id = state_ids.get((EventTypes.JoinRules, ""), None) if not join_rules_event_id: - return True + return # If the join rule is not restricted, this doesn't apply. join_rules_event = await self._store.get_event(join_rules_event_id) if join_rules_event.content.get("join_rule") != JoinRules.MSC3083_RESTRICTED: - return True + return # If allowed is of the wrong form, then only allow invited users. allowed_spaces = join_rules_event.content.get("allow", []) if not isinstance(allowed_spaces, list): - return False + allowed_spaces = () # Get the list of joined rooms and see if there's an overlap. 
- joined_rooms = await self._store.get_rooms_for_user(user_id) + if allowed_spaces: + joined_rooms = await self._store.get_rooms_for_user(user_id) + else: + joined_rooms = () # Pull out the other room IDs, invalid data gets filtered. for space in allowed_spaces: @@ -80,7 +96,10 @@ async def can_join_without_invite( # The user was joined to one of the spaces specified, they can join # this room! if space_id in joined_rooms: - return True + return # The user was not in any of the required spaces. - return False + raise AuthError( + 403, + "You do not belong to any of the required spaces to join this room.", + ) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 798ed75b30fa..678f6b770797 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1668,28 +1668,17 @@ async def on_send_join_request(self, origin: str, pdu: EventBase) -> JsonDict: # Check if the user is already in the room or invited to the room. user_id = event.state_key prev_member_event_id = prev_state_ids.get((EventTypes.Member, user_id), None) - newly_joined = True - user_is_invited = False + prev_member_event = None if prev_member_event_id: prev_member_event = await self.store.get_event(prev_member_event_id) - newly_joined = prev_member_event.membership != Membership.JOIN - user_is_invited = prev_member_event.membership == Membership.INVITE - - # If the member is not already in the room, and not invited, check if - # they should be allowed access via membership in a space. - if ( - newly_joined - and not user_is_invited - and not await self._event_auth_handler.can_join_without_invite( - prev_state_ids, - event.room_version, - user_id, - ) - ): - raise AuthError( - 403, - "You do not belong to any of the required spaces to join this room.", - ) + + # Check if the member should be allowed access via membership in a space. + await self._event_auth_handler.check_restricted_join_rules( + prev_state_ids, + event.room_version, + user_id, + prev_member_event, + ) # Persist the event. await self._auth_and_persist_event(origin, event, context) diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 9a092da71597..d6fc43e7984c 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -260,25 +260,15 @@ async def _local_membership_update( if event.membership == Membership.JOIN: newly_joined = True - user_is_invited = False + prev_member_event = None if prev_member_event_id: prev_member_event = await self.store.get_event(prev_member_event_id) newly_joined = prev_member_event.membership != Membership.JOIN - user_is_invited = prev_member_event.membership == Membership.INVITE - # If the member is not already in the room and is not accepting an invite, - # check if they should be allowed access via membership in a space. - if ( - newly_joined - and not user_is_invited - and not await self.event_auth_handler.can_join_without_invite( - prev_state_ids, event.room_version, user_id - ) - ): - raise AuthError( - 403, - "You do not belong to any of the required spaces to join this room.", - ) + # Check if the member should be allowed access via membership in a space. + await self.event_auth_handler.check_restricted_join_rules( + prev_state_ids, event.room_version, user_id, prev_member_event + ) # Only rate-limit if the user actually joined the room, otherwise we'll end # up blocking profile updates. 
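The "invalid data gets filtered" behaviour in the hunk above deserves a worked example, since the diff elides the body of the extraction loop. A minimal sketch, assuming each `allow` entry is a dict carrying a room-ID string; the exact key name is not shown in the hunk, so treat `"room_id"` as illustrative:

```python
# Hedged sketch of parsing an MSC3083 ``allow`` list the way the new check
# does: a malformed list degrades to "allow nobody" (only invited users may
# join), and malformed entries are skipped rather than failing the join.
def extract_allowed_space_ids(join_rule_content: dict) -> list:
    allowed_spaces = join_rule_content.get("allow", [])
    if not isinstance(allowed_spaces, list):
        # Wrong form entirely: behave as if no spaces grant access.
        return []
    space_ids = []
    for space in allowed_spaces:
        # Invalid data gets filtered, mirroring the comment in the hunk.
        if isinstance(space, dict) and isinstance(space.get("room_id"), str):
            space_ids.append(space["room_id"])
    return space_ids


content = {
    "join_rule": "restricted",
    "allow": [
        {"room_id": "!space:example.org"},  # valid entry
        {"via": ["example.org"]},           # no room ID: filtered out
        "not-a-dict",                       # wrong shape: filtered out
    ],
}
assert extract_allowed_space_ids(content) == ["!space:example.org"]
assert extract_allowed_space_ids({"allow": "oops"}) == []
```

Each surviving room ID is then tested against the user's joined rooms, and the check only raises once no overlap is found.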
From 5bba1b49058a648197f217268a3978d8acf09c51 Mon Sep 17 00:00:00 2001 From: Savyasachee Jha Date: Wed, 19 May 2021 16:14:16 +0530 Subject: [PATCH 156/222] Hardened systemd unit files (#9803) Signed-off-by: Savyasachee Jha savya.jha@hawkradius.com --- changelog.d/9803.doc | 1 + contrib/systemd/override-hardened.conf | 71 ++++++++++++++++++++++++++ docs/systemd-with-workers/README.md | 30 +++++++++++ 3 files changed, 102 insertions(+) create mode 100644 changelog.d/9803.doc create mode 100644 contrib/systemd/override-hardened.conf diff --git a/changelog.d/9803.doc b/changelog.d/9803.doc new file mode 100644 index 000000000000..16c7ba703394 --- /dev/null +++ b/changelog.d/9803.doc @@ -0,0 +1 @@ +Add hardened systemd files as proposed in [#9760](https://github.com/matrix-org/synapse/issues/9760) and added them to `contrib/`. Change the docs to reflect the presence of these files. diff --git a/contrib/systemd/override-hardened.conf b/contrib/systemd/override-hardened.conf new file mode 100644 index 000000000000..b2fa3ae7c5db --- /dev/null +++ b/contrib/systemd/override-hardened.conf @@ -0,0 +1,71 @@ +[Service] +# The following directives give the synapse service R/W access to: +# - /run/matrix-synapse +# - /var/lib/matrix-synapse +# - /var/log/matrix-synapse + +RuntimeDirectory=matrix-synapse +StateDirectory=matrix-synapse +LogsDirectory=matrix-synapse + +###################### +## Security Sandbox ## +###################### + +# Make sure that the service has its own unshared tmpfs at /tmp and that it +# cannot see or change any real devices +PrivateTmp=true +PrivateDevices=true + +# We give no capabilities to a service by default +CapabilityBoundingSet= +AmbientCapabilities= + +# Protect the following from modification: +# - The entire filesystem +# - sysctl settings and loaded kernel modules +# - No modifications allowed to Control Groups +# - Hostname +# - System Clock +ProtectSystem=strict +ProtectKernelTunables=true +ProtectKernelModules=true +ProtectControlGroups=true +ProtectClock=true +ProtectHostname=true + +# Prevent access to the following: +# - /home directory +# - Kernel logs +ProtectHome=tmpfs +ProtectKernelLogs=true + +# Make sure that the process can only see PIDs and process details of itself, +# and the second option disables seeing details of things like system load and +# I/O etc +ProtectProc=invisible +ProcSubset=pid + +# While not needed, we set these options explicitly +# - This process has been given access to the host network +# - It can also communicate with any IP Address +PrivateNetwork=false +RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX +IPAddressAllow=any + +# Restrict system calls to a sane bunch +SystemCallArchitectures=native +SystemCallFilter=@system-service +SystemCallFilter=~@privileged @resources @obsolete + +# Misc restrictions +# - Since the process is a python process it needs to be able to write and +# execute memory regions, so we set MemoryDenyWriteExecute to false +RestrictSUIDSGID=true +RemoveIPC=true +NoNewPrivileges=true +RestrictRealtime=true +RestrictNamespaces=true +LockPersonality=true +PrivateUsers=true +MemoryDenyWriteExecute=false diff --git a/docs/systemd-with-workers/README.md b/docs/systemd-with-workers/README.md index cfa36be7b4c5..a1135e9ed578 100644 --- a/docs/systemd-with-workers/README.md +++ b/docs/systemd-with-workers/README.md @@ -65,3 +65,33 @@ systemctl restart matrix-synapse-worker@federation_reader.service systemctl enable matrix-synapse-worker@federation_writer.service systemctl restart matrix-synapse.target ``` + +## 
Hardening + +**Optional:** If further hardening is desired, the file +`override-hardened.conf` may be copied from +`contrib/systemd/override-hardened.conf` in this repository to the location +`/etc/systemd/system/matrix-synapse.service.d/override-hardened.conf` (the +directory may have to be created). It enables certain sandboxing features in +systemd to further secure the synapse service. You may read the comments to +understand what the override file is doing. The same file will need to be copied +to +`/etc/systemd/system/matrix-synapse-worker@.service.d/override-hardened-worker.conf` +(this directory may also have to be created) in order to apply the same +hardening options to any worker processes. + +Once these files have been copied to their appropriate locations, simply reload +systemd's manager config files and restart all Synapse services to apply the hardening options. They will automatically +be applied at every restart as long as the override files are present at the +specified locations. + +```sh +systemctl daemon-reload + +# Restart services +systemctl restart matrix-synapse.target +``` + +In order to see their effect, you may run `systemd-analyze security +matrix-synapse.service` before and after applying the hardening options to see +the changes being applied at a glance. From 9c76d0561bdbe6741088fe8af1d336166058bb01 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 19 May 2021 11:47:16 +0100 Subject: [PATCH 157/222] Update the contrib grafana dashboard (#10001) --- changelog.d/10001.misc | 1 + contrib/grafana/synapse.json | 5623 ++++++++++++++++++++++++++-------- 2 files changed, 4269 insertions(+), 1355 deletions(-) create mode 100644 changelog.d/10001.misc diff --git a/changelog.d/10001.misc b/changelog.d/10001.misc new file mode 100644 index 000000000000..8740cc478dda --- /dev/null +++ b/changelog.d/10001.misc @@ -0,0 +1 @@ +Update the Grafana dashboard in `contrib/`. 
diff --git a/contrib/grafana/synapse.json b/contrib/grafana/synapse.json index 539569b5b12a..0c4816b7cd53 100644 --- a/contrib/grafana/synapse.json +++ b/contrib/grafana/synapse.json @@ -14,7 +14,7 @@ "type": "grafana", "id": "grafana", "name": "Grafana", - "version": "6.7.4" + "version": "7.3.7" }, { "type": "panel", @@ -38,7 +38,6 @@ "annotations": { "list": [ { - "$$hashKey": "object:76", "builtIn": 1, "datasource": "$datasource", "enable": false, @@ -55,11 +54,12 @@ "gnetId": null, "graphTooltip": 0, "id": null, - "iteration": 1594646317221, + "iteration": 1621258266004, "links": [ { - "asDropdown": true, + "asDropdown": false, "icon": "external link", + "includeVars": true, "keepTime": true, "tags": [ "matrix" @@ -83,73 +83,255 @@ "title": "Overview", "type": "row" }, + { + "cards": { + "cardPadding": -1, + "cardRound": 0 + }, + "color": { + "cardColor": "#b4ff00", + "colorScale": "sqrt", + "colorScheme": "interpolateInferno", + "exponent": 0.5, + "mode": "spectrum" + }, + "dataFormat": "tsbuckets", + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 1 + }, + "heatmap": {}, + "hideZeroBuckets": false, + "highlightCards": true, + "id": 189, + "legend": { + "show": false + }, + "links": [], + "reverseYBuckets": false, + "targets": [ + { + "expr": "sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le)", + "format": "heatmap", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{le}}", + "refId": "A" + } + ], + "title": "Event Send Time (excluding errors, all workers)", + "tooltip": { + "show": true, + "showHistogram": true + }, + "type": "heatmap", + "xAxis": { + "show": true + }, + "xBucketNumber": null, + "xBucketSize": null, + "yAxis": { + "decimals": null, + "format": "s", + "logBase": 2, + "max": null, + "min": null, + "show": true, + "splitFactor": null + }, + "yBucketBound": "auto", + "yBucketNumber": null, + "yBucketSize": null + }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$datasource", - "fill": 1, + "description": "", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 0, "fillGradient": 0, "gridPos": { "h": 9, "w": 12, - "x": 0, + "x": 12, "y": 1 }, "hiddenSeries": false, - "id": 75, + "id": 152, "legend": { "avg": false, "current": false, "max": false, "min": false, + "rightSide": false, "show": true, "total": false, "values": false }, "lines": true, - "linewidth": 1, + "linewidth": 0, "links": [], - "nullPointMode": "null", + "nullPointMode": "connected", "options": { - "dataLinks": [] + "alertThreshold": true }, "paceLength": 10, "percentage": false, + "pluginVersion": "7.3.7", "pointradius": 5, "points": false, "renderer": "flot", - "seriesOverrides": [], + "seriesOverrides": [ + { + "alias": "Avg", + "fill": 0, + "linewidth": 3 + }, + { + "alias": "99%", + "color": "#C4162A", + "fillBelowTo": "90%" + }, + { + "alias": "90%", + "color": "#FF7383", + "fillBelowTo": "75%" + }, + { + "alias": "75%", + "color": "#FFEE52", + "fillBelowTo": "50%" + }, + { + "alias": "50%", + "color": "#73BF69", + "fillBelowTo": "25%" + }, + { + "alias": "25%", + "color": "#1F60C4", + "fillBelowTo": "5%" + }, + { + "alias": "5%", + "lines": false + }, + { + "alias": "Average", + "color": "rgb(255, 255, 255)", + "lines": true, + "linewidth": 3 + }, + { + "alias": "Events", + "color": 
"#B877D9", + "hideTooltip": true, + "points": true, + "yaxis": 2, + "zindex": -3 + } + ], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "rate(process_cpu_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", + "expr": "histogram_quantile(0.99, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))", "format": "time_series", "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} ", + "legendFormat": "99%", + "refId": "D" + }, + { + "expr": "histogram_quantile(0.9, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "90%", "refId": "A" + }, + { + "expr": "histogram_quantile(0.75, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "75%", + "refId": "C" + }, + { + "expr": "histogram_quantile(0.5, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "50%", + "refId": "B" + }, + { + "expr": "histogram_quantile(0.25, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))", + "legendFormat": "25%", + "refId": "F" + }, + { + "expr": "histogram_quantile(0.05, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) by (le))", + "legendFormat": "5%", + "refId": "G" + }, + { + "expr": "sum(rate(synapse_http_server_response_time_seconds_sum{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size])) / sum(rate(synapse_http_server_response_time_seconds_count{servlet='RoomSendEventRestServlet',index=~\"$index\",instance=\"$instance\",code=~\"2..\"}[$bucket_size]))", + "legendFormat": "Average", + "refId": "H" + }, + { + "expr": "sum(rate(synapse_storage_events_persisted_events{instance=\"$instance\"}[$bucket_size]))", + "hide": false, + "instant": false, + "legendFormat": "Events", + "refId": "E" } ], "thresholds": [ { - "colorMode": "critical", - "fill": true, + "$$hashKey": "object:283", + "colorMode": "warning", + "fill": false, "line": true, "op": "gt", "value": 1, "yaxis": "left" + }, + { + "$$hashKey": "object:284", + "colorMode": "critical", + "fill": false, + "line": true, + "op": "gt", + "value": 2, + "yaxis": "left" } ], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "CPU usage", + "title": "Event Send Time Quantiles (excluding errors, all workers)", "tooltip": { "shared": true, - "sort": 0, + "sort": 2, "value_type": "individual" }, "type": "graph", @@ -162,20 +344,22 @@ }, "yaxes": [ { + "$$hashKey": "object:255", "decimals": null, - "format": "percentunit", - "label": null, + "format": "s", + "label": "", "logBase": 1, - "max": "1.5", + "max": null, "min": "0", "show": true }, { - "format": "short", - "label": null, + "$$hashKey": 
"object:256", + "format": "hertz", + "label": "", "logBase": 1, "max": null, - "min": null, + "min": "0", "show": true } ], @@ -190,37 +374,42 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", - "editable": true, - "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, - "grid": {}, "gridPos": { "h": 9, "w": 12, - "x": 12, - "y": 1 + "x": 0, + "y": 10 }, "hiddenSeries": false, - "id": 33, + "id": 75, "legend": { "avg": false, "current": false, "max": false, "min": false, - "show": false, + "show": true, "total": false, "values": false }, "lines": true, - "linewidth": 2, + "linewidth": 3, "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "paceLength": 10, "percentage": false, + "pluginVersion": "7.3.7", "pointradius": 5, "points": false, "renderer": "flot", @@ -230,24 +419,33 @@ "steppedLine": false, "targets": [ { - "expr": "sum(rate(synapse_storage_events_persisted_events{instance=\"$instance\"}[$bucket_size])) without (job,index)", + "expr": "rate(process_cpu_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", - "intervalFactor": 2, - "legendFormat": "", - "refId": "A", - "step": 20, - "target": "" + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{job}}-{{index}} ", + "refId": "A" + } + ], + "thresholds": [ + { + "$$hashKey": "object:566", + "colorMode": "critical", + "fill": true, + "line": true, + "op": "gt", + "value": 1, + "yaxis": "left" } ], - "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Events Persisted", + "title": "CPU usage", "tooltip": { - "shared": true, + "shared": false, "sort": 0, - "value_type": "cumulative" + "value_type": "individual" }, "type": "graph", "xaxis": { @@ -259,14 +457,19 @@ }, "yaxes": [ { - "format": "hertz", + "$$hashKey": "object:538", + "decimals": null, + "format": "percentunit", + "label": null, "logBase": 1, - "max": null, - "min": null, + "max": "1.5", + "min": "0", "show": true }, { + "$$hashKey": "object:539", "format": "short", + "label": null, "logBase": 1, "max": null, "min": null, @@ -278,76 +481,24 @@ "alignLevel": null } }, - { - "cards": { - "cardPadding": 0, - "cardRound": null - }, - "color": { - "cardColor": "#b4ff00", - "colorScale": "sqrt", - "colorScheme": "interpolateSpectral", - "exponent": 0.5, - "mode": "spectrum" - }, - "dataFormat": "tsbuckets", - "datasource": "$datasource", - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 10 - }, - "heatmap": {}, - "hideZeroBuckets": true, - "highlightCards": true, - "id": 85, - "legend": { - "show": false - }, - "links": [], - "reverseYBuckets": false, - "targets": [ - { - "expr": "sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\"}[$bucket_size])) by (le)", - "format": "heatmap", - "intervalFactor": 1, - "legendFormat": "{{le}}", - "refId": "A" - } - ], - "title": "Event Send Time", - "tooltip": { - "show": true, - "showHistogram": false - }, - "type": "heatmap", - "xAxis": { - "show": true - }, - "xBucketNumber": null, - "xBucketSize": null, - "yAxis": { - "decimals": null, - "format": "s", - "logBase": 2, - "max": null, - "min": null, - "show": true, - "splitFactor": null - }, - "yBucketBound": "auto", - "yBucketNumber": null, - "yBucketSize": null - }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$datasource", - "fill": 0, + 
"editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, "fillGradient": 0, + "grid": {}, "gridPos": { "h": 9, "w": 12, @@ -355,7 +506,7 @@ "y": 10 }, "hiddenSeries": false, - "id": 107, + "id": 198, "legend": { "avg": false, "current": false, @@ -366,76 +517,52 @@ "values": false }, "lines": true, - "linewidth": 1, + "linewidth": 3, "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "paceLength": 10, "percentage": false, + "pluginVersion": "7.3.7", "pointradius": 5, "points": false, "renderer": "flot", - "repeat": null, - "repeatDirection": "h", - "seriesOverrides": [ - { - "alias": "mean", - "linewidth": 2 - } - ], + "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\"}[$bucket_size])) without (job, index, method))", + "expr": "process_resident_memory_bytes{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", "interval": "", - "intervalFactor": 1, - "legendFormat": "99%", - "refId": "A" - }, - { - "expr": "histogram_quantile(0.95, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\"}[$bucket_size])) without (job, index, method))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "95%", - "refId": "B" - }, - { - "expr": "histogram_quantile(0.90, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\"}[$bucket_size])) without (job, index, method))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "90%", - "refId": "C" - }, - { - "expr": "histogram_quantile(0.50, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\"}[$bucket_size])) without (job, index, method))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "50%", - "refId": "D" + "intervalFactor": 2, + "legendFormat": "{{job}} {{index}}", + "refId": "A", + "step": 20, + "target": "" }, { - "expr": "sum(rate(synapse_http_server_response_time_seconds_sum{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\"}[$bucket_size])) without (job, index, method) / sum(rate(synapse_http_server_response_time_seconds_count{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\"}[$bucket_size])) without (job, index, method)", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "mean", - "refId": "E" + "expr": "sum(process_resident_memory_bytes{instance=\"$instance\",job=~\"$job\",index=~\"$index\"})", + "hide": true, + "interval": "", + "legendFormat": "total", + "refId": "B" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Event send time quantiles", + "title": "Memory", "tooltip": { - "shared": true, + "shared": false, "sort": 0, - "value_type": "individual" + "value_type": "cumulative" }, + "transformations": [], "type": "graph", "xaxis": { "buckets": null, @@ -446,16 +573,16 @@ }, "yaxes": [ { - "format": "s", - "label": null, + "$$hashKey": "object:1560", + "format": "bytes", "logBase": 1, "max": null, - "min": null, + "min": "0", "show": true }, { + "$$hashKey": "object:1561", "format": "short", - "label": null, 
"logBase": 1, "max": null, "min": null, @@ -473,16 +600,23 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", - "fill": 0, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, "fillGradient": 0, "gridPos": { - "h": 9, + "h": 7, "w": 12, - "x": 0, + "x": 12, "y": 19 }, "hiddenSeries": false, - "id": 118, + "id": 37, "legend": { "avg": false, "current": false, @@ -497,18 +631,21 @@ "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "paceLength": 10, "percentage": false, + "pluginVersion": "7.3.7", "pointradius": 5, "points": false, "renderer": "flot", - "repeatDirection": "h", "seriesOverrides": [ { - "alias": "mean", - "linewidth": 2 + "$$hashKey": "object:639", + "alias": "/max$/", + "color": "#890F02", + "fill": 0, + "legend": false } ], "spaceLength": 10, @@ -516,49 +653,33 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method))", + "expr": "process_open_fds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", + "hide": false, "interval": "", - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} 99%", - "refId": "A" - }, - { - "expr": "histogram_quantile(0.95, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} 95%", - "refId": "B" - }, - { - "expr": "histogram_quantile(0.90, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} 90%", - "refId": "C" - }, - { - "expr": "histogram_quantile(0.50, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} 50%", - "refId": "D" + "intervalFactor": 2, + "legendFormat": "{{job}}-{{index}}", + "refId": "A", + "step": 20 }, { - "expr": "sum(rate(synapse_http_server_response_time_seconds_sum{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method) / sum(rate(synapse_http_server_response_time_seconds_count{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method)", + "expr": "process_max_fds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} mean", - "refId": "E" + "hide": true, + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{job}}-{{index}} max", + "refId": "B", + "step": 20 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Event send time quantiles by worker", + "title": "Open FDs", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -572,14 +693,18 @@ }, "yaxes": 
[ { - "format": "s", - "label": null, + "$$hashKey": "object:650", + "decimals": null, + "format": "none", + "label": "", "logBase": 1, "max": null, "min": null, "show": true }, { + "$$hashKey": "object:651", + "decimals": null, "format": "short", "label": null, "logBase": 1, @@ -600,7 +725,7 @@ "h": 1, "w": 24, "x": 0, - "y": 28 + "y": 26 }, "id": 54, "panels": [ @@ -612,6 +737,13 @@ "datasource": "$datasource", "editable": true, "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "grid": {}, @@ -619,7 +751,7 @@ "h": 7, "w": 12, "x": 0, - "y": 2 + "y": 25 }, "hiddenSeries": false, "id": 5, @@ -637,22 +769,25 @@ "values": false }, "lines": true, - "linewidth": 1, + "linewidth": 3, "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "paceLength": 10, "percentage": false, + "pluginVersion": "7.3.7", "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [ { + "$$hashKey": "object:1240", "alias": "/user/" }, { + "$$hashKey": "object:1241", "alias": "/system/" } ], @@ -682,20 +817,33 @@ ], "thresholds": [ { + "$$hashKey": "object:1278", "colorMode": "custom", "fillColor": "rgba(255, 255, 255, 1)", "line": true, "lineColor": "rgba(216, 200, 27, 0.27)", "op": "gt", - "value": 0.5 + "value": 0.5, + "yaxis": "left" }, { + "$$hashKey": "object:1279", "colorMode": "custom", "fillColor": "rgba(255, 255, 255, 1)", "line": true, - "lineColor": "rgba(234, 112, 112, 0.22)", + "lineColor": "rgb(87, 6, 16)", + "op": "gt", + "value": 0.8, + "yaxis": "left" + }, + { + "$$hashKey": "object:1498", + "colorMode": "critical", + "fill": true, + "line": true, "op": "gt", - "value": 0.8 + "value": 1, + "yaxis": "left" } ], "timeFrom": null, @@ -703,7 +851,7 @@ "timeShift": null, "title": "CPU", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -717,6 +865,7 @@ }, "yaxes": [ { + "$$hashKey": "object:1250", "decimals": null, "format": "percentunit", "label": "", @@ -726,6 +875,7 @@ "show": true }, { + "$$hashKey": "object:1251", "format": "short", "logBase": 1, "max": null, @@ -744,16 +894,25 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "description": "Shows the time in which the given percentage of reactor ticks completed, over the sampled timespan", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 7, "w": 12, "x": 12, - "y": 2 + "y": 25 }, "hiddenSeries": false, - "id": 37, + "id": 105, + "interval": "", "legend": { "avg": false, "current": false, @@ -768,51 +927,57 @@ "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "paceLength": 10, "percentage": false, + "pluginVersion": "7.3.7", "pointradius": 5, "points": false, "renderer": "flot", - "seriesOverrides": [ - { - "alias": "/max$/", - "color": "#890F02", - "fill": 0, - "legend": false - } - ], + "seriesOverrides": [], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "process_open_fds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", + "expr": "histogram_quantile(0.99, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))", "format": "time_series", - "hide": false, + "interval": "", "intervalFactor": 2, - "legendFormat": "{{job}}-{{index}}", + "legendFormat": "{{job}}-{{index}} 99%", "refId": "A", "step": 20 }, 
{ - "expr": "process_max_fds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", + "expr": "histogram_quantile(0.95, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))", "format": "time_series", - "hide": true, - "intervalFactor": 2, - "legendFormat": "{{job}}-{{index}} max", - "refId": "B", - "step": 20 + "intervalFactor": 1, + "legendFormat": "{{job}}-{{index}} 95%", + "refId": "B" + }, + { + "expr": "histogram_quantile(0.90, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{job}}-{{index}} 90%", + "refId": "C" + }, + { + "expr": "rate(python_twisted_reactor_tick_time_sum{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]) / rate(python_twisted_reactor_tick_time_count{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{job}}-{{index}} mean", + "refId": "D" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Open FDs", + "title": "Reactor tick quantiles", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -826,7 +991,7 @@ }, "yaxes": [ { - "format": "none", + "format": "s", "label": null, "logBase": 1, "max": null, @@ -839,7 +1004,7 @@ "logBase": 1, "max": null, "min": null, - "show": true + "show": false } ], "yaxis": { @@ -855,6 +1020,13 @@ "datasource": "$datasource", "editable": true, "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 0, "fillGradient": 0, "grid": {}, @@ -862,7 +1034,7 @@ "h": 7, "w": 12, "x": 0, - "y": 9 + "y": 32 }, "hiddenSeries": false, "id": 34, @@ -880,10 +1052,11 @@ "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "paceLength": 10, "percentage": false, + "pluginVersion": "7.3.7", "pointradius": 5, "points": false, "renderer": "flot", @@ -895,11 +1068,18 @@ { "expr": "process_resident_memory_bytes{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", + "interval": "", "intervalFactor": 2, "legendFormat": "{{job}} {{index}}", "refId": "A", "step": 20, "target": "" + }, + { + "expr": "sum(process_resident_memory_bytes{instance=\"$instance\",job=~\"$job\",index=~\"$index\"})", + "interval": "", + "legendFormat": "total", + "refId": "B" } ], "thresholds": [], @@ -908,10 +1088,11 @@ "timeShift": null, "title": "Memory", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "cumulative" }, + "transformations": [], "type": "graph", "xaxis": { "buckets": null, @@ -947,18 +1128,23 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", - "description": "Shows the time in which the given percentage of reactor ticks completed, over the sampled timespan", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 7, "w": 12, "x": 12, - "y": 9 + "y": 32 }, "hiddenSeries": false, - "id": 105, - "interval": "", + "id": 49, "legend": { "avg": false, "current": false, @@ -973,54 +1159,40 @@ "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "paceLength": 10, "percentage": false, + "pluginVersion": "7.3.7", "pointradius": 5, "points": false, "renderer": "flot", - "seriesOverrides": [], 
+ "seriesOverrides": [ + { + "alias": "/^up/", + "legend": false, + "yaxis": 2 + } + ], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))", + "expr": "scrape_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", "format": "time_series", "interval": "", "intervalFactor": 2, - "legendFormat": "{{job}}-{{index}} 99%", + "legendFormat": "{{job}}-{{index}}", "refId": "A", "step": 20 - }, - { - "expr": "histogram_quantile(0.95, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} 95%", - "refId": "B" - }, - { - "expr": "histogram_quantile(0.90, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} 90%", - "refId": "C" - }, - { - "expr": "rate(python_twisted_reactor_tick_time_sum{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]) / rate(python_twisted_reactor_tick_time_count{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} mean", - "refId": "D" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Reactor tick quantiles", + "title": "Prometheus scrape time", "tooltip": { "shared": false, "sort": 0, @@ -1040,15 +1212,16 @@ "label": null, "logBase": 1, "max": null, - "min": null, + "min": "0", "show": true }, { - "format": "short", - "label": null, + "decimals": 0, + "format": "none", + "label": "", "logBase": 1, - "max": null, - "min": null, + "max": "0", + "min": "-1", "show": false } ], @@ -1063,13 +1236,20 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 0, "fillGradient": 0, "gridPos": { "h": 7, "w": 12, "x": 0, - "y": 16 + "y": 39 }, "hiddenSeries": false, "id": 53, @@ -1087,10 +1267,11 @@ "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "paceLength": 10, "percentage": false, + "pluginVersion": "7.3.7", "pointradius": 5, "points": false, "renderer": "flot", @@ -1113,7 +1294,7 @@ "timeShift": null, "title": "Up", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -1154,16 +1335,23 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 7, "w": 12, "x": 12, - "y": 16 + "y": 39 }, "hiddenSeries": false, - "id": 49, + "id": 120, "legend": { "avg": false, "current": false, @@ -1176,43 +1364,56 @@ "lines": true, "linewidth": 1, "links": [], - "nullPointMode": "null", + "nullPointMode": "null as zero", "options": { - "dataLinks": [] + "alertThreshold": true }, - "paceLength": 10, "percentage": false, - "pointradius": 5, + "pluginVersion": "7.3.7", + "pointradius": 2, "points": false, "renderer": "flot", - "seriesOverrides": [ - { - "alias": "/^up/", - "legend": false, - "yaxis": 2 - } - ], + "seriesOverrides": [], "spaceLength": 10, - "stack": false, + "stack": true, "steppedLine": false, 
"targets": [ { - "expr": "scrape_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", + "expr": "rate(synapse_http_server_response_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_response_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", + "format": "time_series", + "hide": false, + "instant": false, + "intervalFactor": 1, + "legendFormat": "{{job}}-{{index}} {{method}} {{servlet}} {{tag}}", + "refId": "A" + }, + { + "expr": "rate(synapse_background_process_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_background_process_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", + "hide": false, + "instant": false, "interval": "", - "intervalFactor": 2, - "legendFormat": "{{job}}-{{index}}", - "refId": "A", - "step": 20 + "intervalFactor": 1, + "legendFormat": "{{job}}-{{index}} {{name}}", + "refId": "B" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "gt", + "value": 1, + "yaxis": "left" } ], - "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Prometheus scrape time", + "title": "Stacked CPU usage", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -1226,21 +1427,22 @@ }, "yaxes": [ { - "format": "s", + "$$hashKey": "object:572", + "format": "percentunit", "label": null, "logBase": 1, "max": null, - "min": "0", + "min": null, "show": true }, { - "decimals": 0, - "format": "none", - "label": "", + "$$hashKey": "object:573", + "format": "short", + "label": null, "logBase": 1, - "max": "0", - "min": "-1", - "show": false + "max": null, + "min": null, + "show": true } ], "yaxis": { @@ -1254,13 +1456,20 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 7, "w": 12, "x": 0, - "y": 23 + "y": 46 }, "hiddenSeries": false, "id": 136, @@ -1278,9 +1487,10 @@ "linewidth": 1, "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.7", "pointradius": 2, "points": false, "renderer": "flot", @@ -1306,7 +1516,7 @@ "timeShift": null, "title": "Outgoing HTTP request rate", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -1340,6 +1550,90 @@ "align": false, "alignLevel": null } + } + ], + "repeat": null, + "title": "Process info", + "type": "row" + }, + { + "collapsed": true, + "datasource": "${DS_PROMETHEUS}", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 27 + }, + "id": 56, + "panels": [ + { + "cards": { + "cardPadding": -1, + "cardRound": 0 + }, + "color": { + "cardColor": "#b4ff00", + "colorScale": "sqrt", + "colorScheme": "interpolateInferno", + "exponent": 0.5, + "mode": "spectrum" + }, + "dataFormat": "tsbuckets", + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 21 + }, + "heatmap": {}, + "hideZeroBuckets": false, + "highlightCards": true, + "id": 85, + "legend": { + "show": false + }, + "links": [], + "reverseYBuckets": false, + "targets": [ + { + "expr": 
"sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\"}[$bucket_size])) by (le)", + "format": "heatmap", + "intervalFactor": 1, + "legendFormat": "{{le}}", + "refId": "A" + } + ], + "title": "Event Send Time (Including errors, across all workers)", + "tooltip": { + "show": true, + "showHistogram": true + }, + "type": "heatmap", + "xAxis": { + "show": true + }, + "xBucketNumber": null, + "xBucketSize": null, + "yAxis": { + "decimals": null, + "format": "s", + "logBase": 2, + "max": null, + "min": null, + "show": true, + "splitFactor": null + }, + "yBucketBound": "auto", + "yBucketNumber": null, + "yBucketSize": null }, { "aliasColors": {}, @@ -1347,79 +1641,74 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "description": "", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, + "grid": {}, "gridPos": { - "h": 7, + "h": 9, "w": 12, "x": 12, - "y": 23 + "y": 21 }, "hiddenSeries": false, - "id": 120, + "id": 33, "legend": { "avg": false, "current": false, "max": false, "min": false, - "show": true, + "show": false, "total": false, "values": false }, "lines": true, - "linewidth": 1, + "linewidth": 2, "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, + "paceLength": 10, "percentage": false, - "pointradius": 2, + "pluginVersion": "7.3.7", + "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, - "stack": true, + "stack": false, "steppedLine": false, "targets": [ { - "expr": "rate(synapse_http_server_response_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_response_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", - "format": "time_series", - "hide": false, - "instant": false, - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} {{method}} {{servlet}} {{tag}}", - "refId": "A" - }, - { - "expr": "rate(synapse_background_process_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_background_process_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", + "expr": "sum(rate(synapse_storage_events_persisted_events{instance=\"$instance\"}[$bucket_size])) without (job,index)", "format": "time_series", - "hide": false, - "instant": false, "interval": "", - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} {{name}}", - "refId": "B" - } - ], - "thresholds": [ - { - "colorMode": "critical", - "fill": true, - "line": true, - "op": "gt", - "value": 1, - "yaxis": "left" + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 20, + "target": "" } ], + "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Stacked CPU usage", + "title": "Events Persisted (all workers)", "tooltip": { - "shared": false, + "shared": true, "sort": 0, - "value_type": "individual" + "value_type": "cumulative" }, "type": "graph", "xaxis": { @@ -1431,16 +1720,16 @@ }, "yaxes": [ { - "format": "percentunit", - "label": null, + "$$hashKey": "object:102", + "format": "hertz", "logBase": 1, "max": null, "min": null, "show": true }, { + "$$hashKey": "object:103", "format": "short", - "label": null, "logBase": 1, "max": null, "min": null, @@ -1451,23 +1740,7 @@ "align": false, "alignLevel": null } - } - ], - "repeat": 
null, - "title": "Process info", - "type": "row" - }, - { - "collapsed": true, - "datasource": "${DS_PROMETHEUS}", - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 29 - }, - "id": 56, - "panels": [ + }, { "aliasColors": {}, "bars": false, @@ -1475,13 +1748,21 @@ "dashes": false, "datasource": "$datasource", "decimals": 1, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "fill": 1, + "fillGradient": 0, "gridPos": { "h": 7, "w": 12, "x": 0, - "y": 58 + "y": 30 }, + "hiddenSeries": false, "id": 40, "legend": { "avg": false, @@ -1496,7 +1777,11 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "percentage": false, + "pluginVersion": "7.3.7", "pointradius": 5, "points": false, "renderer": "flot", @@ -1561,13 +1846,21 @@ "dashes": false, "datasource": "$datasource", "decimals": 1, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "fill": 1, + "fillGradient": 0, "gridPos": { "h": 7, "w": 12, "x": 12, - "y": 58 + "y": 30 }, + "hiddenSeries": false, "id": 46, "legend": { "avg": false, @@ -1582,7 +1875,11 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "percentage": false, + "pluginVersion": "7.3.7", "pointradius": 5, "points": false, "renderer": "flot", @@ -1651,13 +1948,21 @@ "dashes": false, "datasource": "$datasource", "decimals": 1, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "fill": 1, + "fillGradient": 0, "gridPos": { "h": 7, "w": 12, "x": 0, - "y": 65 + "y": 37 }, + "hiddenSeries": false, "id": 44, "legend": { "alignAsTable": true, @@ -1675,7 +1980,11 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "percentage": false, + "pluginVersion": "7.3.7", "pointradius": 5, "points": false, "renderer": "flot", @@ -1741,13 +2050,21 @@ "dashes": false, "datasource": "$datasource", "decimals": 1, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "fill": 1, + "fillGradient": 0, "gridPos": { "h": 7, "w": 12, "x": 12, - "y": 65 + "y": 37 }, + "hiddenSeries": false, "id": 45, "legend": { "alignAsTable": true, @@ -1765,7 +2082,11 @@ "linewidth": 1, "links": [], "nullPointMode": "null", + "options": { + "alertThreshold": true + }, "percentage": false, + "pluginVersion": "7.3.7", "pointradius": 5, "points": false, "renderer": "flot", @@ -1823,52 +2144,35 @@ "align": false, "alignLevel": null } - } - ], - "repeat": null, - "title": "Event persist rates", - "type": "row" - }, - { - "collapsed": true, - "datasource": "${DS_PROMETHEUS}", - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 30 - }, - "id": 57, - "panels": [ + }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$datasource", - "decimals": null, - "editable": true, - "error": false, - "fill": 2, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 0, "fillGradient": 0, - "grid": {}, "gridPos": { - "h": 8, + "h": 9, "w": 12, "x": 0, - "y": 31 + "y": 44 }, "hiddenSeries": false, - "id": 4, + "id": 118, "legend": { - "alignAsTable": true, "avg": false, "current": false, - "hideEmpty": false, - "hideZero": true, "max": false, "min": false, - "rightSide": false, "show": true, "total": false, "values": false @@ -1878,50 +2182,212 @@ "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, + "paceLength": 10, "percentage": false, + "pluginVersion": 
"7.3.7", "pointradius": 5, "points": false, "renderer": "flot", - "seriesOverrides": [], + "repeatDirection": "h", + "seriesOverrides": [ + { + "alias": "mean", + "linewidth": 2 + } + ], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "rate(synapse_http_server_requests_received{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", + "expr": "histogram_quantile(0.99, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method))", "format": "time_series", "interval": "", - "intervalFactor": 2, - "legendFormat": "{{job}}-{{index}} {{method}} {{servlet}} {{tag}}", - "refId": "A", - "step": 20 - } - ], - "thresholds": [ + "intervalFactor": 1, + "legendFormat": "{{job}}-{{index}} 99%", + "refId": "A" + }, { - "colorMode": "custom", - "fill": true, - "fillColor": "rgba(216, 200, 27, 0.27)", - "op": "gt", - "value": 100 + "expr": "histogram_quantile(0.95, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{job}}-{{index}} 95%", + "refId": "B" }, { - "colorMode": "custom", - "fill": true, - "fillColor": "rgba(234, 112, 112, 0.22)", - "op": "gt", - "value": 250 - } - ], + "expr": "histogram_quantile(0.90, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{job}}-{{index}} 90%", + "refId": "C" + }, + { + "expr": "histogram_quantile(0.50, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{job}}-{{index}} 50%", + "refId": "D" + }, + { + "expr": "sum(rate(synapse_http_server_response_time_seconds_sum{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method) / sum(rate(synapse_http_server_response_time_seconds_count{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{job}}-{{index}} mean", + "refId": "E" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Event send time quantiles by worker", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "repeat": null, + "title": "Event persistence", + "type": "row" + }, + { + "collapsed": true, + "datasource": "${DS_PROMETHEUS}", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 28 + }, + "id": 
57, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "decimals": null, + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 2, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 31 + }, + "hiddenSeries": false, + "id": 4, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": true, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.3.7", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(synapse_http_server_requests_received{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{job}}-{{index}} {{method}} {{servlet}} {{tag}}", + "refId": "A", + "step": 20 + } + ], + "thresholds": [ + { + "colorMode": "custom", + "fill": true, + "fillColor": "rgba(216, 200, 27, 0.27)", + "op": "gt", + "value": 100, + "yaxis": "left" + }, + { + "colorMode": "custom", + "fill": true, + "fillColor": "rgba(234, 112, 112, 0.22)", + "op": "gt", + "value": 250, + "yaxis": "left" + } + ], "timeFrom": null, "timeRegions": [], "timeShift": null, "title": "Request Count by arrival time", "tooltip": { "shared": false, - "sort": 0, + "sort": 2, "value_type": "individual" }, "type": "graph", @@ -1961,6 +2427,13 @@ "datasource": "$datasource", "editable": true, "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "grid": {}, @@ -1986,9 +2459,10 @@ "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.7", "pointradius": 5, "points": false, "renderer": "flot", @@ -2014,7 +2488,7 @@ "title": "Top 10 Request Counts", "tooltip": { "shared": false, - "sort": 0, + "sort": 2, "value_type": "cumulative" }, "type": "graph", @@ -2055,6 +2529,13 @@ "decimals": null, "editable": true, "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 2, "fillGradient": 0, "grid": {}, @@ -2084,9 +2565,10 @@ "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.7", "pointradius": 5, "points": false, "renderer": "flot", @@ -2129,7 +2611,7 @@ "title": "Total CPU Usage by Endpoint", "tooltip": { "shared": false, - "sort": 0, + "sort": 2, "value_type": "individual" }, "type": "graph", @@ -2170,7 +2652,14 @@ "decimals": null, "editable": true, "error": false, - "fill": 2, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 0, "fillGradient": 0, "grid": {}, "gridPos": { @@ -2199,9 +2688,10 @@ "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.7", "pointradius": 5, "points": false, "renderer": "flot", @@ -2214,7 +2704,7 @@ "expr": 
"(rate(synapse_http_server_in_flight_requests_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_in_flight_requests_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) / rate(synapse_http_server_requests_received{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "interval": "", - "intervalFactor": 2, + "intervalFactor": 1, "legendFormat": "{{job}}-{{index}} {{method}} {{servlet}} {{tag}}", "refId": "A", "step": 20 @@ -2226,14 +2716,16 @@ "fill": true, "fillColor": "rgba(216, 200, 27, 0.27)", "op": "gt", - "value": 100 + "value": 100, + "yaxis": "left" }, { "colorMode": "custom", "fill": true, "fillColor": "rgba(234, 112, 112, 0.22)", "op": "gt", - "value": 250 + "value": 250, + "yaxis": "left" } ], "timeFrom": null, @@ -2242,7 +2734,7 @@ "title": "Average CPU Usage by Endpoint", "tooltip": { "shared": false, - "sort": 0, + "sort": 2, "value_type": "individual" }, "type": "graph", @@ -2282,6 +2774,13 @@ "datasource": "$datasource", "editable": true, "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "grid": {}, @@ -2310,9 +2809,10 @@ "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.7", "pointradius": 5, "points": false, "renderer": "flot", @@ -2325,7 +2825,7 @@ "expr": "rate(synapse_http_server_in_flight_requests_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "time_series", "interval": "", - "intervalFactor": 2, + "intervalFactor": 1, "legendFormat": "{{job}}-{{index}} {{method}} {{servlet}} {{tag}}", "refId": "A", "step": 20 @@ -2338,7 +2838,7 @@ "title": "DB Usage by endpoint", "tooltip": { "shared": false, - "sort": 0, + "sort": 2, "value_type": "cumulative" }, "type": "graph", @@ -2379,6 +2879,13 @@ "decimals": null, "editable": true, "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 2, "fillGradient": 0, "grid": {}, @@ -2408,9 +2915,10 @@ "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.7", "pointradius": 5, "points": false, "renderer": "flot", @@ -2424,7 +2932,7 @@ "format": "time_series", "hide": false, "interval": "", - "intervalFactor": 2, + "intervalFactor": 1, "legendFormat": "{{job}}-{{index}} {{method}} {{servlet}}", "refId": "A", "step": 20 @@ -2437,7 +2945,7 @@ "title": "Non-sync avg response time", "tooltip": { "shared": false, - "sort": 0, + "sort": 2, "value_type": "individual" }, "type": "graph", @@ -2475,6 +2983,13 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -2499,13 +3014,21 @@ "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.7", "pointradius": 5, "points": false, "renderer": "flot", - "seriesOverrides": [], + "seriesOverrides": [ + { + "alias": "Total", + "color": "rgb(255, 255, 255)", + "fill": 0, + "linewidth": 3 + } + ], "spaceLength": 10, "stack": false, "steppedLine": false, @@ -2517,6 +3040,12 @@ "intervalFactor": 1, "legendFormat": "{{job}}-{{index}} {{method}} {{servlet}}", 
"refId": "A" + }, + { + "expr": "sum(avg_over_time(synapse_http_server_in_flight_requests_count{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]))", + "interval": "", + "legendFormat": "Total", + "refId": "B" } ], "thresholds": [], @@ -2526,7 +3055,7 @@ "title": "Requests in flight", "tooltip": { "shared": false, - "sort": 0, + "sort": 2, "value_type": "individual" }, "type": "graph", @@ -2572,7 +3101,7 @@ "h": 1, "w": 24, "x": 0, - "y": 31 + "y": 29 }, "id": 97, "panels": [ @@ -2582,6 +3111,13 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -2605,11 +3141,9 @@ "linewidth": 1, "links": [], "nullPointMode": "null", - "options": { - "dataLinks": [] - }, "paceLength": 10, "percentage": false, + "pluginVersion": "7.1.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -2674,6 +3208,13 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -2697,11 +3238,9 @@ "linewidth": 1, "links": [], "nullPointMode": "null", - "options": { - "dataLinks": [] - }, "paceLength": 10, "percentage": false, + "pluginVersion": "7.1.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -2717,12 +3256,6 @@ "intervalFactor": 1, "legendFormat": "{{job}}-{{index}} {{name}}", "refId": "A" - }, - { - "expr": "", - "format": "time_series", - "intervalFactor": 1, - "refId": "B" } ], "thresholds": [], @@ -2731,7 +3264,7 @@ "timeShift": null, "title": "DB usage by background jobs (including scheduling time)", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -2772,6 +3305,13 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -2794,10 +3334,8 @@ "lines": true, "linewidth": 1, "nullPointMode": "null", - "options": { - "dataLinks": [] - }, "percentage": false, + "pluginVersion": "7.1.3", "pointradius": 2, "points": false, "renderer": "flot", @@ -2864,7 +3402,7 @@ "h": 1, "w": 24, "x": 0, - "y": 32 + "y": 30 }, "id": 81, "panels": [ @@ -2874,13 +3412,20 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 9, "w": 12, "x": 0, - "y": 6 + "y": 33 }, "hiddenSeries": false, "id": 79, @@ -2897,11 +3442,9 @@ "linewidth": 1, "links": [], "nullPointMode": "null", - "options": { - "dataLinks": [] - }, "paceLength": 10, "percentage": false, + "pluginVersion": "7.1.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -2970,13 +3513,20 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 9, "w": 12, "x": 12, - "y": 6 + "y": 33 }, "hiddenSeries": false, "id": 83, @@ -2993,11 +3543,9 @@ "linewidth": 1, "links": [], "nullPointMode": "null", - "options": { - "dataLinks": [] - }, "paceLength": 10, "percentage": false, + "pluginVersion": "7.1.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -3068,13 +3616,20 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + 
"fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 9, "w": 12, "x": 0, - "y": 15 + "y": 42 }, "hiddenSeries": false, "id": 109, @@ -3091,11 +3646,9 @@ "linewidth": 1, "links": [], "nullPointMode": "null", - "options": { - "dataLinks": [] - }, "paceLength": 10, "percentage": false, + "pluginVersion": "7.1.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -3167,13 +3720,20 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 9, "w": 12, "x": 12, - "y": 15 + "y": 42 }, "hiddenSeries": false, "id": 111, @@ -3190,11 +3750,9 @@ "linewidth": 1, "links": [], "nullPointMode": "null", - "options": { - "dataLinks": [] - }, "paceLength": 10, "percentage": false, + "pluginVersion": "7.1.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -3258,18 +3816,25 @@ "bars": false, "dashLength": 10, "dashes": false, - "datasource": "$datasource", - "description": "Number of events queued up on the master process for processing by the federation sender", + "datasource": "${DS_PROMETHEUS}", + "description": "The number of events in the in-memory queues ", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { - "h": 9, + "h": 8, "w": 12, "x": 0, - "y": 24 + "y": 51 }, "hiddenSeries": false, - "id": 140, + "id": 142, "legend": { "avg": false, "current": false, @@ -3281,14 +3846,112 @@ }, "lines": true, "linewidth": 1, - "links": [], "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "paceLength": 10, "percentage": false, - "pointradius": 5, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "synapse_federation_transaction_queue_pending_pdus{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", + "interval": "", + "legendFormat": "pending PDUs {{job}}-{{index}}", + "refId": "A" + }, + { + "expr": "synapse_federation_transaction_queue_pending_edus{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", + "interval": "", + "legendFormat": "pending EDUs {{job}}-{{index}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "In-memory federation transmission queues", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": "events", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "Number of events queued up on the master process for processing by the federation sender", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 51 + }, + "hiddenSeries": false, + "id": 140, + "legend": { + "avg": false, + "current": false, + 
"max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "paceLength": 10, + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 5, "points": false, "renderer": "flot", "seriesOverrides": [], @@ -3391,68 +4054,243 @@ "alignLevel": null } }, + { + "cards": { + "cardPadding": -1, + "cardRound": null + }, + "color": { + "cardColor": "#b4ff00", + "colorScale": "sqrt", + "colorScheme": "interpolateInferno", + "exponent": 0.5, + "min": 0, + "mode": "spectrum" + }, + "dataFormat": "tsbuckets", + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 59 + }, + "heatmap": {}, + "hideZeroBuckets": false, + "highlightCards": true, + "id": 166, + "legend": { + "show": false + }, + "links": [], + "reverseYBuckets": false, + "targets": [ + { + "expr": "sum(rate(synapse_event_processing_lag_by_event_bucket{instance=\"$instance\",name=\"federation_sender\"}[$bucket_size])) by (le)", + "format": "heatmap", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{ le }}", + "refId": "A" + } + ], + "title": "Federation send PDU lag", + "tooltip": { + "show": true, + "showHistogram": true + }, + "tooltipDecimals": 2, + "type": "heatmap", + "xAxis": { + "show": true + }, + "xBucketNumber": null, + "xBucketSize": null, + "yAxis": { + "decimals": 0, + "format": "s", + "logBase": 1, + "max": null, + "min": null, + "show": true, + "splitFactor": null + }, + "yBucketBound": "auto", + "yBucketNumber": null, + "yBucketSize": null + }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, - "datasource": "${DS_PROMETHEUS}", - "description": "The number of events in the in-memory queues ", - "fill": 1, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 0, "fillGradient": 0, "gridPos": { - "h": 8, + "h": 9, "w": 12, "x": 12, - "y": 24 + "y": 60 }, "hiddenSeries": false, - "id": 142, + "id": 162, "legend": { "avg": false, "current": false, "max": false, "min": false, + "rightSide": false, "show": true, "total": false, "values": false }, "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, + "linewidth": 0, + "links": [], + "nullPointMode": "connected", + "paceLength": 10, "percentage": false, - "pointradius": 2, + "pluginVersion": "7.1.3", + "pointradius": 5, "points": false, "renderer": "flot", - "seriesOverrides": [], + "seriesOverrides": [ + { + "alias": "Avg", + "fill": 0, + "linewidth": 3 + }, + { + "alias": "99%", + "color": "#C4162A", + "fillBelowTo": "90%" + }, + { + "alias": "90%", + "color": "#FF7383", + "fillBelowTo": "75%" + }, + { + "alias": "75%", + "color": "#FFEE52", + "fillBelowTo": "50%" + }, + { + "alias": "50%", + "color": "#73BF69", + "fillBelowTo": "25%" + }, + { + "alias": "25%", + "color": "#1F60C4", + "fillBelowTo": "5%" + }, + { + "alias": "5%", + "lines": false + }, + { + "alias": "Average", + "color": "rgb(255, 255, 255)", + "lines": true, + "linewidth": 3 + } + ], "spaceLength": 10, "stack": false, "steppedLine": false, "targets": [ { - "expr": "synapse_federation_transaction_queue_pending_pdus{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", + "expr": "histogram_quantile(0.99, 
sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", + "format": "time_series", "interval": "", - "legendFormat": "pending PDUs {{job}}-{{index}}", + "intervalFactor": 1, + "legendFormat": "99%", + "refId": "D" + }, + { + "expr": "histogram_quantile(0.9, sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "90%", "refId": "A" }, { - "expr": "synapse_federation_transaction_queue_pending_edus{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", + "expr": "histogram_quantile(0.75, sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", + "format": "time_series", "interval": "", - "legendFormat": "pending EDUs {{job}}-{{index}}", + "intervalFactor": 1, + "legendFormat": "75%", + "refId": "C" + }, + { + "expr": "histogram_quantile(0.5, sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "50%", "refId": "B" + }, + { + "expr": "histogram_quantile(0.25, sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", + "interval": "", + "legendFormat": "25%", + "refId": "F" + }, + { + "expr": "histogram_quantile(0.05, sum(rate(synapse_event_processing_lag_by_event_bucket{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) by (le))", + "interval": "", + "legendFormat": "5%", + "refId": "G" + }, + { + "expr": "sum(rate(synapse_event_processing_lag_by_event_sum{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size])) / sum(rate(synapse_event_processing_lag_by_event_count{name='federation_sender',index=~\"$index\",instance=\"$instance\"}[$bucket_size]))", + "interval": "", + "legendFormat": "Average", + "refId": "H" + } + ], + "thresholds": [ + { + "colorMode": "warning", + "fill": false, + "line": true, + "op": "gt", + "value": 0.25, + "yaxis": "left" + }, + { + "colorMode": "critical", + "fill": false, + "line": true, + "op": "gt", + "value": 1, + "yaxis": "left" } ], - "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "In-memory federation transmission queues", + "title": "Federation send PDU lag quantiles", "tooltip": { "shared": true, - "sort": 0, + "sort": 2, "value_type": "individual" }, "type": "graph", @@ -3465,21 +4303,20 @@ }, "yaxes": [ { - "$$hashKey": "object:317", - "format": "short", - "label": "events", + "decimals": null, + "format": "s", + "label": "", "logBase": 1, "max": null, "min": "0", "show": true }, { - "$$hashKey": "object:318", - "format": "short", + "format": "hertz", "label": "", "logBase": 1, "max": null, - "min": null, + "min": "0", "show": true } ], @@ -3487,7 +4324,79 @@ "align": false, "alignLevel": null } - } + }, + { + "cards": { + "cardPadding": -1, + "cardRound": null + }, + "color": { + "cardColor": "#b4ff00", + "colorScale": "sqrt", + "colorScheme": "interpolateInferno", + "exponent": 0.5, + "min": 0, + "mode": "spectrum" + }, + "dataFormat": "tsbuckets", + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {} + }, + 
"overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 68 + }, + "heatmap": {}, + "hideZeroBuckets": false, + "highlightCards": true, + "id": 164, + "legend": { + "show": false + }, + "links": [], + "reverseYBuckets": false, + "targets": [ + { + "expr": "sum(rate(synapse_federation_server_pdu_process_time_bucket{instance=\"$instance\"}[$bucket_size])) by (le)", + "format": "heatmap", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{ le }}", + "refId": "A" + } + ], + "title": "Handle inbound PDU time", + "tooltip": { + "show": true, + "showHistogram": true + }, + "tooltipDecimals": 2, + "type": "heatmap", + "xAxis": { + "show": true + }, + "xBucketNumber": null, + "xBucketSize": null, + "yAxis": { + "decimals": 0, + "format": "s", + "logBase": 1, + "max": null, + "min": null, + "show": true, + "splitFactor": null + }, + "yBucketBound": "auto", + "yBucketNumber": null, + "yBucketSize": null + } ], "title": "Federation", "type": "row" @@ -3499,7 +4408,7 @@ "h": 1, "w": 24, "x": 0, - "y": 33 + "y": 31 }, "id": 60, "panels": [ @@ -3509,6 +4418,13 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -3532,11 +4448,9 @@ "linewidth": 1, "links": [], "nullPointMode": "null", - "options": { - "dataLinks": [] - }, "paceLength": 10, "percentage": false, + "pluginVersion": "7.1.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -3611,6 +4525,13 @@ "dashes": false, "datasource": "$datasource", "description": "", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { @@ -3634,10 +4555,8 @@ "lines": true, "linewidth": 1, "nullPointMode": "null", - "options": { - "dataLinks": [] - }, "percentage": false, + "pluginVersion": "7.1.3", "pointradius": 2, "points": false, "renderer": "flot", @@ -3705,7 +4624,7 @@ "h": 1, "w": 24, "x": 0, - "y": 34 + "y": 32 }, "id": 58, "panels": [ @@ -3715,13 +4634,20 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 7, "w": 12, "x": 0, - "y": 79 + "y": 8 }, "hiddenSeries": false, "id": 48, @@ -3739,10 +4665,11 @@ "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "paceLength": 10, "percentage": false, + "pluginVersion": "7.3.7", "pointradius": 5, "points": false, "renderer": "flot", @@ -3809,13 +4736,20 @@ "dashes": false, "datasource": "$datasource", "description": "Shows the time in which the given percentage of database queries were scheduled, over the sampled timespan", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 7, "w": 12, "x": 12, - "y": 79 + "y": 8 }, "hiddenSeries": false, "id": 104, @@ -3834,10 +4768,11 @@ "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "paceLength": 10, "percentage": false, + "pluginVersion": "7.3.7", "pointradius": 5, "points": false, "renderer": "flot", @@ -3928,6 +4863,13 @@ "datasource": "$datasource", "editable": true, "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 0, "fillGradient": 0, "grid": {}, @@ -3935,7 +4877,7 @@ "h": 7, "w": 12, "x": 
0, - "y": 86 + "y": 15 }, "hiddenSeries": false, "id": 10, @@ -3955,10 +4897,11 @@ "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "paceLength": 10, "percentage": false, + "pluginVersion": "7.3.7", "pointradius": 5, "points": false, "renderer": "flot", @@ -4024,6 +4967,13 @@ "datasource": "$datasource", "editable": true, "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "grid": {}, @@ -4031,7 +4981,7 @@ "h": 7, "w": 12, "x": 12, - "y": 86 + "y": 15 }, "hiddenSeries": false, "id": 11, @@ -4051,10 +5001,11 @@ "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "paceLength": 10, "percentage": false, + "pluginVersion": "7.3.7", "pointradius": 5, "points": false, "renderer": "flot", @@ -4078,7 +5029,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Top DB transactions by total txn time", + "title": "DB transactions by total txn time", "tooltip": { "shared": false, "sort": 0, @@ -4112,6 +5063,111 @@ "align": false, "alignLevel": null } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 22 + }, + "hiddenSeries": false, + "id": 180, + "legend": { + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": true, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": false + }, + "paceLength": 10, + "percentage": false, + "pluginVersion": "7.3.7", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(synapse_storage_transaction_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_transaction_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", + "format": "time_series", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{job}}-{{index}} {{desc}}", + "refId": "A", + "step": 20 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Average DB txn time", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } } ], "repeat": null, @@ -4125,7 +5181,7 @@ "h": 1, "w": 24, "x": 0, - "y": 35 + "y": 33 }, "id": 59, "panels": [ @@ -4137,6 +5193,13 @@ "datasource": "$datasource", "editable": true, "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "grid": {}, @@ -4144,7 +5207,7 @@ "h": 13, "w": 12, "x": 0, - "y": 80 + "y": 9 }, "hiddenSeries": false, "id": 12, @@ -4162,11 +5225,9 @@ "linewidth": 2, 
"links": [], "nullPointMode": "null", - "options": { - "dataLinks": [] - }, "paceLength": 10, "percentage": false, + "pluginVersion": "7.1.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -4191,8 +5252,8 @@ "timeShift": null, "title": "Total CPU Usage by Block", "tooltip": { - "shared": false, - "sort": 0, + "shared": true, + "sort": 2, "value_type": "cumulative" }, "type": "graph", @@ -4232,6 +5293,13 @@ "datasource": "$datasource", "editable": true, "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "grid": {}, @@ -4239,7 +5307,7 @@ "h": 13, "w": 12, "x": 12, - "y": 80 + "y": 9 }, "hiddenSeries": false, "id": 26, @@ -4257,11 +5325,9 @@ "linewidth": 2, "links": [], "nullPointMode": "null", - "options": { - "dataLinks": [] - }, "paceLength": 10, "percentage": false, + "pluginVersion": "7.1.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -4286,8 +5352,8 @@ "timeShift": null, "title": "Average CPU Time per Block", "tooltip": { - "shared": false, - "sort": 0, + "shared": true, + "sort": 2, "value_type": "cumulative" }, "type": "graph", @@ -4327,6 +5393,13 @@ "datasource": "$datasource", "editable": true, "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "grid": {}, @@ -4334,7 +5407,7 @@ "h": 13, "w": 12, "x": 0, - "y": 93 + "y": 22 }, "hiddenSeries": false, "id": 13, @@ -4352,11 +5425,9 @@ "linewidth": 2, "links": [], "nullPointMode": "null", - "options": { - "dataLinks": [] - }, "paceLength": 10, "percentage": false, + "pluginVersion": "7.1.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -4381,8 +5452,8 @@ "timeShift": null, "title": "Total DB Usage by Block", "tooltip": { - "shared": false, - "sort": 0, + "shared": true, + "sort": 2, "value_type": "cumulative" }, "type": "graph", @@ -4423,6 +5494,13 @@ "description": "The time each database transaction takes to execute, on average, broken down by metrics block.", "editable": true, "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "grid": {}, @@ -4430,7 +5508,7 @@ "h": 13, "w": 12, "x": 12, - "y": 93 + "y": 22 }, "hiddenSeries": false, "id": 27, @@ -4448,11 +5526,9 @@ "linewidth": 2, "links": [], "nullPointMode": "null", - "options": { - "dataLinks": [] - }, "paceLength": 10, "percentage": false, + "pluginVersion": "7.1.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -4477,8 +5553,8 @@ "timeShift": null, "title": "Average Database Transaction time, by Block", "tooltip": { - "shared": false, - "sort": 0, + "shared": true, + "sort": 2, "value_type": "cumulative" }, "type": "graph", @@ -4518,6 +5594,13 @@ "datasource": "$datasource", "editable": true, "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "grid": {}, @@ -4525,7 +5608,7 @@ "h": 13, "w": 12, "x": 0, - "y": 106 + "y": 35 }, "hiddenSeries": false, "id": 28, @@ -4542,11 +5625,9 @@ "linewidth": 2, "links": [], "nullPointMode": "null", - "options": { - "dataLinks": [] - }, "paceLength": 10, "percentage": false, + "pluginVersion": "7.1.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -4612,6 +5693,13 @@ "datasource": "$datasource", "editable": true, "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, 
"fillGradient": 0, "grid": {}, @@ -4619,7 +5707,7 @@ "h": 13, "w": 12, "x": 12, - "y": 106 + "y": 35 }, "hiddenSeries": false, "id": 25, @@ -4636,11 +5724,9 @@ "linewidth": 2, "links": [], "nullPointMode": "null", - "options": { - "dataLinks": [] - }, "paceLength": 10, "percentage": false, + "pluginVersion": "7.1.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -4697,49 +5783,33 @@ "align": false, "alignLevel": null } - } - ], - "repeat": null, - "title": "Per-block metrics", - "type": "row" - }, - { - "collapsed": true, - "datasource": "${DS_PROMETHEUS}", - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 36 - }, - "id": 61, - "panels": [ + }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$datasource", - "decimals": 2, - "editable": true, - "error": false, - "fill": 0, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, "fillGradient": 0, - "grid": {}, "gridPos": { - "h": 10, + "h": 15, "w": 12, "x": 0, - "y": 37 + "y": 48 }, "hiddenSeries": false, - "id": 1, + "id": 154, "legend": { "alignAsTable": true, "avg": false, "current": false, - "hideEmpty": true, - "hideZero": false, "max": false, "min": false, "show": true, @@ -4747,13 +5817,130 @@ "values": false }, "lines": true, - "linewidth": 2, - "links": [], + "linewidth": 1, "nullPointMode": "null", - "options": { - "dataLinks": [] - }, "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(synapse_util_metrics_block_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", + "interval": "", + "legendFormat": "{{job}}-{{index}} {{block_name}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Block count", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "hertz", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "repeat": null, + "title": "Per-block metrics", + "type": "row" + }, + { + "collapsed": true, + "datasource": "${DS_PROMETHEUS}", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 34 + }, + "id": 61, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "decimals": 2, + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 84 + }, + "hiddenSeries": false, + "id": 1, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.3.7", "pointradius": 5, "points": false, "renderer": "flot", @@ -4821,6 +6008,13 @@ "datasource": 
"$datasource", "editable": true, "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "grid": {}, @@ -4828,7 +6022,7 @@ "h": 10, "w": 12, "x": 12, - "y": 37 + "y": 84 }, "hiddenSeries": false, "id": 8, @@ -4848,9 +6042,10 @@ "links": [], "nullPointMode": "connected", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.7", "pointradius": 5, "points": false, "renderer": "flot", @@ -4917,6 +6112,13 @@ "datasource": "$datasource", "editable": true, "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "grid": {}, @@ -4924,7 +6126,7 @@ "h": 10, "w": 12, "x": 0, - "y": 47 + "y": 94 }, "hiddenSeries": false, "id": 38, @@ -4944,9 +6146,10 @@ "links": [], "nullPointMode": "connected", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.7", "pointradius": 5, "points": false, "renderer": "flot", @@ -5010,13 +6213,20 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 10, "w": 12, "x": 12, - "y": 47 + "y": 94 }, "hiddenSeries": false, "id": 39, @@ -5035,9 +6245,10 @@ "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.7", "pointradius": 5, "points": false, "renderer": "flot", @@ -5102,13 +6313,20 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 9, "w": 12, "x": 0, - "y": 57 + "y": 104 }, "hiddenSeries": false, "id": 65, @@ -5127,9 +6345,10 @@ "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.7", "pointradius": 5, "points": false, "renderer": "flot", @@ -5200,9 +6419,9 @@ "h": 1, "w": 24, "x": 0, - "y": 37 + "y": 35 }, - "id": 62, + "id": 148, "panels": [ { "aliasColors": {}, @@ -5210,16 +6429,23 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { - "h": 9, + "h": 8, "w": 12, "x": 0, - "y": 121 + "y": 29 }, "hiddenSeries": false, - "id": 91, + "id": 146, "legend": { "avg": false, "current": false, @@ -5231,26 +6457,24 @@ }, "lines": true, "linewidth": 1, - "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, - "pointradius": 5, + "pluginVersion": "7.3.7", + "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], "spaceLength": 10, - "stack": true, + "stack": false, "steppedLine": false, "targets": [ { - "expr": "rate(python_gc_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[10m])", - "format": "time_series", - "instant": false, - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} gen {{gen}}", + "expr": "synapse_util_caches_response_cache:size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", + "interval": "", + "legendFormat": "{{name}} {{job}}-{{index}}", "refId": "A" } ], @@ -5258,9 +6482,9 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Total GC time by 
bucket (10m smoothing)", + "title": "Response cache size", "tooltip": { - "shared": true, + "shared": false, "sort": 0, "value_type": "individual" }, @@ -5274,12 +6498,11 @@ }, "yaxes": [ { - "decimals": null, - "format": "percentunit", + "format": "short", "label": null, "logBase": 1, "max": null, - "min": "0", + "min": null, "show": true }, { @@ -5302,22 +6525,24 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", - "decimals": 3, - "editable": true, - "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, - "grid": {}, "gridPos": { - "h": 9, + "h": 8, "w": 12, "x": 12, - "y": 121 + "y": 29 }, "hiddenSeries": false, - "id": 21, + "id": 150, "legend": { - "alignAsTable": true, "avg": false, "current": false, "max": false, @@ -5327,14 +6552,14 @@ "values": false }, "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "null as zero", + "linewidth": 1, + "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, - "pointradius": 5, + "pluginVersion": "7.3.7", + "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], @@ -5343,24 +6568,27 @@ "steppedLine": false, "targets": [ { - "expr": "rate(python_gc_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(python_gc_time_count[$bucket_size])", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{job}} {{index}} gen {{gen}} ", - "refId": "A", - "step": 20, - "target": "" + "expr": "rate(synapse_util_caches_response_cache:hits{instance=\"$instance\", job=~\"$job\", index=~\"$index\"}[$bucket_size])/rate(synapse_util_caches_response_cache:total{instance=\"$instance\", job=~\"$job\", index=~\"$index\"}[$bucket_size])", + "interval": "", + "legendFormat": "{{name}} {{job}}-{{index}}", + "refId": "A" + }, + { + "expr": "", + "interval": "", + "legendFormat": "", + "refId": "B" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Average GC Time Per Collection", + "title": "Response cache hit rate", "tooltip": { "shared": false, "sort": 0, - "value_type": "cumulative" + "value_type": "individual" }, "type": "graph", "xaxis": { @@ -5372,14 +6600,17 @@ }, "yaxes": [ { - "format": "s", + "decimals": null, + "format": "percentunit", + "label": null, "logBase": 1, - "max": null, - "min": null, + "max": "1", + "min": "0", "show": true }, { "format": "short", + "label": null, "logBase": 1, "max": null, "min": null, @@ -5390,29 +6621,48 @@ "align": false, "alignLevel": null } - }, + } + ], + "title": "Response caches", + "type": "row" + }, + { + "collapsed": true, + "datasource": "${DS_PROMETHEUS}", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 36 + }, + "id": 62, + "panels": [ { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$datasource", - "description": "'gen 0' shows the number of objects allocated since the last gen0 GC.\n'gen 1' / 'gen 2' show the number of gen0/gen1 GCs since the last gen1/gen2 GC.", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 9, "w": 12, "x": 0, - "y": 130 + "y": 30 }, "hiddenSeries": false, - "id": 89, + "id": 91, "legend": { "avg": false, "current": false, - "hideEmpty": true, - "hideZero": false, "max": false, "min": false, "show": true, @@ -5424,25 +6674,22 @@ "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] 
+ "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.7", "pointradius": 5, "points": false, "renderer": "flot", - "seriesOverrides": [ - { - "alias": "/gen 0$/", - "yaxis": 2 - } - ], + "seriesOverrides": [], "spaceLength": 10, - "stack": false, + "stack": true, "steppedLine": false, "targets": [ { - "expr": "python_gc_counts{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}", + "expr": "rate(python_gc_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[10m])", "format": "time_series", + "instant": false, "intervalFactor": 1, "legendFormat": "{{job}}-{{index}} gen {{gen}}", "refId": "A" @@ -5452,9 +6699,9 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Allocation counts", + "title": "Total GC time by bucket (10m smoothing)", "tooltip": { - "shared": false, + "shared": true, "sort": 0, "value_type": "individual" }, @@ -5468,17 +6715,17 @@ }, "yaxes": [ { - "format": "short", - "label": "Gen N-1 GCs since last Gen N GC", + "decimals": null, + "format": "percentunit", + "label": null, "logBase": 1, "max": null, - "min": null, + "min": "0", "show": true }, { - "decimals": null, "format": "short", - "label": "Objects since last Gen 0 GC", + "label": null, "logBase": 1, "max": null, "min": null, @@ -5496,17 +6743,29 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "decimals": 3, + "editable": true, + "error": false, + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, + "grid": {}, "gridPos": { "h": 9, "w": 12, "x": 12, - "y": 130 + "y": 30 }, "hiddenSeries": false, - "id": 93, + "id": 21, "legend": { + "alignAsTable": true, "avg": false, "current": false, "max": false, @@ -5516,13 +6775,219 @@ "values": false }, "lines": true, - "linewidth": 1, + "linewidth": 2, "links": [], - "nullPointMode": "connected", + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.3.7", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(python_gc_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(python_gc_time_count[$bucket_size])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{job}} {{index}} gen {{gen}} ", + "refId": "A", + "step": 20, + "target": "" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Average GC Time Per Collection", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "'gen 0' shows the number of objects allocated since the last gen0 GC.\n'gen 1' / 'gen 2' show the number of gen0/gen1 GCs since the last gen1/gen2 GC.", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 39 + }, + 
"hiddenSeries": false, + "id": 89, + "legend": { + "avg": false, + "current": false, + "hideEmpty": true, + "hideZero": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.3.7", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "/gen 0$/", + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "python_gc_counts{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{job}}-{{index}} gen {{gen}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Allocation counts", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": "Gen N-1 GCs since last Gen N GC", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "decimals": null, + "format": "short", + "label": "Objects since last Gen 0 GC", + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 39 + }, + "hiddenSeries": false, + "id": 93, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "connected", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.7", "pointradius": 5, "points": false, "renderer": "flot", @@ -5586,16 +7051,1579 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 48 + }, + "hiddenSeries": false, + "id": 95, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.3.7", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(python_gc_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{job}}-{{index}} gen {{gen}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "GC frequency", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": 
[] + }, + "yaxes": [ + { + "format": "hertz", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "cards": { + "cardPadding": 0, + "cardRound": null + }, + "color": { + "cardColor": "#b4ff00", + "colorScale": "sqrt", + "colorScheme": "interpolateSpectral", + "exponent": 0.5, + "max": null, + "min": 0, + "mode": "spectrum" + }, + "dataFormat": "tsbuckets", + "datasource": "${DS_PROMETHEUS}", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 48 + }, + "heatmap": {}, + "hideZeroBuckets": true, + "highlightCards": true, + "id": 87, + "legend": { + "show": true + }, + "links": [], + "reverseYBuckets": false, + "targets": [ + { + "expr": "sum(rate(python_gc_time_bucket[$bucket_size])) by (le)", + "format": "heatmap", + "intervalFactor": 1, + "legendFormat": "{{le}}", + "refId": "A" + } + ], + "title": "GC durations", + "tooltip": { + "show": true, + "showHistogram": false + }, + "type": "heatmap", + "xAxis": { + "show": true + }, + "xBucketNumber": null, + "xBucketSize": null, + "yAxis": { + "decimals": null, + "format": "s", + "logBase": 1, + "max": null, + "min": null, + "show": true, + "splitFactor": null + }, + "yBucketBound": "auto", + "yBucketNumber": null, + "yBucketSize": null + } + ], + "repeat": null, + "title": "GC", + "type": "row" + }, + { + "collapsed": true, + "datasource": "${DS_PROMETHEUS}", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 37 + }, + "id": 63, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 13 + }, + "hiddenSeries": false, + "id": 42, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "paceLength": 10, + "percentage": false, + "pluginVersion": "7.3.7", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum (rate(synapse_replication_tcp_protocol_inbound_commands{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (name, conn_id)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{job}}-{{index}} {{command}}", + "refId": "A", + "step": 20 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Rate of incoming commands", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "hertz", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + 
"datasource": "${DS_PROMETHEUS}", + "description": "", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 13 + }, + "hiddenSeries": false, + "id": 144, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "7.3.7", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "synapse_replication_tcp_command_queue{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", + "interval": "", + "legendFormat": "{{stream_name}} {{job}}-{{index}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Queued incoming RDATA commands, by stream", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 20 + }, + "hiddenSeries": false, + "id": 43, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "paceLength": 10, + "percentage": false, + "pluginVersion": "7.3.7", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum (rate(synapse_replication_tcp_protocol_outbound_commands{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (name, conn_id)", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{job}}-{{index}} {{command}}", + "refId": "A", + "step": 20 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Rate of outgoing commands", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "hertz", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, 
+ "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 20 + }, + "hiddenSeries": false, + "id": 41, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "paceLength": 10, + "percentage": false, + "pluginVersion": "7.3.7", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(synapse_replication_tcp_resource_stream_updates{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{stream_name}}", + "refId": "A", + "step": 20 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Outgoing stream updates", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "hertz", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 27 + }, + "hiddenSeries": false, + "id": 113, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "paceLength": 10, + "percentage": false, + "pluginVersion": "7.3.7", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "synapse_replication_tcp_resource_connections_per_stream{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{job}}-{{index}} {{stream_name}}", + "refId": "A" + }, + { + "expr": "synapse_replication_tcp_resource_total_connections{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{job}}-{{index}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Replication connections", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + 
"datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 27 + }, + "hiddenSeries": false, + "id": 115, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "paceLength": 10, + "percentage": false, + "pluginVersion": "7.3.7", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(synapse_replication_tcp_protocol_close_reason{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{job}}-{{index}} {{reason_type}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Replication connection close reasons", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "hertz", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "repeat": null, + "title": "Replication", + "type": "row" + }, + { + "collapsed": true, + "datasource": "${DS_PROMETHEUS}", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 38 + }, + "id": 69, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 41 + }, + "hiddenSeries": false, + "id": 67, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "paceLength": 10, + "percentage": false, + "pluginVersion": "7.3.7", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "max(synapse_event_persisted_position{instance=\"$instance\"}) - on() group_right() synapse_event_processing_positions{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{job}}-{{index}} {{name}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Event processing lag", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": "events", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + 
"min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 41 + }, + "hiddenSeries": false, + "id": 71, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "paceLength": 10, + "percentage": false, + "pluginVersion": "7.3.7", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "time()*1000-synapse_event_processing_last_ts{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{job}}-{{index}} {{name}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Age of last processed event", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 50 + }, + "hiddenSeries": false, + "id": 121, + "interval": "", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "paceLength": 10, + "percentage": false, + "pluginVersion": "7.3.7", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "deriv(synapse_event_processing_last_ts{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/1000 - 1", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{job}}-{{index}} {{name}}", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Event processing catchup rate", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "none", + "label": "fallbehind(-) / catchup(+): s/sec", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + 
"min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "title": "Event processing loop positions", + "type": "row" + }, + { + "collapsed": true, + "datasource": "${DS_PROMETHEUS}", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 39 + }, + "id": 126, + "panels": [ + { + "cards": { + "cardPadding": 0, + "cardRound": null + }, + "color": { + "cardColor": "#B877D9", + "colorScale": "sqrt", + "colorScheme": "interpolateInferno", + "exponent": 0.5, + "max": null, + "min": 0, + "mode": "opacity" + }, + "dataFormat": "tsbuckets", + "datasource": "$datasource", + "description": "Colour reflects the number of rooms with the given number of forward extremities, or fewer.\n\nThis is only updated once an hour.", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 42 + }, + "heatmap": {}, + "hideZeroBuckets": true, + "highlightCards": true, + "id": 122, + "legend": { + "show": true + }, + "links": [], + "reverseYBuckets": false, + "targets": [ + { + "expr": "synapse_forward_extremities_bucket{instance=\"$instance\"} and on (index, instance, job) (synapse_storage_events_persisted_events > 0)", + "format": "heatmap", + "intervalFactor": 1, + "legendFormat": "{{le}}", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Number of rooms, by number of forward extremities in room", + "tooltip": { + "show": true, + "showHistogram": true + }, + "type": "heatmap", + "xAxis": { + "show": true + }, + "xBucketNumber": null, + "xBucketSize": null, + "yAxis": { + "decimals": 0, + "format": "short", + "logBase": 1, + "max": null, + "min": null, + "show": true, + "splitFactor": null + }, + "yBucketBound": "auto", + "yBucketNumber": null, + "yBucketSize": null + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "Number of rooms with the given number of forward extremities or fewer.\n\nThis is only updated once an hour.", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 0, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 42 + }, + "hiddenSeries": false, + "id": 124, + "interval": "", + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "synapse_forward_extremities_bucket{instance=\"$instance\"} > 0", + "format": "heatmap", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{le}}", + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Room counts, by number of extremities", + "tooltip": { + "shared": true, + "sort": 2, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "none", + "label": "Number of rooms", + "logBase": 10, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + 
"align": false, + "alignLevel": null + } + }, + { + "cards": { + "cardPadding": 0, + "cardRound": null + }, + "color": { + "cardColor": "#5794F2", + "colorScale": "sqrt", + "colorScheme": "interpolateInferno", + "exponent": 0.5, + "min": 0, + "mode": "opacity" + }, + "dataFormat": "tsbuckets", + "datasource": "$datasource", + "description": "Colour reflects the number of events persisted to rooms with the given number of forward extremities, or fewer.", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 50 + }, + "heatmap": {}, + "hideZeroBuckets": true, + "highlightCards": true, + "id": 127, + "legend": { + "show": true + }, + "links": [], + "reverseYBuckets": false, + "targets": [ + { + "expr": "rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0)", + "format": "heatmap", + "intervalFactor": 1, + "legendFormat": "{{le}}", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Events persisted, by number of forward extremities in room (heatmap)", + "tooltip": { + "show": true, + "showHistogram": true + }, + "type": "heatmap", + "xAxis": { + "show": true + }, + "xBucketNumber": null, + "xBucketSize": null, + "yAxis": { + "decimals": 0, + "format": "short", + "logBase": 1, + "max": null, + "min": null, + "show": true, + "splitFactor": null + }, + "yBucketBound": "auto", + "yBucketNumber": null, + "yBucketSize": null + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "For a given percentage P, the number X where P% of events were persisted to rooms with X forward extremities or fewer.", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 50 + }, + "hiddenSeries": false, + "id": 128, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "histogram_quantile(0.5, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "50%", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.75, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "75%", + "refId": "B" + }, + { + "expr": "histogram_quantile(0.90, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "90%", + "refId": "C" + }, + { + "expr": "histogram_quantile(0.99, 
rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "99%", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Events persisted, by number of forward extremities in room (quantiles)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": "Number of extremities in room", + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "cards": { + "cardPadding": 0, + "cardRound": null + }, + "color": { + "cardColor": "#FF9830", + "colorScale": "sqrt", + "colorScheme": "interpolateInferno", + "exponent": 0.5, + "min": 0, + "mode": "opacity" + }, + "dataFormat": "tsbuckets", + "datasource": "$datasource", + "description": "Colour reflects the number of events persisted to rooms with the given number of stale forward extremities, or fewer.\n\nStale forward extremities are those that were in the previous set of extremities as well as the new.", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 58 + }, + "heatmap": {}, + "hideZeroBuckets": true, + "highlightCards": true, + "id": 129, + "legend": { + "show": true + }, + "links": [], + "reverseYBuckets": false, + "targets": [ + { + "expr": "rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0)", + "format": "heatmap", + "intervalFactor": 1, + "legendFormat": "{{le}}", + "refId": "A" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Events persisted, by number of stale forward extremities in room (heatmap)", + "tooltip": { + "show": true, + "showHistogram": true + }, + "type": "heatmap", + "xAxis": { + "show": true + }, + "xBucketNumber": null, + "xBucketSize": null, + "yAxis": { + "decimals": 0, + "format": "short", + "logBase": 1, + "max": null, + "min": null, + "show": true, + "splitFactor": null + }, + "yBucketBound": "auto", + "yBucketNumber": null, + "yBucketSize": null + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "For a given percentage P, the number X where P% of events were persisted to rooms with X stale forward extremities or fewer.\n\nStale forward extremities are those that were in the previous set of extremities as well as the new.", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { - "h": 9, + "h": 8, "w": 12, - "x": 0, - "y": 139 + "x": 12, + "y": 58 }, "hiddenSeries": false, - "id": 95, + "id": 130, "legend": { "avg": false, "current": false, @@ -5609,11 +8637,9 @@ "linewidth": 1, "links": [], "nullPointMode": "null", - "options": { - "dataLinks": [] - }, "percentage": false, - "pointradius": 5, + "pluginVersion": "7.1.3", + "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], @@ -5622,18 
+8648,39 @@ "steppedLine": false, "targets": [ { - "expr": "rate(python_gc_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", + "expr": "histogram_quantile(0.5, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))", "format": "time_series", "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} gen {{gen}}", + "legendFormat": "50%", "refId": "A" + }, + { + "expr": "histogram_quantile(0.75, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "75%", + "refId": "B" + }, + { + "expr": "histogram_quantile(0.90, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "90%", + "refId": "C" + }, + { + "expr": "histogram_quantile(0.99, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "99%", + "refId": "D" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "GC frequency", + "title": "Events persisted, by number of stale forward extremities in room (quantiles)", "tooltip": { "shared": true, "sort": 0, @@ -5649,11 +8696,11 @@ }, "yaxes": [ { - "format": "hertz", - "label": null, + "format": "short", + "label": "Number of stale forward extremities in room", "logBase": 1, "max": null, - "min": null, + "min": "0", "show": true }, { @@ -5676,26 +8723,32 @@ "cardRound": null }, "color": { - "cardColor": "#b4ff00", + "cardColor": "#73BF69", "colorScale": "sqrt", - "colorScheme": "interpolateSpectral", + "colorScheme": "interpolateInferno", "exponent": 0.5, - "max": null, "min": 0, - "mode": "spectrum" + "mode": "opacity" }, "dataFormat": "tsbuckets", - "datasource": "${DS_PROMETHEUS}", + "datasource": "$datasource", + "description": "Colour reflects the number of state resolution operations performed over the given number of state groups, or fewer.", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "gridPos": { - "h": 9, + "h": 8, "w": 12, - "x": 12, - "y": 139 + "x": 0, + "y": 66 }, "heatmap": {}, "hideZeroBuckets": true, "highlightCards": true, - "id": 87, + "id": 131, "legend": { "show": true }, @@ -5703,17 +8756,20 @@ "reverseYBuckets": false, "targets": [ { - "expr": "sum(rate(python_gc_time_bucket[$bucket_size])) by (le)", + "expr": "rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", "format": "heatmap", + "interval": "", "intervalFactor": 1, "legendFormat": "{{le}}", "refId": "A" } ], - "title": "GC durations", + "timeFrom": null, + "timeShift": null, + "title": "Number of state resolutions performed, by number of state groups involved (heatmap)", "tooltip": { "show": true, - "showHistogram": false + "showHistogram": true }, "type": "heatmap", "xAxis": { @@ -5722,8 +8778,8 @@ "xBucketNumber": null, "xBucketSize": null, "yAxis": { - "decimals": null, - "format": "s", + "decimals": 0, + 
"format": "short", "logBase": 1, "max": null, "min": null, @@ -5733,39 +8789,32 @@ "yBucketBound": "auto", "yBucketNumber": null, "yBucketSize": null - } - ], - "repeat": null, - "title": "GC", - "type": "row" - }, - { - "collapsed": true, - "datasource": "${DS_PROMETHEUS}", - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 38 - }, - "id": 63, - "panels": [ + }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$datasource", + "description": "For a given percentage P, the number X where P% of state resolution operations took place over X state groups or fewer.", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { - "h": 7, + "h": 8, "w": 12, - "x": 0, + "x": 12, "y": 66 }, "hiddenSeries": false, - "id": 2, + "id": 132, + "interval": "", "legend": { "avg": false, "current": false, @@ -5779,12 +8828,9 @@ "linewidth": 1, "links": [], "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "paceLength": 10, "percentage": false, - "pointradius": 5, + "pluginVersion": "7.1.3", + "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], @@ -5793,53 +8839,150 @@ "steppedLine": false, "targets": [ { - "expr": "rate(synapse_replication_tcp_resource_user_sync{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", + "expr": "histogram_quantile(0.5, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "format": "time_series", - "intervalFactor": 2, - "legendFormat": "user started/stopped syncing", - "refId": "A", - "step": 20 + "interval": "", + "intervalFactor": 1, + "legendFormat": "50%", + "refId": "A" }, { - "expr": "rate(synapse_replication_tcp_resource_federation_ack{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", + "expr": "histogram_quantile(0.75, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "format": "time_series", - "intervalFactor": 2, - "legendFormat": "federation ack", - "refId": "B", - "step": 20 + "interval": "", + "intervalFactor": 1, + "legendFormat": "75%", + "refId": "B" }, { - "expr": "rate(synapse_replication_tcp_resource_remove_pusher{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", + "expr": "histogram_quantile(0.90, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "format": "time_series", - "intervalFactor": 2, - "legendFormat": "remove pusher", - "refId": "C", - "step": 20 + "interval": "", + "intervalFactor": 1, + "legendFormat": "90%", + "refId": "C" }, { - "expr": "rate(synapse_replication_tcp_resource_invalidate_cache{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", + "expr": "histogram_quantile(0.99, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", "format": "time_series", - "intervalFactor": 2, - "legendFormat": "invalidate cache", - "refId": "D", - "step": 20 + "interval": "", + "intervalFactor": 1, + "legendFormat": "99%", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Number of state resolutions performed, by number of state groups involved (quantiles)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" 
+ }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": "Number of state groups", + "logBase": 1, + "max": null, + "min": "0", + "show": true }, { - "expr": "rate(synapse_replication_tcp_resource_user_ip_cache{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "user ip cache", - "refId": "E", - "step": 20 + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "$datasource", + "description": "When we do a state res while persisting events we try and see if we can prune any stale extremities.", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 74 + }, + "hiddenSeries": false, + "id": 179, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "percentage": false, + "pluginVersion": "7.1.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(synapse_storage_events_state_resolutions_during_persistence{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", + "interval": "", + "legendFormat": "State res ", + "refId": "A" + }, + { + "expr": "sum(rate(synapse_storage_events_potential_times_prune_extremities{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", + "interval": "", + "legendFormat": "Potential to prune", + "refId": "B" + }, + { + "expr": "sum(rate(synapse_storage_events_times_pruned_extremities{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", + "interval": "", + "legendFormat": "Pruned", + "refId": "C" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Rate of events on replication master", + "title": "Stale extremity dropping", "tooltip": { - "shared": false, + "shared": true, "sort": 0, "value_type": "individual" }, @@ -5873,23 +9016,45 @@ "align": false, "alignLevel": null } - }, + } + ], + "title": "Extremities", + "type": "row" + }, + { + "collapsed": true, + "datasource": "${DS_PROMETHEUS}", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 40 + }, + "id": 158, + "panels": [ { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { - "h": 7, + "h": 8, "w": 12, - "x": 12, - "y": 66 + "x": 0, + "y": 119 }, "hiddenSeries": false, - "id": 41, + "id": 156, "legend": { "avg": false, "current": false, @@ -5904,35 +9069,49 @@ "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, - "paceLength": 10, "percentage": false, + "pluginVersion": "7.3.7", "pointradius": 5, "points": false, "renderer": "flot", - "seriesOverrides": [], + "seriesOverrides": [ + { + "alias": "Max", + "color": "#bf1b00", + "fill": 0, + "linewidth": 2 + } + ], "spaceLength": 10, "stack": 
false, "steppedLine": false, "targets": [ { - "expr": "rate(synapse_replication_tcp_resource_stream_updates{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", + "expr": "synapse_admin_mau:current{instance=\"$instance\"}", "format": "time_series", "interval": "", - "intervalFactor": 2, - "legendFormat": "{{stream_name}}", - "refId": "A", - "step": 20 + "intervalFactor": 1, + "legendFormat": "Current", + "refId": "A" + }, + { + "expr": "synapse_admin_mau:max{instance=\"$instance\"}", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "Max", + "refId": "B" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Outgoing stream updates", + "title": "MAU Limits", "tooltip": { - "shared": false, + "shared": true, "sort": 0, "value_type": "individual" }, @@ -5946,11 +9125,11 @@ }, "yaxes": [ { - "format": "hertz", + "format": "short", "label": null, "logBase": 1, "max": null, - "min": null, + "min": "0", "show": true }, { @@ -5973,16 +9152,22 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { - "h": 7, + "h": 8, "w": 12, - "x": 0, - "y": 73 + "x": 12, + "y": 119 }, "hiddenSeries": false, - "id": 42, + "id": 160, "legend": { "avg": false, "current": false, @@ -5994,14 +9179,13 @@ }, "lines": true, "linewidth": 1, - "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, - "paceLength": 10, "percentage": false, - "pointradius": 5, + "pluginVersion": "7.3.7", + "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], @@ -6010,21 +9194,19 @@ "steppedLine": false, "targets": [ { - "expr": "sum (rate(synapse_replication_tcp_protocol_inbound_commands{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (name, conn_id)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{job}}-{{index}} {{command}}", - "refId": "A", - "step": 20 + "expr": "synapse_admin_mau_current_mau_by_service{instance=\"$instance\"}", + "interval": "", + "legendFormat": "{{ app_service }}", + "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Rate of incoming commands", + "title": "MAU by Appservice", "tooltip": { - "shared": false, + "shared": true, "sort": 0, "value_type": "individual" }, @@ -6038,7 +9220,7 @@ }, "yaxes": [ { - "format": "hertz", + "format": "short", "label": null, "logBase": 1, "max": null, @@ -6058,23 +9240,45 @@ "align": false, "alignLevel": null } - }, + } + ], + "title": "MAU", + "type": "row" + }, + { + "collapsed": true, + "datasource": "${DS_PROMETHEUS}", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 41 + }, + "id": 177, + "panels": [ { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 7, "w": 12, - "x": 12, - "y": 73 + "x": 0, + "y": 1 }, "hiddenSeries": false, - "id": 43, + "id": 173, "legend": { "avg": false, "current": false, @@ -6088,11 +9292,8 @@ "linewidth": 1, "links": [], "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "paceLength": 10, "percentage": false, + "pluginVersion": "7.1.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -6102,22 +9303,24 @@ "steppedLine": false, "targets": [ { - 
"expr": "sum (rate(synapse_replication_tcp_protocol_outbound_commands{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (name, conn_id)", + "expr": "rate(synapse_notifier_users_woken_by_stream{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "format": "time_series", + "hide": false, "intervalFactor": 2, - "legendFormat": "{{job}}-{{index}} {{command}}", + "legendFormat": "{{stream}} {{index}}", + "metric": "synapse_notifier", "refId": "A", - "step": 20 + "step": 2 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Rate of outgoing commands", + "title": "Notifier Streams Woken", "tooltip": { - "shared": false, - "sort": 0, + "shared": true, + "sort": 2, "value_type": "individual" }, "type": "graph", @@ -6157,16 +9360,23 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {}, + "links": [] + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 7, "w": 12, - "x": 0, - "y": 80 + "x": 12, + "y": 1 }, "hiddenSeries": false, - "id": 113, + "id": 175, "legend": { "avg": false, "current": false, @@ -6180,11 +9390,8 @@ "linewidth": 1, "links": [], "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "paceLength": 10, "percentage": false, + "pluginVersion": "7.1.3", "pointradius": 5, "points": false, "renderer": "flot", @@ -6194,28 +9401,23 @@ "steppedLine": false, "targets": [ { - "expr": "synapse_replication_tcp_resource_connections_per_stream{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} {{stream_name}}", - "refId": "A" - }, - { - "expr": "synapse_replication_tcp_resource_total_connections{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}", + "expr": "rate(synapse_handler_presence_get_updates{job=~\"$job\",instance=\"$instance\"}[$bucket_size])", "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}}", - "refId": "B" + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{type}} {{index}}", + "refId": "A", + "step": 2 } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Replication connections", + "title": "Presence Stream Fetch Type Rates", "tooltip": { "shared": true, - "sort": 0, + "sort": 2, "value_type": "individual" }, "type": "graph", @@ -6228,7 +9430,7 @@ }, "yaxes": [ { - "format": "short", + "format": "hertz", "label": null, "logBase": 1, "max": null, @@ -6248,23 +9450,44 @@ "align": false, "alignLevel": null } - }, + } + ], + "title": "Notifier", + "type": "row" + }, + { + "collapsed": true, + "datasource": "${DS_PROMETHEUS}", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 42 + }, + "id": 170, + "panels": [ { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { - "h": 7, + "h": 8, "w": 12, - "x": 12, - "y": 80 + "x": 0, + "y": 73 }, "hiddenSeries": false, - "id": 115, + "id": 168, "legend": { "avg": false, "current": false, @@ -6276,14 +9499,13 @@ }, "lines": true, "linewidth": 1, - "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, - "paceLength": 10, "percentage": false, - "pointradius": 5, + "pluginVersion": "7.3.7", + "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], @@ -6292,10 
+9514,9 @@ "steppedLine": false, "targets": [ { - "expr": "rate(synapse_replication_tcp_protocol_close_reason{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} {{reason_type}}", + "expr": "rate(synapse_appservice_api_sent_events{instance=\"$instance\"}[$bucket_size])", + "interval": "", + "legendFormat": "{{exported_service}}", "refId": "A" } ], @@ -6303,7 +9524,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Replication connection close reasons", + "title": "Sent Events rate", "tooltip": { "shared": true, "sort": 0, @@ -6339,39 +9560,29 @@ "align": false, "alignLevel": null } - } - ], - "repeat": null, - "title": "Replication", - "type": "row" - }, - { - "collapsed": true, - "datasource": "${DS_PROMETHEUS}", - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 39 - }, - "id": 69, - "panels": [ + }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { - "h": 9, + "h": 8, "w": 12, - "x": 0, - "y": 40 + "x": 12, + "y": 73 }, "hiddenSeries": false, - "id": 67, + "id": 171, "legend": { "avg": false, "current": false, @@ -6383,14 +9594,13 @@ }, "lines": true, "linewidth": 1, - "links": [], - "nullPointMode": "connected", + "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, - "paceLength": 10, "percentage": false, - "pointradius": 5, + "pluginVersion": "7.3.7", + "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], @@ -6399,11 +9609,9 @@ "steppedLine": false, "targets": [ { - "expr": "max(synapse_event_persisted_position{instance=\"$instance\"}) - ignoring(instance,index, job, name) group_right() synapse_event_processing_positions{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", - "format": "time_series", + "expr": "rate(synapse_appservice_api_sent_transactions{instance=\"$instance\"}[$bucket_size])", "interval": "", - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} {{name}}", + "legendFormat": "{{exported_service}}", "refId": "A" } ], @@ -6411,7 +9619,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Event processing lag", + "title": "Transactions rate", "tooltip": { "shared": true, "sort": 0, @@ -6427,8 +9635,8 @@ }, "yaxes": [ { - "format": "short", - "label": "events", + "format": "hertz", + "label": null, "logBase": 1, "max": null, "min": null, @@ -6447,23 +9655,44 @@ "align": false, "alignLevel": null } - }, + } + ], + "title": "Appservices", + "type": "row" + }, + { + "collapsed": true, + "datasource": "${DS_PROMETHEUS}", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 43 + }, + "id": 188, + "panels": [ { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { - "h": 9, + "h": 8, "w": 12, - "x": 12, - "y": 40 + "x": 0, + "y": 44 }, "hiddenSeries": false, - "id": 71, + "id": 182, "legend": { "avg": false, "current": false, @@ -6475,14 +9704,13 @@ }, "lines": true, "linewidth": 1, - "links": [], - "nullPointMode": "connected", + "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, - "paceLength": 10, "percentage": false, - "pointradius": 5, + "pluginVersion": "7.3.7", + "pointradius": 2, "points": 
false, "renderer": "flot", "seriesOverrides": [], @@ -6491,23 +9719,44 @@ "steppedLine": false, "targets": [ { - "expr": "time()*1000-synapse_event_processing_last_ts{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", - "format": "time_series", - "hide": false, + "expr": "rate(synapse_handler_presence_notified_presence{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "interval": "", - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} {{name}}", + "legendFormat": "Notified", + "refId": "A" + }, + { + "expr": "rate(synapse_handler_presence_federation_presence_out{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", + "interval": "", + "legendFormat": "Remote ping", "refId": "B" + }, + { + "expr": "rate(synapse_handler_presence_presence_updates{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", + "interval": "", + "legendFormat": "Total updates", + "refId": "C" + }, + { + "expr": "rate(synapse_handler_presence_federation_presence{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", + "interval": "", + "legendFormat": "Remote updates", + "refId": "D" + }, + { + "expr": "rate(synapse_handler_presence_bump_active_time{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", + "interval": "", + "legendFormat": "Bump active time", + "refId": "E" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Age of last processed event", + "title": "Presence", "tooltip": { "shared": true, - "sort": 0, + "sort": 2, "value_type": "individual" }, "type": "graph", @@ -6520,7 +9769,7 @@ }, "yaxes": [ { - "format": "ms", + "format": "hertz", "label": null, "logBase": 1, "max": null, @@ -6547,17 +9796,22 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { - "h": 9, + "h": 8, "w": 12, - "x": 0, - "y": 49 + "x": 12, + "y": 44 }, "hiddenSeries": false, - "id": 121, - "interval": "", + "id": 184, "legend": { "avg": false, "current": false, @@ -6569,14 +9823,13 @@ }, "lines": true, "linewidth": 1, - "links": [], - "nullPointMode": "connected", + "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, - "paceLength": 10, "percentage": false, - "pointradius": 5, + "pluginVersion": "7.3.7", + "pointradius": 2, "points": false, "renderer": "flot", "seriesOverrides": [], @@ -6585,23 +9838,20 @@ "steppedLine": false, "targets": [ { - "expr": "deriv(synapse_event_processing_last_ts{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/1000 - 1", - "format": "time_series", - "hide": false, + "expr": "rate(synapse_handler_presence_state_transition{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "interval": "", - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} {{name}}", - "refId": "B" + "legendFormat": "{{from}} -> {{to}}", + "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Event processing catchup rate", + "title": "Presence state transitions", "tooltip": { "shared": true, - "sort": 0, + "sort": 2, "value_type": "individual" }, "type": "graph", @@ -6614,9 +9864,8 @@ }, "yaxes": [ { - "decimals": null, - "format": "none", - "label": "fallbehind(-) / catchup(+): s/sec", + "format": "hertz", + "label": null, "logBase": 1, "max": null, "min": null, @@ -6635,88 +9884,6 @@ "align": false, "alignLevel": null } - } - ], - 
"title": "Event processing loop positions", - "type": "row" - }, - { - "collapsed": true, - "datasource": "${DS_PROMETHEUS}", - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 40 - }, - "id": 126, - "panels": [ - { - "cards": { - "cardPadding": 0, - "cardRound": null - }, - "color": { - "cardColor": "#B877D9", - "colorScale": "sqrt", - "colorScheme": "interpolateInferno", - "exponent": 0.5, - "max": null, - "min": 0, - "mode": "opacity" - }, - "dataFormat": "tsbuckets", - "datasource": "$datasource", - "description": "Colour reflects the number of rooms with the given number of forward extremities, or fewer.\n\nThis is only updated once an hour.", - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 86 - }, - "heatmap": {}, - "hideZeroBuckets": true, - "highlightCards": true, - "id": 122, - "legend": { - "show": true - }, - "links": [], - "reverseYBuckets": false, - "targets": [ - { - "expr": "synapse_forward_extremities_bucket{instance=\"$instance\"} and on (index, instance, job) (synapse_storage_events_persisted_events > 0)", - "format": "heatmap", - "intervalFactor": 1, - "legendFormat": "{{le}}", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Number of rooms, by number of forward extremities in room", - "tooltip": { - "show": true, - "showHistogram": true - }, - "type": "heatmap", - "xAxis": { - "show": true - }, - "xBucketNumber": null, - "xBucketSize": null, - "yAxis": { - "decimals": 0, - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true, - "splitFactor": null - }, - "yBucketBound": "auto", - "yBucketNumber": null, - "yBucketSize": null }, { "aliasColors": {}, @@ -6724,18 +9891,22 @@ "dashLength": 10, "dashes": false, "datasource": "$datasource", - "description": "Number of rooms with the given number of forward extremities or fewer.\n\nThis is only updated once an hour.", - "fill": 0, + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, + "fill": 1, "fillGradient": 0, "gridPos": { "h": 8, "w": 12, - "x": 12, - "y": 86 + "x": 0, + "y": 52 }, "hiddenSeries": false, - "id": 124, - "interval": "", + "id": 186, "legend": { "avg": false, "current": false, @@ -6747,12 +9918,12 @@ }, "lines": true, "linewidth": 1, - "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.7", "pointradius": 2, "points": false, "renderer": "flot", @@ -6762,11 +9933,9 @@ "steppedLine": false, "targets": [ { - "expr": "synapse_forward_extremities_bucket{instance=\"$instance\"} > 0", - "format": "time_series", + "expr": "rate(synapse_handler_presence_notify_reason{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", "interval": "", - "intervalFactor": 1, - "legendFormat": "{{le}}", + "legendFormat": "{{reason}}", "refId": "A" } ], @@ -6774,10 +9943,10 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Room counts, by number of extremities", + "title": "Presence notify reason", "tooltip": { - "shared": false, - "sort": 1, + "shared": true, + "sort": 2, "value_type": "individual" }, "type": "graph", @@ -6790,111 +9959,66 @@ }, "yaxes": [ { - "decimals": null, - "format": "none", - "label": "Number of rooms", + "$$hashKey": "object:165", + "format": "hertz", + "label": null, "logBase": 1, "max": null, "min": null, "show": true }, { + "$$hashKey": "object:166", "format": "short", "label": null, "logBase": 1, "max": null, "min": null, - "show": false + "show": true } ], "yaxis": { "align": false, 
"alignLevel": null } - }, - { - "cards": { - "cardPadding": 0, - "cardRound": null - }, - "color": { - "cardColor": "#5794F2", - "colorScale": "sqrt", - "colorScheme": "interpolateInferno", - "exponent": 0.5, - "min": 0, - "mode": "opacity" - }, - "dataFormat": "tsbuckets", - "datasource": "$datasource", - "description": "Colour reflects the number of events persisted to rooms with the given number of forward extremities, or fewer.", - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 94 - }, - "heatmap": {}, - "hideZeroBuckets": true, - "highlightCards": true, - "id": 127, - "legend": { - "show": true - }, - "links": [], - "reverseYBuckets": false, - "targets": [ - { - "expr": "rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0)", - "format": "heatmap", - "intervalFactor": 1, - "legendFormat": "{{le}}", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Events persisted, by number of forward extremities in room (heatmap)", - "tooltip": { - "show": true, - "showHistogram": true - }, - "type": "heatmap", - "xAxis": { - "show": true - }, - "xBucketNumber": null, - "xBucketSize": null, - "yAxis": { - "decimals": 0, - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true, - "splitFactor": null - }, - "yBucketBound": "auto", - "yBucketNumber": null, - "yBucketSize": null - }, + } + ], + "title": "Presence", + "type": "row" + }, + { + "collapsed": true, + "datasource": "${DS_PROMETHEUS}", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 44 + }, + "id": 197, + "panels": [ { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$datasource", - "description": "For a given percentage P, the number X where P% of events were persisted to rooms with X forward extremities or fewer.", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 8, "w": 12, - "x": 12, - "y": 94 + "x": 0, + "y": 1 }, "hiddenSeries": false, - "id": 128, + "id": 191, "legend": { "avg": false, "current": false, @@ -6906,12 +10030,12 @@ }, "lines": true, "linewidth": 1, - "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": "7.3.7", "pointradius": 2, "points": false, "renderer": "flot", @@ -6921,42 +10045,20 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.5, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "50%", - "refId": "A" - }, - { - "expr": "histogram_quantile(0.75, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "75%", - "refId": "B" - }, - { - "expr": "histogram_quantile(0.90, rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "90%", - "refId": "C" - }, - { - "expr": "histogram_quantile(0.99, 
rate(synapse_storage_events_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "99%", - "refId": "D" + "expr": "rate(synapse_external_cache_set{job=\"$job\", instance=\"$instance\", index=~\"$index\"}[$bucket_size])", + "interval": "", + "legendFormat": "{{ cache_name }} {{ index }}", + "refId": "A" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Events persisted, by number of forward extremities in room (quantiles)", + "title": "External Cache Set Rate", "tooltip": { "shared": true, - "sort": 0, + "sort": 2, "value_type": "individual" }, "type": "graph", @@ -6969,14 +10071,16 @@ }, "yaxes": [ { - "format": "short", - "label": "Number of extremities in room", + "$$hashKey": "object:390", + "format": "hertz", + "label": null, "logBase": 1, "max": null, - "min": "0", + "min": null, "show": true }, { + "$$hashKey": "object:391", "format": "short", "label": null, "logBase": 1, @@ -6990,89 +10094,29 @@ "alignLevel": null } }, - { - "cards": { - "cardPadding": 0, - "cardRound": null - }, - "color": { - "cardColor": "#FF9830", - "colorScale": "sqrt", - "colorScheme": "interpolateInferno", - "exponent": 0.5, - "min": 0, - "mode": "opacity" - }, - "dataFormat": "tsbuckets", - "datasource": "$datasource", - "description": "Colour reflects the number of events persisted to rooms with the given number of stale forward extremities, or fewer.\n\nStale forward extremities are those that were in the previous set of extremities as well as the new.", - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 102 - }, - "heatmap": {}, - "hideZeroBuckets": true, - "highlightCards": true, - "id": 129, - "legend": { - "show": true - }, - "links": [], - "reverseYBuckets": false, - "targets": [ - { - "expr": "rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0)", - "format": "heatmap", - "intervalFactor": 1, - "legendFormat": "{{le}}", - "refId": "A" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Events persisted, by number of stale forward extremities in room (heatmap)", - "tooltip": { - "show": true, - "showHistogram": true - }, - "type": "heatmap", - "xAxis": { - "show": true - }, - "xBucketNumber": null, - "xBucketSize": null, - "yAxis": { - "decimals": 0, - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true, - "splitFactor": null - }, - "yBucketBound": "auto", - "yBucketNumber": null, - "yBucketSize": null - }, { "aliasColors": {}, "bars": false, "dashLength": 10, "dashes": false, "datasource": "$datasource", - "description": "For given percentage P, the number X where P% of events were persisted to rooms with X stale forward extremities or fewer.\n\nStale forward extremities are those that were in the previous set of extremities as well as the new.", + "description": "", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "fill": 1, "fillGradient": 0, "gridPos": { "h": 8, "w": 12, "x": 12, - "y": 102 + "y": 1 }, "hiddenSeries": false, - "id": 130, + "id": 193, "legend": { "avg": false, "current": false, @@ -7084,12 +10128,12 @@ }, "lines": true, "linewidth": 1, - "links": [], "nullPointMode": "null", "options": { - "dataLinks": [] + "alertThreshold": true }, "percentage": false, + "pluginVersion": 
"7.3.7", "pointradius": 2, "points": false, "renderer": "flot", @@ -7099,42 +10143,20 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.5, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "50%", + "expr": "rate(synapse_external_cache_get{job=\"$job\", instance=\"$instance\", index=~\"$index\"}[$bucket_size])", + "interval": "", + "legendFormat": "{{ cache_name }} {{ index }}", "refId": "A" - }, - { - "expr": "histogram_quantile(0.75, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "75%", - "refId": "B" - }, - { - "expr": "histogram_quantile(0.90, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "90%", - "refId": "C" - }, - { - "expr": "histogram_quantile(0.99, rate(synapse_storage_events_stale_forward_extremities_persisted_bucket{instance=\"$instance\"}[$bucket_size]) and on (index, instance, job) (synapse_storage_events_persisted_events > 0))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "99%", - "refId": "D" } ], "thresholds": [], "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Events persisted, by number of stale forward extremities in room (quantiles)", + "title": "External Cache Get Rate", "tooltip": { "shared": true, - "sort": 0, + "sort": 2, "value_type": "individual" }, "type": "graph", @@ -7147,14 +10169,16 @@ }, "yaxes": [ { - "format": "short", - "label": "Number of stale forward extremities in room", + "$$hashKey": "object:390", + "format": "hertz", + "label": null, "logBase": 1, "max": null, - "min": "0", + "min": null, "show": true }, { + "$$hashKey": "object:391", "format": "short", "label": null, "logBase": 1, @@ -7170,52 +10194,57 @@ }, { "cards": { - "cardPadding": 0, + "cardPadding": -1, "cardRound": null }, "color": { - "cardColor": "#73BF69", + "cardColor": "#b4ff00", "colorScale": "sqrt", "colorScheme": "interpolateInferno", "exponent": 0.5, "min": 0, - "mode": "opacity" + "mode": "spectrum" }, "dataFormat": "tsbuckets", "datasource": "$datasource", - "description": "Colour reflects the number of state resolution operations performed over the given number of state groups, or fewer.", + "fieldConfig": { + "defaults": { + "custom": {} + }, + "overrides": [] + }, "gridPos": { - "h": 8, + "h": 9, "w": 12, "x": 0, - "y": 110 + "y": 9 }, "heatmap": {}, - "hideZeroBuckets": true, + "hideZeroBuckets": false, "highlightCards": true, - "id": 131, + "id": 195, "legend": { - "show": true + "show": false }, "links": [], "reverseYBuckets": false, "targets": [ { - "expr": "rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", + "expr": "sum(rate(synapse_external_cache_response_time_seconds_bucket{index=~\"$index\",instance=\"$instance\",job=\"$job\"}[$bucket_size])) by (le)", "format": "heatmap", + "instant": false, "interval": "", "intervalFactor": 1, "legendFormat": "{{le}}", "refId": "A" } ], - "timeFrom": null, - 
"timeShift": null, - "title": "Number of state resolution performed, by number of state groups involved (heatmap)", + "title": "External Cache Response Time", "tooltip": { "show": true, "showHistogram": true }, + "tooltipDecimals": 2, "type": "heatmap", "xAxis": { "show": true @@ -7224,7 +10253,7 @@ "xBucketSize": null, "yAxis": { "decimals": 0, - "format": "short", + "format": "s", "logBase": 1, "max": null, "min": null, @@ -7234,131 +10263,14 @@ "yBucketBound": "auto", "yBucketNumber": null, "yBucketSize": null - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "description": "For a given percentage P, the number X where P% of state resolution operations took place over X state groups or fewer.", - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 110 - }, - "hiddenSeries": false, - "id": 132, - "interval": "", - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "dataLinks": [] - }, - "percentage": false, - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "histogram_quantile(0.5, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "50%", - "refId": "A" - }, - { - "expr": "histogram_quantile(0.75, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "75%", - "refId": "B" - }, - { - "expr": "histogram_quantile(0.90, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "90%", - "refId": "C" - }, - { - "expr": "histogram_quantile(0.99, rate(synapse_state_number_state_groups_in_resolution_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "99%", - "refId": "D" - } - ], - "thresholds": [], - "timeFrom": null, - "timeRegions": [], - "timeShift": null, - "title": "Number of state resolutions performed, by number of state groups involved (quantiles)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": "Number of state groups", - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } } ], - "title": "Extremities", + "title": "External Cache", "type": "row" } ], - "refresh": "5m", - "schemaVersion": 22, + "refresh": false, + "schemaVersion": 26, "style": "dark", "tags": [ "matrix" @@ -7368,9 +10280,10 @@ { "current": { "selected": false, - "text": "Prometheus", - "value": "Prometheus" + "text": "default", + "value": 
"default" }, + "error": null, "hide": 0, "includeAll": false, "label": null, @@ -7378,6 +10291,7 @@ "name": "datasource", "options": [], "query": "prometheus", + "queryValue": "", "refresh": 1, "regex": "", "skipUrlSync": false, @@ -7387,13 +10301,14 @@ "allFormat": "glob", "auto": true, "auto_count": 100, - "auto_min": "30s", + "auto_min": "60s", "current": { "selected": false, "text": "auto", "value": "$__auto_interval_bucket_size" }, "datasource": null, + "error": null, "hide": 0, "includeAll": false, "label": "Bucket Size", @@ -7438,6 +10353,7 @@ } ], "query": "30s,1m,2m,5m,10m,15m", + "queryValue": "", "refresh": 2, "skipUrlSync": false, "type": "interval" @@ -7447,9 +10363,9 @@ "current": {}, "datasource": "$datasource", "definition": "", + "error": null, "hide": 0, "includeAll": false, - "index": -1, "label": null, "multi": false, "name": "instance", @@ -7458,7 +10374,7 @@ "refresh": 2, "regex": "", "skipUrlSync": false, - "sort": 0, + "sort": 1, "tagValuesQuery": "", "tags": [], "tagsQuery": "", @@ -7471,10 +10387,10 @@ "current": {}, "datasource": "$datasource", "definition": "", + "error": null, "hide": 0, "hideLabel": false, "includeAll": true, - "index": -1, "label": "Job", "multi": true, "multiFormat": "regex values", @@ -7498,10 +10414,10 @@ "current": {}, "datasource": "$datasource", "definition": "", + "error": null, "hide": 0, "hideLabel": false, "includeAll": true, - "index": -1, "label": "", "multi": true, "multiFormat": "regex values", @@ -7522,7 +10438,7 @@ ] }, "time": { - "from": "now-1h", + "from": "now-3h", "to": "now" }, "timepicker": { @@ -7554,8 +10470,5 @@ "timezone": "", "title": "Synapse", "uid": "000000012", - "variables": { - "list": [] - }, - "version": 32 + "version": 90 } \ No newline at end of file From 141b073c7bf649ac12b7e12b118770677a64f51f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Javier=20Junquera=20S=C3=A1nchez?= Date: Thu, 20 May 2021 15:24:19 +0200 Subject: [PATCH 158/222] Update user_directory.md (#10016) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Javier Junquera Sánchez --- changelog.d/10016.doc | 1 + docs/user_directory.md | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/10016.doc diff --git a/changelog.d/10016.doc b/changelog.d/10016.doc new file mode 100644 index 000000000000..f9b615d7d7ab --- /dev/null +++ b/changelog.d/10016.doc @@ -0,0 +1 @@ +Fix broken link in user directory documentation. Contributed by @junquera. diff --git a/docs/user_directory.md b/docs/user_directory.md index 872fc2197968..d4f38d2cf11a 100644 --- a/docs/user_directory.md +++ b/docs/user_directory.md @@ -7,6 +7,6 @@ who are present in a publicly viewable room present on the server. The directory info is stored in various tables, which can (typically after DB corruption) get stale or out of sync. If this happens, for now the -solution to fix it is to execute the SQL [here](../synapse/storage/databases/main/schema/delta/53/user_dir_populate.sql) +solution to fix it is to execute the SQL [here](https://github.com/matrix-org/synapse/blob/master/synapse/storage/schema/main/delta/53/user_dir_populate.sql) and then restart synapse. This should then start a background task to flush the current tables and regenerate the directory. From 551d2c3f4b492d59b3c670c1a8e82869b16a594d Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 20 May 2021 11:10:36 -0400 Subject: [PATCH 159/222] Allow a user who could join a restricted room to see it in spaces summary. 
(#9922) This finishes up the experimental implementation of MSC3083 by showing the restricted rooms in the spaces summary (from MSC2946). --- changelog.d/9922.feature | 1 + synapse/federation/transport/server.py | 2 +- synapse/handlers/event_auth.py | 104 ++++++++++--- synapse/handlers/space_summary.py | 201 +++++++++++++++++++++---- 4 files changed, 254 insertions(+), 54 deletions(-) create mode 100644 changelog.d/9922.feature diff --git a/changelog.d/9922.feature b/changelog.d/9922.feature new file mode 100644 index 000000000000..2c655350c04f --- /dev/null +++ b/changelog.d/9922.feature @@ -0,0 +1 @@ +Experimental support to allow a user who could join a restricted room to view it in the spaces summary. diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index e1b746247469..c17a085a4f93 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -1428,7 +1428,7 @@ async def on_POST( ) return 200, await self.handler.federation_space_summary( - room_id, suggested_only, max_rooms_per_space, exclude_rooms + origin, room_id, suggested_only, max_rooms_per_space, exclude_rooms ) diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py index 5b2fe103e742..a0df16a32f68 100644 --- a/synapse/handlers/event_auth.py +++ b/synapse/handlers/event_auth.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING, Collection, Optional from synapse.api.constants import EventTypes, JoinRules, Membership from synapse.api.errors import AuthError @@ -59,32 +59,76 @@ async def check_restricted_join_rules( ): return + # This is not a room with a restricted join rule, so we don't need to do the + # restricted room specific checks. + # + # Note: We'll be applying the standard join rule checks later, which will + # catch the cases of e.g. trying to join private rooms without an invite. + if not await self.has_restricted_join_rules(state_ids, room_version): + return + + # Get the spaces which allow access to this room and check if the user is + # in any of them. + allowed_spaces = await self.get_spaces_that_allow_join(state_ids) + if not await self.is_user_in_rooms(allowed_spaces, user_id): + raise AuthError( + 403, + "You do not belong to any of the required spaces to join this room.", + ) + + async def has_restricted_join_rules( + self, state_ids: StateMap[str], room_version: RoomVersion + ) -> bool: + """ + Return if the room has the proper join rules set for access via spaces. + + Args: + state_ids: The state of the room as it currently is. + room_version: The room version of the room to query. + + Returns: + True if the proper room version and join rules are set for restricted access. + """ # This only applies to room versions which support the new join rule. if not room_version.msc3083_join_rules: - return + return False # If there's no join rule, then it defaults to invite (so this doesn't apply). join_rules_event_id = state_ids.get((EventTypes.JoinRules, ""), None) if not join_rules_event_id: - return + return False + + # If the join rule is not restricted, this doesn't apply. 
+ join_rules_event = await self._store.get_event(join_rules_event_id) + return join_rules_event.content.get("join_rule") == JoinRules.MSC3083_RESTRICTED + + async def get_spaces_that_allow_join( + self, state_ids: StateMap[str] + ) -> Collection[str]: + """ + Generate a list of spaces which allow access to a room. + + Args: + state_ids: The state of the room as it currently is. + + Returns: + A collection of spaces which provide membership to the room. + """ + # If there's no join rule, then it defaults to invite (so this doesn't apply). + join_rules_event_id = state_ids.get((EventTypes.JoinRules, ""), None) + if not join_rules_event_id: + return () # If the join rule is not restricted, this doesn't apply. join_rules_event = await self._store.get_event(join_rules_event_id) - if join_rules_event.content.get("join_rule") != JoinRules.MSC3083_RESTRICTED: - return # If allowed is of the wrong form, then only allow invited users. allowed_spaces = join_rules_event.content.get("allow", []) if not isinstance(allowed_spaces, list): - allowed_spaces = () - - # Get the list of joined rooms and see if there's an overlap. - if allowed_spaces: - joined_rooms = await self._store.get_rooms_for_user(user_id) - else: - joined_rooms = () + return () # Pull out the other room IDs, invalid data gets filtered. + result = [] for space in allowed_spaces: if not isinstance(space, dict): continue @@ -93,13 +137,31 @@ async def check_restricted_join_rules( if not isinstance(space_id, str): continue - # The user was joined to one of the spaces specified, they can join - # this room! - if space_id in joined_rooms: - return + result.append(space_id) + + return result + + async def is_user_in_rooms(self, room_ids: Collection[str], user_id: str) -> bool: + """ + Check whether a user is a member of any of the provided rooms. + + Args: + room_ids: The rooms to check for membership. + user_id: The user to check. + + Returns: + True if the user is in any of the rooms, false otherwise. + """ + if not room_ids: + return False + + # Get the list of joined rooms and see if there's an overlap. + joined_rooms = await self._store.get_rooms_for_user(user_id) + + # Check each room and see if the user is in it. + for room_id in room_ids: + if room_id in joined_rooms: + return True - # The user was not in any of the required spaces. - raise AuthError( - 403, - "You do not belong to any of the required spaces to join this room.", - ) + # The user was not in any of the rooms. 
+ return False diff --git a/synapse/handlers/space_summary.py b/synapse/handlers/space_summary.py index eb80a5ad67db..8d49ba81646e 100644 --- a/synapse/handlers/space_summary.py +++ b/synapse/handlers/space_summary.py @@ -16,11 +16,16 @@ import logging import re from collections import deque -from typing import TYPE_CHECKING, Iterable, List, Optional, Sequence, Set, Tuple, cast +from typing import TYPE_CHECKING, Iterable, List, Optional, Sequence, Set, Tuple import attr -from synapse.api.constants import EventContentFields, EventTypes, HistoryVisibility +from synapse.api.constants import ( + EventContentFields, + EventTypes, + HistoryVisibility, + Membership, +) from synapse.api.errors import AuthError from synapse.events import EventBase from synapse.events.utils import format_event_for_client_v2 @@ -47,6 +52,7 @@ def __init__(self, hs: "HomeServer"): self._auth = hs.get_auth() self._room_list_handler = hs.get_room_list_handler() self._state_handler = hs.get_state_handler() + self._event_auth_handler = hs.get_event_auth_handler() self._store = hs.get_datastore() self._event_serializer = hs.get_event_client_serializer() self._server_name = hs.hostname @@ -111,28 +117,88 @@ async def get_space_summary( max_children = max_rooms_per_space if processed_rooms else None if is_in_room: - rooms, events = await self._summarize_local_room( - requester, room_id, suggested_only, max_children + room, events = await self._summarize_local_room( + requester, None, room_id, suggested_only, max_children ) + + logger.debug( + "Query of local room %s returned events %s", + room_id, + ["%s->%s" % (ev["room_id"], ev["state_key"]) for ev in events], + ) + + if room: + rooms_result.append(room) else: - rooms, events = await self._summarize_remote_room( + fed_rooms, fed_events = await self._summarize_remote_room( queue_entry, suggested_only, max_children, exclude_rooms=processed_rooms, ) - logger.debug( - "Query of %s returned rooms %s, events %s", - queue_entry.room_id, - [room.get("room_id") for room in rooms], - ["%s->%s" % (ev["room_id"], ev["state_key"]) for ev in events], - ) - - rooms_result.extend(rooms) - - # any rooms returned don't need visiting again - processed_rooms.update(cast(str, room.get("room_id")) for room in rooms) + # The results over federation might include rooms that we, + # as the requesting server, are allowed to see, but the requesting + # user is not permitted to see. + # + # Filter the returned results to only what is accessible to the user. + room_ids = set() + events = [] + for room in fed_rooms: + fed_room_id = room.get("room_id") + if not fed_room_id or not isinstance(fed_room_id, str): + continue + + # The room should only be included in the summary if: + # a. the user is in the room; + # b. the room is world readable; or + # c. the user is in a space that has been granted access to + # the room. + # + # Note that we know the user is not in the root room (which is + # why the remote call was made in the first place), but the user + # could be in one of the children rooms and we just didn't know + # about the link. + include_room = room.get("world_readable") is True + + # Check if the user is a member of any of the allowed spaces + # from the response. + allowed_spaces = room.get("allowed_spaces") + if ( + not include_room + and allowed_spaces + and isinstance(allowed_spaces, list) + ): + include_room = await self._event_auth_handler.is_user_in_rooms( + allowed_spaces, requester + ) + + # Finally, if this isn't the requested room, check ourselves + # if we can access the room.
+ if not include_room and fed_room_id != queue_entry.room_id: + include_room = await self._is_room_accessible( + fed_room_id, requester, None + ) + + # The user can see the room, include it! + if include_room: + rooms_result.append(room) + room_ids.add(fed_room_id) + + # All rooms returned don't need visiting again (even if the user + # didn't have access to them). + processed_rooms.add(fed_room_id) + + for event in fed_events: + if event.get("room_id") in room_ids: + events.append(event) + + logger.debug( + "Query of %s returned rooms %s, events %s", + room_id, + [room.get("room_id") for room in fed_rooms], + ["%s->%s" % (ev["room_id"], ev["state_key"]) for ev in fed_events], + ) # the room we queried may or may not have been returned, but don't process # it again, anyway. @@ -158,10 +224,16 @@ async def get_space_summary( ) processed_events.add(ev_key) + # Before returning to the client, remove the allowed_spaces key for any + # rooms. + for room in rooms_result: + room.pop("allowed_spaces", None) + return {"rooms": rooms_result, "events": events_result} async def federation_space_summary( self, + origin: str, room_id: str, suggested_only: bool, max_rooms_per_space: Optional[int], @@ -171,6 +243,8 @@ async def federation_space_summary( Implementation of the space summary Federation API Args: + origin: The server requesting the spaces summary. + room_id: room id to start the summary at suggested_only: whether we should only return children with the "suggested" @@ -205,14 +279,15 @@ async def federation_space_summary( logger.debug("Processing room %s", room_id) - rooms, events = await self._summarize_local_room( - None, room_id, suggested_only, max_rooms_per_space + room, events = await self._summarize_local_room( + None, origin, room_id, suggested_only, max_rooms_per_space ) processed_rooms.add(room_id) - rooms_result.extend(rooms) - events_result.extend(events) + if room: + rooms_result.append(room) + events_result.extend(events) # add any children to the queue room_queue.extend(edge_event["state_key"] for edge_event in events) @@ -222,10 +297,11 @@ async def federation_space_summary( async def _summarize_local_room( self, requester: Optional[str], + origin: Optional[str], room_id: str, suggested_only: bool, max_children: Optional[int], - ) -> Tuple[Sequence[JsonDict], Sequence[JsonDict]]: + ) -> Tuple[Optional[JsonDict], Sequence[JsonDict]]: """ Generate a room entry and a list of event entries for a given room. @@ -233,6 +309,9 @@ async def _summarize_local_room( requester: The user requesting the summary, if it is a local request. None if this is a federation request. + origin: + The server requesting the summary, if it is a federation request. + None if this is a local request. room_id: The room ID to summarize. suggested_only: True if only suggested children should be returned. Otherwise, all children are returned. @@ -247,8 +326,8 @@ async def _summarize_local_room( An iterable of the sorted children events. This may be limited to a maximum size or may include all children. 
""" - if not await self._is_room_accessible(room_id, requester): - return (), () + if not await self._is_room_accessible(room_id, requester, origin): + return None, () room_entry = await self._build_room_entry(room_id) @@ -272,7 +351,7 @@ async def _summarize_local_room( event_format=format_event_for_client_v2, ) ) - return (room_entry,), events_result + return room_entry, events_result async def _summarize_remote_room( self, @@ -332,13 +411,17 @@ async def _summarize_remote_room( or ev.event_type == EventTypes.SpaceChild ) - async def _is_room_accessible(self, room_id: str, requester: Optional[str]) -> bool: + async def _is_room_accessible( + self, room_id: str, requester: Optional[str], origin: Optional[str] + ) -> bool: """ Calculate whether the room should be shown in the spaces summary. It should be included if: * The requester is joined or invited to the room. + * The requester can join without an invite (per MSC3083). + * The origin server has any user that is joined or invited to the room. * The history visibility is set to world readable. Args: @@ -346,31 +429,75 @@ async def _is_room_accessible(self, room_id: str, requester: Optional[str]) -> b requester: The user requesting the summary, if it is a local request. None if this is a federation request. + origin: + The server requesting the summary, if it is a federation request. + None if this is a local request. Returns: True if the room should be included in the spaces summary. """ + state_ids = await self._store.get_current_state_ids(room_id) + + # If there's no state for the room, it isn't known. + if not state_ids: + logger.info("room %s is unknown, omitting from summary", room_id) + return False + + room_version = await self._store.get_room_version(room_id) # if we have an authenticated requesting user, first check if they are able to view # stripped state in the room. if requester: + member_event_id = state_ids.get((EventTypes.Member, requester), None) + + # If they're in the room they can see info on it. + member_event = None + if member_event_id: + member_event = await self._store.get_event(member_event_id) + if member_event.membership in (Membership.JOIN, Membership.INVITE): + return True + + # Otherwise, check if they should be allowed access via membership in a space. try: - await self._auth.check_user_in_room(room_id, requester) - return True + await self._event_auth_handler.check_restricted_join_rules( + state_ids, room_version, requester, member_event + ) except AuthError: + # The user doesn't have access due to spaces, but might have access + # another way. Keep trying. pass + else: + return True + + # If this is a request over federation, check if the host is in the room or + # is in one of the spaces specified via the join rules. + elif origin: + if await self._auth.check_host_in_room(room_id, origin): + return True + + # Alternately, if the host has a user in any of the spaces specified + # for access, then the host can see this room (and should do filtering + # if the requester cannot see it). 
+ if await self._event_auth_handler.has_restricted_join_rules( + state_ids, room_version + ): + allowed_spaces = ( + await self._event_auth_handler.get_spaces_that_allow_join(state_ids) + ) + for space_id in allowed_spaces: + if await self._auth.check_host_in_room(space_id, origin): + return True # otherwise, check if the room is peekable - hist_vis_ev = await self._state_handler.get_current_state( - room_id, EventTypes.RoomHistoryVisibility, "" - ) - if hist_vis_ev: + hist_vis_event_id = state_ids.get((EventTypes.RoomHistoryVisibility, ""), None) + if hist_vis_event_id: + hist_vis_ev = await self._store.get_event(hist_vis_event_id) hist_vis = hist_vis_ev.content.get("history_visibility") if hist_vis == HistoryVisibility.WORLD_READABLE: return True logger.info( - "room %s is unpeekable and user %s is not a member, omitting from summary", + "room %s is unpeekable and user %s is not a member / not allowed to join, omitting from summary", room_id, requester, ) @@ -395,6 +522,15 @@ async def _build_room_entry(self, room_id: str) -> JsonDict: if not room_type: room_type = create_event.content.get(EventContentFields.MSC1772_ROOM_TYPE) + room_version = await self._store.get_room_version(room_id) + allowed_spaces = None + if await self._event_auth_handler.has_restricted_join_rules( + current_state_ids, room_version + ): + allowed_spaces = await self._event_auth_handler.get_spaces_that_allow_join( + current_state_ids + ) + entry = { "room_id": stats["room_id"], "name": stats["name"], @@ -408,6 +544,7 @@ async def _build_room_entry(self, room_id: str) -> JsonDict: "guest_can_join": stats["guest_access"] == "can_join", "creation_ts": create_event.origin_server_ts, "room_type": room_type, + "allowed_spaces": allowed_spaces, } # Filter out Nones – rather omit the field altogether From 64887f06fcac63e069364d625d984b4951bf1ffc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 20 May 2021 16:11:48 +0100 Subject: [PATCH 160/222] Use ijson to parse the response to `/send_join`, reducing memory usage. (#9958) Instead of parsing the full response to `/send_join` into Python objects (which can be huge for large rooms) and *then* parsing that into events, we instead use ijson to stream parse the response directly into `EventBase` objects. --- changelog.d/9958.feature | 1 + mypy.ini | 3 + synapse/federation/federation_client.py | 28 ++--- synapse/federation/transport/client.py | 85 ++++++++++++- synapse/http/client.py | 7 +- synapse/http/matrixfederationclient.py | 160 ++++++++++++++++++------ synapse/python_dependencies.py | 1 + 7 files changed, 227 insertions(+), 58 deletions(-) create mode 100644 changelog.d/9958.feature diff --git a/changelog.d/9958.feature b/changelog.d/9958.feature new file mode 100644 index 000000000000..d86ba36519f4 --- /dev/null +++ b/changelog.d/9958.feature @@ -0,0 +1 @@ +Reduce memory usage when joining very large rooms over federation. 
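To illustrate the streaming approach the commit message above describes: `ijson.items_coro` builds a push-based parser that is fed raw bytes and hands each complete JSON value under a given prefix to a coroutine, so the response body never needs to be buffered in full. A minimal standalone sketch of the pattern (the `collect` helper and the sample payload are illustrative, not part of this patch):

import ijson

@ijson.coroutine
def collect(out):
    # Receives each fully parsed value matching the requested prefix.
    while True:
        obj = yield
        out.append(obj)

state = []
# "state.item" matches each element of the top-level "state" array.
parser = ijson.items_coro(collect(state), "state.item")

# Feed the body incrementally, e.g. chunk by chunk as it arrives off the wire.
for chunk in (b'{"state": [{"type": "m.room.create"},', b' {"type": "m.room.member"}]}'):
    parser.send(chunk)
parser.close()

assert state == [{"type": "m.room.create"}, {"type": "m.room.member"}]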
diff --git a/mypy.ini b/mypy.ini index ea655a0d4d92..1d1d1ea0f25f 100644 --- a/mypy.ini +++ b/mypy.ini @@ -174,3 +174,6 @@ ignore_missing_imports = True [mypy-pympler.*] ignore_missing_imports = True + +[mypy-ijson.*] +ignore_missing_imports = True diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index a5b6a611952b..e0e9f5d0beeb 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -55,6 +55,7 @@ ) from synapse.events import EventBase, builder from synapse.federation.federation_base import FederationBase, event_from_pdu_json +from synapse.federation.transport.client import SendJoinResponse from synapse.logging.context import make_deferred_yieldable, preserve_fn from synapse.logging.utils import log_function from synapse.types import JsonDict, get_domain_from_id @@ -665,19 +666,10 @@ async def send_join( """ async def send_request(destination) -> Dict[str, Any]: - content = await self._do_send_join(destination, pdu) + response = await self._do_send_join(room_version, destination, pdu) - logger.debug("Got content: %s", content) - - state = [ - event_from_pdu_json(p, room_version, outlier=True) - for p in content.get("state", []) - ] - - auth_chain = [ - event_from_pdu_json(p, room_version, outlier=True) - for p in content.get("auth_chain", []) - ] + state = response.state + auth_chain = response.auth_events pdus = {p.event_id: p for p in itertools.chain(state, auth_chain)} @@ -752,11 +744,14 @@ async def send_request(destination) -> Dict[str, Any]: return await self._try_destination_list("send_join", destinations, send_request) - async def _do_send_join(self, destination: str, pdu: EventBase) -> JsonDict: + async def _do_send_join( + self, room_version: RoomVersion, destination: str, pdu: EventBase + ) -> SendJoinResponse: time_now = self._clock.time_msec() try: return await self.transport_layer.send_join_v2( + room_version=room_version, destination=destination, room_id=pdu.room_id, event_id=pdu.event_id, @@ -771,17 +766,14 @@ async def _do_send_join(self, destination: str, pdu: EventBase) -> JsonDict: logger.debug("Couldn't send_join with the v2 API, falling back to the v1 API") - resp = await self.transport_layer.send_join_v1( + return await self.transport_layer.send_join_v1( + room_version=room_version, destination=destination, room_id=pdu.room_id, event_id=pdu.event_id, content=pdu.get_pdu_json(time_now), ) - # We expect the v1 API to respond with [200, content], so we only return the - # content. 
- return resp[1] - async def send_invite( self, destination: str, diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 497848a2b75b..e93ab83f7ff4 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -17,13 +17,19 @@ import urllib from typing import Any, Dict, List, Optional +import attr +import ijson + from synapse.api.constants import Membership from synapse.api.errors import Codes, HttpResponseException, SynapseError +from synapse.api.room_versions import RoomVersion from synapse.api.urls import ( FEDERATION_UNSTABLE_PREFIX, FEDERATION_V1_PREFIX, FEDERATION_V2_PREFIX, ) +from synapse.events import EventBase, make_event_from_dict +from synapse.http.matrixfederationclient import ByteParser from synapse.logging.utils import log_function from synapse.types import JsonDict @@ -240,21 +246,36 @@ async def make_membership_event( return content @log_function - async def send_join_v1(self, destination, room_id, event_id, content): + async def send_join_v1( + self, + room_version, + destination, + room_id, + event_id, + content, + ) -> "SendJoinResponse": path = _create_v1_path("/send_join/%s/%s", room_id, event_id) response = await self.client.put_json( - destination=destination, path=path, data=content + destination=destination, + path=path, + data=content, + parser=SendJoinParser(room_version, v1_api=True), ) return response @log_function - async def send_join_v2(self, destination, room_id, event_id, content): + async def send_join_v2( + self, room_version, destination, room_id, event_id, content + ) -> "SendJoinResponse": path = _create_v2_path("/send_join/%s/%s", room_id, event_id) response = await self.client.put_json( - destination=destination, path=path, data=content + destination=destination, + path=path, + data=content, + parser=SendJoinParser(room_version, v1_api=False), ) return response @@ -1053,3 +1074,59 @@ def _create_v2_path(path, *args): str """ return _create_path(FEDERATION_V2_PREFIX, path, *args) + + +@attr.s(slots=True, auto_attribs=True) +class SendJoinResponse: + """The parsed response of a `/send_join` request.""" + + auth_events: List[EventBase] + state: List[EventBase] + + +@ijson.coroutine +def _event_list_parser(room_version: RoomVersion, events: List[EventBase]): + """Helper function for use with `ijson.items_coro` to parse an array of + events and add them to the given list. + """ + + while True: + obj = yield + event = make_event_from_dict(obj, room_version) + events.append(event) + + +class SendJoinParser(ByteParser[SendJoinResponse]): + """A parser for the response to `/send_join` requests. + + Args: + room_version: The version of the room. + v1_api: Whether the response is in the v1 format. + """ + + CONTENT_TYPE = "application/json" + + def __init__(self, room_version: RoomVersion, v1_api: bool): + self._response = SendJoinResponse([], []) + + # The V1 API has the shape of `[200, {...}]`, which we handle by + # prefixing with `item.*`. + prefix = "item." 
if v1_api else "" + + self._coro_state = ijson.items_coro( + _event_list_parser(room_version, self._response.state), + prefix + "state.item", + ) + self._coro_auth = ijson.items_coro( + _event_list_parser(room_version, self._response.auth_events), + prefix + "auth_chain.item", + ) + + def write(self, data: bytes) -> int: + self._coro_state.send(data) + self._coro_auth.send(data) + + return len(data) + + def finish(self) -> SendJoinResponse: + return self._response diff --git a/synapse/http/client.py b/synapse/http/client.py index 5f40f16e24d6..1ca6624fd5da 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -813,7 +813,12 @@ def dataReceived(self, data: bytes) -> None: if self.deferred.called: return - self.stream.write(data) + try: + self.stream.write(data) + except Exception: + self.deferred.errback() + return + self.length += len(data) # The first time the maximum size is exceeded, error and cancel the # connection. dataReceived might be called again if data was received diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index bb837b7b1979..f5503b394b37 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import abc import cgi import codecs import logging @@ -19,13 +20,24 @@ import typing import urllib.parse from io import BytesIO, StringIO -from typing import Callable, Dict, List, Optional, Tuple, Union +from typing import ( + Callable, + Dict, + Generic, + List, + Optional, + Tuple, + TypeVar, + Union, + overload, +) import attr import treq from canonicaljson import encode_canonical_json from prometheus_client import Counter from signedjson.sign import sign_json +from typing_extensions import Literal from twisted.internet import defer from twisted.internet.error import DNSLookupError @@ -48,6 +60,7 @@ BlacklistingAgentWrapper, BlacklistingReactorWrapper, BodyExceededMaxSize, + ByteWriteable, encode_query_args, read_body_with_max_size, ) @@ -88,6 +101,27 @@ QueryArgs = Dict[str, Union[str, List[str]]] +T = TypeVar("T") + + +class ByteParser(ByteWriteable, Generic[T], abc.ABC): + """A `ByteWriteable` that has an additional `finish` function that returns + the parsed data. + """ + + CONTENT_TYPE = abc.abstractproperty() # type: str # type: ignore + """The expected content type of the response, e.g. `application/json`. If + the content type doesn't match we fail the request. + """ + + @abc.abstractmethod + def finish(self) -> T: + """Called when response has finished streaming and the parser should + return the final result (or error). 
+ """ + pass + + @attr.s(slots=True, frozen=True) class MatrixFederationRequest: method = attr.ib(type=str) @@ -148,15 +182,32 @@ def get_json(self) -> Optional[JsonDict]: return self.json -async def _handle_json_response( +class JsonParser(ByteParser[Union[JsonDict, list]]): + """A parser that buffers the response and tries to parse it as JSON.""" + + CONTENT_TYPE = "application/json" + + def __init__(self): + self._buffer = StringIO() + self._binary_wrapper = BinaryIOWrapper(self._buffer) + + def write(self, data: bytes) -> int: + return self._binary_wrapper.write(data) + + def finish(self) -> Union[JsonDict, list]: + return json_decoder.decode(self._buffer.getvalue()) + + +async def _handle_response( reactor: IReactorTime, timeout_sec: float, request: MatrixFederationRequest, response: IResponse, start_ms: int, -) -> JsonDict: + parser: ByteParser[T], +) -> T: """ - Reads the JSON body of a response, with a timeout + Reads the body of a response with a timeout and sends it to a parser Args: reactor: twisted reactor, for the timeout @@ -164,23 +215,21 @@ async def _handle_json_response( request: the request that triggered the response response: response to the request start_ms: Timestamp when request was made + parser: The parser for the response Returns: - The parsed JSON response + The parsed response """ + try: - check_content_type_is_json(response.headers) + check_content_type_is(response.headers, parser.CONTENT_TYPE) - buf = StringIO() - d = read_body_with_max_size(response, BinaryIOWrapper(buf), MAX_RESPONSE_SIZE) + d = read_body_with_max_size(response, parser, MAX_RESPONSE_SIZE) d = timeout_deferred(d, timeout=timeout_sec, reactor=reactor) - def parse(_len: int): - return json_decoder.decode(buf.getvalue()) - - d.addCallback(parse) + length = await make_deferred_yieldable(d) - body = await make_deferred_yieldable(d) + value = parser.finish() except BodyExceededMaxSize as e: # The response was too big. logger.warning( @@ -193,9 +242,9 @@ def parse(_len: int): ) raise RequestSendFailed(e, can_retry=False) from e except ValueError as e: - # The JSON content was invalid. + # The content was invalid. logger.warning( - "{%s} [%s] Failed to parse JSON response - %s %s", + "{%s} [%s] Failed to parse response - %s %s", request.txn_id, request.destination, request.method, @@ -225,16 +274,17 @@ def parse(_len: int): time_taken_secs = reactor.seconds() - start_ms / 1000 logger.info( - "{%s} [%s] Completed request: %d %s in %.2f secs - %s %s", + "{%s} [%s] Completed request: %d %s in %.2f secs, got %d bytes - %s %s", request.txn_id, request.destination, response.code, response.phrase.decode("ascii", errors="replace"), time_taken_secs, + length, request.method, request.uri.decode("ascii"), ) - return body + return value class BinaryIOWrapper: @@ -671,6 +721,7 @@ def build_auth_headers( ) return auth_headers + @overload async def put_json( self, destination: str, @@ -683,7 +734,41 @@ async def put_json( ignore_backoff: bool = False, backoff_on_404: bool = False, try_trailing_slash_on_400: bool = False, + parser: Literal[None] = None, ) -> Union[JsonDict, list]: + ... 
+
+    @overload
+    async def put_json(
+        self,
+        destination: str,
+        path: str,
+        args: Optional[QueryArgs] = None,
+        data: Optional[JsonDict] = None,
+        json_data_callback: Optional[Callable[[], JsonDict]] = None,
+        long_retries: bool = False,
+        timeout: Optional[int] = None,
+        ignore_backoff: bool = False,
+        backoff_on_404: bool = False,
+        try_trailing_slash_on_400: bool = False,
+        parser: Optional[ByteParser[T]] = None,
+    ) -> T:
+        ...
+
+    async def put_json(
+        self,
+        destination: str,
+        path: str,
+        args: Optional[QueryArgs] = None,
+        data: Optional[JsonDict] = None,
+        json_data_callback: Optional[Callable[[], JsonDict]] = None,
+        long_retries: bool = False,
+        timeout: Optional[int] = None,
+        ignore_backoff: bool = False,
+        backoff_on_404: bool = False,
+        try_trailing_slash_on_400: bool = False,
+        parser: Optional[ByteParser] = None,
+    ):
         """Sends the specified json data using PUT

         Args:
@@ -716,6 +801,8 @@ async def put_json(
             of the request. Workaround for #3622 in Synapse <= v0.99.3. This
             will be attempted before backing off if backing off has been
             enabled.
+            parser: The parser to use to decode the response. Defaults to
+                parsing as JSON.

         Returns:
             Succeeds when we get a 2xx HTTP response. The
@@ -756,8 +843,16 @@ async def put_json(
         else:
             _sec_timeout = self.default_timeout

-        body = await _handle_json_response(
-            self.reactor, _sec_timeout, request, response, start_ms
+        if parser is None:
+            parser = JsonParser()
+
+        body = await _handle_response(
+            self.reactor,
+            _sec_timeout,
+            request,
+            response,
+            start_ms,
+            parser=parser,
         )

         return body
@@ -830,12 +925,8 @@ async def post_json(
         else:
             _sec_timeout = self.default_timeout

-        body = await _handle_json_response(
-            self.reactor,
-            _sec_timeout,
-            request,
-            response,
-            start_ms,
+        body = await _handle_response(
+            self.reactor, _sec_timeout, request, response, start_ms, parser=JsonParser()
         )
         return body
@@ -907,8 +998,8 @@ async def get_json(
         else:
             _sec_timeout = self.default_timeout

-        body = await _handle_json_response(
-            self.reactor, _sec_timeout, request, response, start_ms
+        body = await _handle_response(
+            self.reactor, _sec_timeout, request, response, start_ms, parser=JsonParser()
         )
         return body
@@ -975,8 +1066,8 @@ async def delete_json(
         else:
             _sec_timeout = self.default_timeout

-        body = await _handle_json_response(
-            self.reactor, _sec_timeout, request, response, start_ms
+        body = await _handle_response(
+            self.reactor, _sec_timeout, request, response, start_ms, parser=JsonParser()
         )
         return body
@@ -1068,16 +1159,16 @@ def _flatten_response_never_received(e):
         return repr(e)

-def check_content_type_is_json(headers: Headers) -> None:
+def check_content_type_is(headers: Headers, expected_content_type: str) -> None:
     """
     Check that a set of HTTP headers have a Content-Type header, and that it
-    is application/json.
+    is the expected value.
Args:
         headers: headers to check

     Raises:
-        RequestSendFailed: if the Content-Type header is missing or isn't JSON
+        RequestSendFailed: if the Content-Type header is missing or doesn't match
     """
     content_type_headers = headers.getRawHeaders(b"Content-Type")
@@ -1089,11 +1180,10 @@ def check_content_type_is_json(headers: Headers) -> None:
     c_type = content_type_headers[0].decode("ascii")  # only the first header
     val, options = cgi.parse_header(c_type)
-    if val != "application/json":
+    if val != expected_content_type:
         raise RequestSendFailed(
             RuntimeError(
-                "Remote server sent Content-Type header of '%s', not 'application/json'"
-                % c_type,
+                f"Remote server sent Content-Type header of '{c_type}', not '{expected_content_type}'",
             ),
             can_retry=False,
         )
diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py
index 989523c82374..546231bec0c9 100644
--- a/synapse/python_dependencies.py
+++ b/synapse/python_dependencies.py
@@ -87,6 +87,7 @@
     # We enforce that we have a `cryptography` version that bundles an `openssl`
     # with the latest security patches.
     "cryptography>=3.4.7",
+    "ijson>=3.0",
 ]

 CONDITIONAL_REQUIREMENTS = {

From 1c6a19002cb56cf93bc920854c81ae88bd7308ac Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 20 May 2021 16:25:11 +0100
Subject: [PATCH 161/222] Add `Keyring.verify_events_for_server` and reduce
 memory usage (#10018)

Also add support for giving a callback to generate the JSON object to
verify. This should reduce memory usage, as we no longer have the event
in memory in dict form (which has a large memory footprint) for extended
periods of time.
---
 changelog.d/10018.misc                |  1 +
 synapse/crypto/keyring.py             | 98 ++++++++++++++++++++++++---
 synapse/federation/federation_base.py | 17 ++---
 3 files changed, 94 insertions(+), 22 deletions(-)
 create mode 100644 changelog.d/10018.misc

diff --git a/changelog.d/10018.misc b/changelog.d/10018.misc
new file mode 100644
index 000000000000..eaf9f64867a6
--- /dev/null
+++ b/changelog.d/10018.misc
@@ -0,0 +1 @@
+Reduce memory usage when verifying signatures on large numbers of events at once.
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py
index 5f18ef77489d..6fc07129788f 100644
--- a/synapse/crypto/keyring.py
+++ b/synapse/crypto/keyring.py
@@ -17,7 +17,7 @@
 import logging
 import urllib
 from collections import defaultdict
-from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Set, Tuple
+from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Optional, Set, Tuple

 import attr
 from signedjson.key import (
@@ -42,6 +42,8 @@
     SynapseError,
 )
 from synapse.config.key import TrustedKeyServer
+from synapse.events import EventBase
+from synapse.events.utils import prune_event_dict
 from synapse.logging.context import (
     PreserveLoggingContext,
     make_deferred_yieldable,
@@ -69,7 +71,11 @@ class VerifyJsonRequest:

     Attributes:
         server_name: The name of the server to verify against.

-        json_object: The JSON object to verify.
+        get_json_object: A callback to fetch the JSON object to verify.
+            A callback is used to allow deferring the creation of the JSON
+            object to verify until needed, e.g. for events we can defer
+            creating the redacted copy. This reduces the memory usage when
+            there are large numbers of in flight requests.

         minimum_valid_until_ts: time at which we require the signing key to
            be valid.
(0 implies we don't care) @@ -88,14 +94,50 @@ class VerifyJsonRequest: """ server_name = attr.ib(type=str) - json_object = attr.ib(type=JsonDict) + get_json_object = attr.ib(type=Callable[[], JsonDict]) minimum_valid_until_ts = attr.ib(type=int) request_name = attr.ib(type=str) - key_ids = attr.ib(init=False, type=List[str]) + key_ids = attr.ib(type=List[str]) key_ready = attr.ib(default=attr.Factory(defer.Deferred), type=defer.Deferred) - def __attrs_post_init__(self): - self.key_ids = signature_ids(self.json_object, self.server_name) + @staticmethod + def from_json_object( + server_name: str, + json_object: JsonDict, + minimum_valid_until_ms: int, + request_name: str, + ): + """Create a VerifyJsonRequest to verify all signatures on a signed JSON + object for the given server. + """ + key_ids = signature_ids(json_object, server_name) + return VerifyJsonRequest( + server_name, + lambda: json_object, + minimum_valid_until_ms, + request_name=request_name, + key_ids=key_ids, + ) + + @staticmethod + def from_event( + server_name: str, + event: EventBase, + minimum_valid_until_ms: int, + ): + """Create a VerifyJsonRequest to verify all signatures on an event + object for the given server. + """ + key_ids = list(event.signatures.get(server_name, [])) + return VerifyJsonRequest( + server_name, + # We defer creating the redacted json object, as it uses a lot more + # memory than the Event object itself. + lambda: prune_event_dict(event.room_version, event.get_pdu_json()), + minimum_valid_until_ms, + request_name=event.event_id, + key_ids=key_ids, + ) class KeyLookupError(ValueError): @@ -147,8 +189,13 @@ def verify_json_for_server( Deferred[None]: completes if the the object was correctly signed, otherwise errbacks with an error """ - req = VerifyJsonRequest(server_name, json_object, validity_time, request_name) - requests = (req,) + request = VerifyJsonRequest.from_json_object( + server_name, + json_object, + validity_time, + request_name, + ) + requests = (request,) return make_deferred_yieldable(self._verify_objects(requests)[0]) def verify_json_objects_for_server( @@ -175,10 +222,41 @@ def verify_json_objects_for_server( logcontext. """ return self._verify_objects( - VerifyJsonRequest(server_name, json_object, validity_time, request_name) + VerifyJsonRequest.from_json_object( + server_name, json_object, validity_time, request_name + ) for server_name, json_object, validity_time, request_name in server_and_json ) + def verify_events_for_server( + self, server_and_events: Iterable[Tuple[str, EventBase, int]] + ) -> List[defer.Deferred]: + """Bulk verification of signatures on events. + + Args: + server_and_events: + Iterable of `(server_name, event, validity_time)` tuples. + + `server_name` is which server we are verifying the signature for + on the event. + + `event` is the event that we'll verify the signatures of for + the given `server_name`. + + `validity_time` is a timestamp at which the signing key must be + valid. + + Returns: + List: for each input triplet, a deferred indicating success + or failure to verify each event's signature for the given + server_name. The deferreds run their callbacks in the sentinel + logcontext. 
+ """ + return self._verify_objects( + VerifyJsonRequest.from_event(server_name, event, validity_time) + for server_name, event, validity_time in server_and_events + ) + def _verify_objects( self, verify_requests: Iterable[VerifyJsonRequest] ) -> List[defer.Deferred]: @@ -892,7 +970,7 @@ async def _handle_key_deferred(verify_request: VerifyJsonRequest) -> None: with PreserveLoggingContext(): _, key_id, verify_key = await verify_request.key_ready - json_object = verify_request.json_object + json_object = verify_request.get_json_object() try: verify_signed_json(json_object, server_name, verify_key) diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 949dcd46141f..3fe496dcd330 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -137,11 +137,7 @@ def errback(failure: Failure, pdu: EventBase): return deferreds -class PduToCheckSig( - namedtuple( - "PduToCheckSig", ["pdu", "redacted_pdu_json", "sender_domain", "deferreds"] - ) -): +class PduToCheckSig(namedtuple("PduToCheckSig", ["pdu", "sender_domain", "deferreds"])): pass @@ -184,7 +180,6 @@ def _check_sigs_on_pdus( pdus_to_check = [ PduToCheckSig( pdu=p, - redacted_pdu_json=prune_event(p).get_pdu_json(), sender_domain=get_domain_from_id(p.sender), deferreds=[], ) @@ -195,13 +190,12 @@ def _check_sigs_on_pdus( # (except if its a 3pid invite, in which case it may be sent by any server) pdus_to_check_sender = [p for p in pdus_to_check if not _is_invite_via_3pid(p.pdu)] - more_deferreds = keyring.verify_json_objects_for_server( + more_deferreds = keyring.verify_events_for_server( [ ( p.sender_domain, - p.redacted_pdu_json, + p.pdu, p.pdu.origin_server_ts if room_version.enforce_key_validity else 0, - p.pdu.event_id, ) for p in pdus_to_check_sender ] @@ -230,13 +224,12 @@ def sender_err(e, pdu_to_check): if p.sender_domain != get_domain_from_id(p.pdu.event_id) ] - more_deferreds = keyring.verify_json_objects_for_server( + more_deferreds = keyring.verify_events_for_server( [ ( get_domain_from_id(p.pdu.event_id), - p.redacted_pdu_json, + p.pdu, p.pdu.origin_server_ts if room_version.enforce_key_validity else 0, - p.pdu.event_id, ) for p in pdus_to_check_event_id ] From 7958eadcd16087e6aaf7cce240c1f82856e0bcc7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 21 May 2021 11:20:51 +0100 Subject: [PATCH 162/222] Add a batching queue implementation. (#10017) --- changelog.d/10017.misc | 1 + synapse/util/batching_queue.py | 153 +++++++++++++++++++++++++++ tests/util/test_batching_queue.py | 169 ++++++++++++++++++++++++++++++ 3 files changed, 323 insertions(+) create mode 100644 changelog.d/10017.misc create mode 100644 synapse/util/batching_queue.py create mode 100644 tests/util/test_batching_queue.py diff --git a/changelog.d/10017.misc b/changelog.d/10017.misc new file mode 100644 index 000000000000..4777b7fb57b1 --- /dev/null +++ b/changelog.d/10017.misc @@ -0,0 +1 @@ +Add a batching queue implementation. diff --git a/synapse/util/batching_queue.py b/synapse/util/batching_queue.py new file mode 100644 index 000000000000..44bbb7b1a8ef --- /dev/null +++ b/synapse/util/batching_queue.py @@ -0,0 +1,153 @@ +# Copyright 2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+from typing import (
+    Awaitable,
+    Callable,
+    Dict,
+    Generic,
+    Hashable,
+    List,
+    Set,
+    Tuple,
+    TypeVar,
+)
+
+from twisted.internet import defer
+
+from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
+from synapse.metrics import LaterGauge
+from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.util import Clock
+
+logger = logging.getLogger(__name__)
+
+
+V = TypeVar("V")
+R = TypeVar("R")
+
+
+class BatchingQueue(Generic[V, R]):
+    """A queue that batches up work, calling the provided processing function
+    with all pending work (for a given key).
+
+    The provided processing function will only be called once at a time for each
+    key. It will be called the next reactor tick after `add_to_queue` has been
+    called, and will keep being called until the queue has been drained (for the
+    given key).
+
+    Note that the return value of `add_to_queue` will be the return value of the
+    processing function that processed the given item. This means that the
+    returned value will likely include data for other items that were in the
+    batch.
+    """
+
+    def __init__(
+        self,
+        name: str,
+        clock: Clock,
+        process_batch_callback: Callable[[List[V]], Awaitable[R]],
+    ):
+        self._name = name
+        self._clock = clock
+
+        # The set of keys currently being processed.
+        self._processing_keys = set()  # type: Set[Hashable]
+
+        # The currently pending batch of values by key, with a Deferred to call
+        # with the result of the corresponding `_process_batch_callback` call.
+        self._next_values = {}  # type: Dict[Hashable, List[Tuple[V, defer.Deferred]]]
+
+        # The function to call with batches of values.
+        self._process_batch_callback = process_batch_callback
+
+        LaterGauge(
+            "synapse_util_batching_queue_number_queued",
+            "The number of items waiting in the queue across all keys",
+            labels=("name",),
+            caller=lambda: sum(len(v) for v in self._next_values.values()),
+        )
+
+        LaterGauge(
+            "synapse_util_batching_queue_number_of_keys",
+            "The number of distinct keys that have items queued",
+            labels=("name",),
+            caller=lambda: len(self._next_values),
+        )
+
+    async def add_to_queue(self, value: V, key: Hashable = ()) -> R:
+        """Adds the value to the queue with the given key, returning the result
+        of the processing function for the batch that included the given value.
+
+        The optional `key` argument allows sharding the queue by some key. The
+        queues will then be processed in parallel, i.e. the process batch
+        function will be called in parallel with batched values from a single
+        key.
+        """
+
+        # First we create a deferred and add it and the value to the list of
+        # pending items.
+        d = defer.Deferred()
+        self._next_values.setdefault(key, []).append((value, d))
+
+        # If we're not currently processing the key, fire off a background
+        # process to start processing.
+ if key not in self._processing_keys: + run_as_background_process(self._name, self._process_queue, key) + + return await make_deferred_yieldable(d) + + async def _process_queue(self, key: Hashable) -> None: + """A background task to repeatedly pull things off the queue for the + given key and call the `self._process_batch_callback` with the values. + """ + + try: + if key in self._processing_keys: + return + + self._processing_keys.add(key) + + while True: + # We purposefully wait a reactor tick to allow us to batch + # together requests that we're about to receive. A common + # pattern is to call `add_to_queue` multiple times at once, and + # deferring to the next reactor tick allows us to batch all of + # those up. + await self._clock.sleep(0) + + next_values = self._next_values.pop(key, []) + if not next_values: + # We've exhausted the queue. + break + + try: + values = [value for value, _ in next_values] + results = await self._process_batch_callback(values) + + for _, deferred in next_values: + with PreserveLoggingContext(): + deferred.callback(results) + + except Exception as e: + for _, deferred in next_values: + if deferred.called: + continue + + with PreserveLoggingContext(): + deferred.errback(e) + + finally: + self._processing_keys.discard(key) diff --git a/tests/util/test_batching_queue.py b/tests/util/test_batching_queue.py new file mode 100644 index 000000000000..5def1e56c9bb --- /dev/null +++ b/tests/util/test_batching_queue.py @@ -0,0 +1,169 @@ +# Copyright 2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from twisted.internet import defer + +from synapse.logging.context import make_deferred_yieldable +from synapse.util.batching_queue import BatchingQueue + +from tests.server import get_clock +from tests.unittest import TestCase + + +class BatchingQueueTestCase(TestCase): + def setUp(self): + self.clock, hs_clock = get_clock() + + self._pending_calls = [] + self.queue = BatchingQueue("test_queue", hs_clock, self._process_queue) + + async def _process_queue(self, values): + d = defer.Deferred() + self._pending_calls.append((values, d)) + return await make_deferred_yieldable(d) + + def test_simple(self): + """Tests the basic case of calling `add_to_queue` once and having + `_process_queue` return. + """ + + self.assertFalse(self._pending_calls) + + queue_d = defer.ensureDeferred(self.queue.add_to_queue("foo")) + + # The queue should wait a reactor tick before calling the processing + # function. + self.assertFalse(self._pending_calls) + self.assertFalse(queue_d.called) + + # We should see a call to `_process_queue` after a reactor tick. + self.clock.pump([0]) + + self.assertEqual(len(self._pending_calls), 1) + self.assertEqual(self._pending_calls[0][0], ["foo"]) + self.assertFalse(queue_d.called) + + # Return value of the `_process_queue` should be propagated back. 
+ self._pending_calls.pop()[1].callback("bar") + + self.assertEqual(self.successResultOf(queue_d), "bar") + + def test_batching(self): + """Test that multiple calls at the same time get batched up into one + call to `_process_queue`. + """ + + self.assertFalse(self._pending_calls) + + queue_d1 = defer.ensureDeferred(self.queue.add_to_queue("foo1")) + queue_d2 = defer.ensureDeferred(self.queue.add_to_queue("foo2")) + + self.clock.pump([0]) + + # We should see only *one* call to `_process_queue` + self.assertEqual(len(self._pending_calls), 1) + self.assertEqual(self._pending_calls[0][0], ["foo1", "foo2"]) + self.assertFalse(queue_d1.called) + self.assertFalse(queue_d2.called) + + # Return value of the `_process_queue` should be propagated back to both. + self._pending_calls.pop()[1].callback("bar") + + self.assertEqual(self.successResultOf(queue_d1), "bar") + self.assertEqual(self.successResultOf(queue_d2), "bar") + + def test_queuing(self): + """Test that we queue up requests while a `_process_queue` is being + called. + """ + + self.assertFalse(self._pending_calls) + + queue_d1 = defer.ensureDeferred(self.queue.add_to_queue("foo1")) + self.clock.pump([0]) + + queue_d2 = defer.ensureDeferred(self.queue.add_to_queue("foo2")) + + # We should see only *one* call to `_process_queue` + self.assertEqual(len(self._pending_calls), 1) + self.assertEqual(self._pending_calls[0][0], ["foo1"]) + self.assertFalse(queue_d1.called) + self.assertFalse(queue_d2.called) + + # Return value of the `_process_queue` should be propagated back to the + # first. + self._pending_calls.pop()[1].callback("bar1") + + self.assertEqual(self.successResultOf(queue_d1), "bar1") + self.assertFalse(queue_d2.called) + + # We should now see a second call to `_process_queue` + self.clock.pump([0]) + self.assertEqual(len(self._pending_calls), 1) + self.assertEqual(self._pending_calls[0][0], ["foo2"]) + self.assertFalse(queue_d2.called) + + # Return value of the `_process_queue` should be propagated back to the + # second. + self._pending_calls.pop()[1].callback("bar2") + + self.assertEqual(self.successResultOf(queue_d2), "bar2") + + def test_different_keys(self): + """Test that calls to different keys get processed in parallel.""" + + self.assertFalse(self._pending_calls) + + queue_d1 = defer.ensureDeferred(self.queue.add_to_queue("foo1", key=1)) + self.clock.pump([0]) + queue_d2 = defer.ensureDeferred(self.queue.add_to_queue("foo2", key=2)) + self.clock.pump([0]) + + # We queue up another item with key=2 to check that we will keep taking + # things off the queue. + queue_d3 = defer.ensureDeferred(self.queue.add_to_queue("foo3", key=2)) + + # We should see two calls to `_process_queue` + self.assertEqual(len(self._pending_calls), 2) + self.assertEqual(self._pending_calls[0][0], ["foo1"]) + self.assertEqual(self._pending_calls[1][0], ["foo2"]) + self.assertFalse(queue_d1.called) + self.assertFalse(queue_d2.called) + self.assertFalse(queue_d3.called) + + # Return value of the `_process_queue` should be propagated back to the + # first. + self._pending_calls.pop(0)[1].callback("bar1") + + self.assertEqual(self.successResultOf(queue_d1), "bar1") + self.assertFalse(queue_d2.called) + self.assertFalse(queue_d3.called) + + # Return value of the `_process_queue` should be propagated back to the + # second. 
+        self._pending_calls.pop()[1].callback("bar2")
+
+        self.assertEqual(self.successResultOf(queue_d2), "bar2")
+        self.assertFalse(queue_d3.called)
+
+        # We should now see a call to `_process_queue` for `foo3`
+        self.clock.pump([0])
+        self.assertEqual(len(self._pending_calls), 1)
+        self.assertEqual(self._pending_calls[0][0], ["foo3"])
+        self.assertFalse(queue_d3.called)
+
+        # Return value of the `_process_queue` should be propagated back to the
+        # third deferred.
+        self._pending_calls.pop()[1].callback("bar4")
+
+        self.assertEqual(self.successResultOf(queue_d3), "bar4")

From 6a8643ff3da905568e3f2ec047182753352e39d1 Mon Sep 17 00:00:00 2001
From: Marek Matys <57749215+thermaq@users.noreply.github.com>
Date: Fri, 21 May 2021 13:02:06 +0200
Subject: [PATCH 163/222] Fixed removal of new presence stream states (#10014)

Fixes: https://github.com/matrix-org/synapse/issues/9962

This is a fix for the above problem. I fixed it by swapping the order of
insertion of new records and deletion of old ones. This ensures that we
don't delete fresh database records, as we do deletes before inserts.

Signed-off-by: Marek Matys
---
 changelog.d/10014.bugfix                   |  1 +
 synapse/storage/databases/main/presence.py | 18 +++++++++---------
 2 files changed, 10 insertions(+), 9 deletions(-)
 create mode 100644 changelog.d/10014.bugfix

diff --git a/changelog.d/10014.bugfix b/changelog.d/10014.bugfix
new file mode 100644
index 000000000000..7cf3603f94df
--- /dev/null
+++ b/changelog.d/10014.bugfix
@@ -0,0 +1 @@
+Fixed deletion of new presence stream states from database.
diff --git a/synapse/storage/databases/main/presence.py b/synapse/storage/databases/main/presence.py
index 669a2af8845a..6a2baa7841e0 100644
--- a/synapse/storage/databases/main/presence.py
+++ b/synapse/storage/databases/main/presence.py
@@ -97,6 +97,15 @@ def _update_presence_txn(self, txn, stream_orderings, presence_states):
             )
             txn.call_after(self._get_presence_for_user.invalidate, (state.user_id,))

+        # Delete old rows to stop database from getting really big
+        sql = "DELETE FROM presence_stream WHERE stream_id < ? AND "
+
+        for states in batch_iter(presence_states, 50):
+            clause, args = make_in_list_sql_clause(
+                self.database_engine, "user_id", [s.user_id for s in states]
+            )
+            txn.execute(sql + clause, [stream_id] + list(args))
+
         # Actually insert new rows
         self.db_pool.simple_insert_many_txn(
             txn,
@@ -117,15 +126,6 @@ def _update_presence_txn(self, txn, stream_orderings, presence_states):
             ],
         )

-        # Delete old rows to stop database from getting really big
-        sql = "DELETE FROM presence_stream WHERE stream_id < ? AND "
-
-        for states in batch_iter(presence_states, 50):
-            clause, args = make_in_list_sql_clause(
-                self.database_engine, "user_id", [s.user_id for s in states]
-            )
-            txn.execute(sql + clause, [stream_id] + list(args))
-
     async def get_all_presence_updates(
         self, instance_name: str, last_id: int, current_id: int, limit: int
     ) -> Tuple[List[Tuple[int, list]], int, bool]:

From c5413d0e9ef6afa9cb5140774acfb4954fe0bf37 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Fri, 21 May 2021 12:02:01 -0400
Subject: [PATCH 164/222] Remove unused properties from the SpaceSummaryHandler.
(#10038) --- changelog.d/10038.feature | 1 + synapse/handlers/space_summary.py | 2 -- 2 files changed, 1 insertion(+), 2 deletions(-) create mode 100644 changelog.d/10038.feature diff --git a/changelog.d/10038.feature b/changelog.d/10038.feature new file mode 100644 index 000000000000..2c655350c04f --- /dev/null +++ b/changelog.d/10038.feature @@ -0,0 +1 @@ +Experimental support to allow a user who could join a restricted room to view it in the spaces summary. diff --git a/synapse/handlers/space_summary.py b/synapse/handlers/space_summary.py index 8d49ba81646e..abd9ddecca20 100644 --- a/synapse/handlers/space_summary.py +++ b/synapse/handlers/space_summary.py @@ -50,8 +50,6 @@ class SpaceSummaryHandler: def __init__(self, hs: "HomeServer"): self._clock = hs.get_clock() self._auth = hs.get_auth() - self._room_list_handler = hs.get_room_list_handler() - self._state_handler = hs.get_state_handler() self._event_auth_handler = hs.get_event_auth_handler() self._store = hs.get_datastore() self._event_serializer = hs.get_event_client_serializer() From 21bd230831022a69ce90f334e869edd151155067 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 21 May 2021 17:29:14 +0100 Subject: [PATCH 165/222] Add a test for update_presence (#10033) https://github.com/matrix-org/synapse/issues/9962 uncovered that we accidentally removed all but one of the presence updates that we store in the database when persisting multiple updates. This could cause users' presence state to be stale. The bug was fixed in #10014, and this PR just adds a test that failed on the old code, and was used to initially verify the bug. The test attempts to insert some presence into the database in a batch using `PresenceStore.update_presence`, and then simply pulls it out again. --- changelog.d/10033.bugfix | 1 + tests/handlers/test_presence.py | 47 ++++++++++++++++++++++++++++++++- 2 files changed, 47 insertions(+), 1 deletion(-) create mode 100644 changelog.d/10033.bugfix diff --git a/changelog.d/10033.bugfix b/changelog.d/10033.bugfix new file mode 100644 index 000000000000..587d839b8c9d --- /dev/null +++ b/changelog.d/10033.bugfix @@ -0,0 +1 @@ +Fixed deletion of new presence stream states from database. 
\ No newline at end of file diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index 1ffab709fcb8..d90a9fec91fa 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -32,13 +32,19 @@ handle_timeout, handle_update, ) +from synapse.rest import admin from synapse.rest.client.v1 import room from synapse.types import UserID, get_domain_from_id from tests import unittest -class PresenceUpdateTestCase(unittest.TestCase): +class PresenceUpdateTestCase(unittest.HomeserverTestCase): + servlets = [admin.register_servlets] + + def prepare(self, reactor, clock, homeserver): + self.store = homeserver.get_datastore() + def test_offline_to_online(self): wheel_timer = Mock() user_id = "@foo:bar" @@ -292,6 +298,45 @@ def test_online_to_idle(self): any_order=True, ) + def test_persisting_presence_updates(self): + """Tests that the latest presence state for each user is persisted correctly""" + # Create some test users and presence states for them + presence_states = [] + for i in range(5): + user_id = self.register_user(f"user_{i}", "password") + + presence_state = UserPresenceState( + user_id=user_id, + state="online", + last_active_ts=1, + last_federation_update_ts=1, + last_user_sync_ts=1, + status_msg="I'm online!", + currently_active=True, + ) + presence_states.append(presence_state) + + # Persist these presence updates to the database + self.get_success(self.store.update_presence(presence_states)) + + # Check that each update is present in the database + db_presence_states = self.get_success( + self.store.get_all_presence_updates( + instance_name="master", + last_id=0, + current_id=len(presence_states) + 1, + limit=len(presence_states), + ) + ) + + # Extract presence update user ID and state information into lists of tuples + db_presence_states = [(ps[0], ps[1]) for _, ps in db_presence_states[0]] + presence_states = [(ps.user_id, ps.state) for ps in presence_states] + + # Compare what we put into the storage with what we got out. + # They should be identical. + self.assertEqual(presence_states, db_presence_states) + class PresenceTimeoutTestCase(unittest.TestCase): def test_idle_timer(self): From e8ac9ac8ca18fe3456bfeba7a5883be1c991b2a6 Mon Sep 17 00:00:00 2001 From: Michael Telatynski <7t3chguy@gmail.com> Date: Fri, 21 May 2021 17:31:59 +0100 Subject: [PATCH 166/222] Fix /upload 500'ing when presented a very large image (#10029) * Fix /upload 500'ing when presented a very large image Catch DecompressionBombError and re-raise as ThumbnailErrors * Set PIL's MAX_IMAGE_PIXELS to match homeserver.yaml to get it to bomb out quicker, to load less into memory in the case of super large images * Add changelog entry for 10029 --- changelog.d/10029.bugfix | 1 + synapse/rest/media/v1/media_repository.py | 2 ++ synapse/rest/media/v1/thumbnailer.py | 9 +++++++++ 3 files changed, 12 insertions(+) create mode 100644 changelog.d/10029.bugfix diff --git a/changelog.d/10029.bugfix b/changelog.d/10029.bugfix new file mode 100644 index 000000000000..c214cbdaecc1 --- /dev/null +++ b/changelog.d/10029.bugfix @@ -0,0 +1 @@ +Fixed a bug with very high resolution image uploads throwing internal server errors. 
\ No newline at end of file
diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py
index e8a875b90093..21c43c340c3b 100644
--- a/synapse/rest/media/v1/media_repository.py
+++ b/synapse/rest/media/v1/media_repository.py
@@ -76,6 +76,8 @@ def __init__(self, hs: "HomeServer"):
         self.max_upload_size = hs.config.max_upload_size
         self.max_image_pixels = hs.config.max_image_pixels

+        Thumbnailer.set_limits(self.max_image_pixels)
+
         self.primary_base_path = hs.config.media_store_path  # type: str

         self.filepaths = MediaFilePaths(self.primary_base_path)  # type: MediaFilePaths
diff --git a/synapse/rest/media/v1/thumbnailer.py b/synapse/rest/media/v1/thumbnailer.py
index 37fe582390e3..a65e9e18022e 100644
--- a/synapse/rest/media/v1/thumbnailer.py
+++ b/synapse/rest/media/v1/thumbnailer.py
@@ -40,6 +40,10 @@ class Thumbnailer:

     FORMATS = {"image/jpeg": "JPEG", "image/png": "PNG"}

+    @staticmethod
+    def set_limits(max_image_pixels: int):
+        Image.MAX_IMAGE_PIXELS = max_image_pixels
+
     def __init__(self, input_path: str):
         try:
             self.image = Image.open(input_path)
@@ -47,6 +51,11 @@ def __init__(self, input_path: str):
             # If an error occurs opening the image, a thumbnail won't be able to
             # be generated.
             raise ThumbnailError from e
+        except Image.DecompressionBombError as e:
+            # If an image decompression bomb error occurs opening the image,
+            # then the image exceeds the pixel limit and a thumbnail won't
+            # be able to be generated.
+            raise ThumbnailError from e

         self.width, self.height = self.image.size
         self.transpose_method = None

From 3e831f24ffc887e174f67ff7b1cfe3a429b7b5c1 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Fri, 21 May 2021 17:57:08 +0100
Subject: [PATCH 167/222] Don't hammer the database for destination retry
 timings every ~5mins (#10036)

---
 changelog.d/10036.misc                        |  1 +
 synapse/app/generic_worker.py                 |  2 -
 synapse/federation/transport/server.py        |  2 +-
 .../replication/slave/storage/transactions.py | 21 ------
 synapse/storage/databases/main/__init__.py    |  4 +-
 .../storage/databases/main/transactions.py    | 66 +++++++++++--------
 synapse/util/retryutils.py                    |  8 +--
 tests/handlers/test_typing.py                 |  8 +--
 tests/storage/test_transactions.py            |  8 ++-
 tests/util/test_retryutils.py                 | 18 +++--
 10 files changed, 62 insertions(+), 76 deletions(-)
 create mode 100644 changelog.d/10036.misc
 delete mode 100644 synapse/replication/slave/storage/transactions.py

diff --git a/changelog.d/10036.misc b/changelog.d/10036.misc
new file mode 100644
index 000000000000..d2cf1e54734e
--- /dev/null
+++ b/changelog.d/10036.misc
@@ -0,0 +1 @@
+Properly invalidate caches for destination retry timings on every change (instead of expiring entries every 5 minutes).
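The diffs that follow swap a time-expiring cache for one whose entries are kept until they are explicitly invalidated when the timings are written. A minimal, self-contained sketch of that pattern, assuming simplified store internals rather than the real Synapse classes (only the `DestinationRetryTimings` field names mirror the patch):

from typing import Dict, Optional

import attr


@attr.s(slots=True, frozen=True, auto_attribs=True)
class DestinationRetryTimings:
    failure_ts: int
    retry_last_ts: int
    retry_interval: int


class SketchStore:
    """Illustrative stand-in for the real transaction store."""

    def __init__(self) -> None:
        self._db: Dict[str, DestinationRetryTimings] = {}  # pretend table
        self._cache: Dict[str, Optional[DestinationRetryTimings]] = {}

    def get_destination_retry_timings(
        self, destination: str
    ) -> Optional[DestinationRetryTimings]:
        # Entries stay valid until invalidated, so repeated reads no longer
        # go back to the database every five minutes.
        if destination not in self._cache:
            self._cache[destination] = self._db.get(destination)
        return self._cache[destination]

    def set_destination_retry_timings(
        self, destination: str, timings: DestinationRetryTimings
    ) -> None:
        self._db[destination] = timings
        # Explicit invalidation on write replaces the old timed expiry.
        self._cache.pop(destination, None)

The trade-off is that every writer must remember to invalidate; the actual patch routes this through `_invalidate_cache_and_stream` so that worker processes are invalidated as well.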
diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index f730cdbd78f4..91ad326f193f 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -61,7 +61,6 @@ from synapse.replication.slave.storage.receipts import SlavedReceiptsStore from synapse.replication.slave.storage.registration import SlavedRegistrationStore from synapse.replication.slave.storage.room import RoomStore -from synapse.replication.slave.storage.transactions import SlavedTransactionStore from synapse.rest.admin import register_servlets_for_media_repo from synapse.rest.client.v1 import events, login, presence, room from synapse.rest.client.v1.initial_sync import InitialSyncRestServlet @@ -237,7 +236,6 @@ class GenericWorkerSlavedStore( DirectoryStore, SlavedApplicationServiceStore, SlavedRegistrationStore, - SlavedTransactionStore, SlavedProfileStore, SlavedClientIpStore, SlavedFilteringStore, diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index c17a085a4f93..9d50b05d0152 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -160,7 +160,7 @@ async def authenticate_request(self, request, content): # If we get a valid signed request from the other side, its probably # alive retry_timings = await self.store.get_destination_retry_timings(origin) - if retry_timings and retry_timings["retry_last_ts"]: + if retry_timings and retry_timings.retry_last_ts: run_in_background(self._reset_retry_timings, origin) return origin diff --git a/synapse/replication/slave/storage/transactions.py b/synapse/replication/slave/storage/transactions.py deleted file mode 100644 index a59e54392441..000000000000 --- a/synapse/replication/slave/storage/transactions.py +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2015, 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from synapse.storage.databases.main.transactions import TransactionStore - -from ._base import BaseSlavedStore - - -class SlavedTransactionStore(TransactionStore, BaseSlavedStore): - pass diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index 49c7606d5138..9cce62ae6c5b 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -67,7 +67,7 @@ from .stats import StatsStore from .stream import StreamStore from .tags import TagsStore -from .transactions import TransactionStore +from .transactions import TransactionWorkerStore from .ui_auth import UIAuthStore from .user_directory import UserDirectoryStore from .user_erasure_store import UserErasureStore @@ -83,7 +83,7 @@ class DataStore( StreamStore, ProfileStore, PresenceStore, - TransactionStore, + TransactionWorkerStore, DirectoryStore, KeyStore, StateStore, diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py index 82335e7a9dad..d211c423b2ce 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py @@ -16,13 +16,15 @@ from collections import namedtuple from typing import Iterable, List, Optional, Tuple +import attr from canonicaljson import encode_canonical_json from synapse.metrics.background_process_metrics import wrap_as_background_process -from synapse.storage._base import SQLBaseStore, db_to_json +from synapse.storage._base import db_to_json from synapse.storage.database import DatabasePool, LoggingTransaction +from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore from synapse.types import JsonDict -from synapse.util.caches.expiringcache import ExpiringCache +from synapse.util.caches.descriptors import cached db_binary_type = memoryview @@ -38,10 +40,23 @@ "_TransactionRow", ("response_code", "response_json") ) -SENTINEL = object() +@attr.s(slots=True, frozen=True, auto_attribs=True) +class DestinationRetryTimings: + """The current destination retry timing info for a remote server.""" -class TransactionWorkerStore(SQLBaseStore): + # The first time we tried and failed to reach the remote server, in ms. + failure_ts: int + + # The last time we tried and failed to reach the remote server, in ms. + retry_last_ts: int + + # How long since the last time we tried to reach the remote server before + # trying again, in ms. 
+ retry_interval: int + + +class TransactionWorkerStore(CacheInvalidationWorkerStore): def __init__(self, database: DatabasePool, db_conn, hs): super().__init__(database, db_conn, hs) @@ -60,19 +75,6 @@ def _cleanup_transactions_txn(txn): "_cleanup_transactions", _cleanup_transactions_txn ) - -class TransactionStore(TransactionWorkerStore): - """A collection of queries for handling PDUs.""" - - def __init__(self, database: DatabasePool, db_conn, hs): - super().__init__(database, db_conn, hs) - - self._destination_retry_cache = ExpiringCache( - cache_name="get_destination_retry_timings", - clock=self._clock, - expiry_ms=5 * 60 * 1000, - ) - async def get_received_txn_response( self, transaction_id: str, origin: str ) -> Optional[Tuple[int, JsonDict]]: @@ -145,7 +147,11 @@ async def set_received_txn_response( desc="set_received_txn_response", ) - async def get_destination_retry_timings(self, destination): + @cached(max_entries=10000) + async def get_destination_retry_timings( + self, + destination: str, + ) -> Optional[DestinationRetryTimings]: """Gets the current retry timings (if any) for a given destination. Args: @@ -156,34 +162,29 @@ async def get_destination_retry_timings(self, destination): Otherwise a dict for the retry scheme """ - result = self._destination_retry_cache.get(destination, SENTINEL) - if result is not SENTINEL: - return result - result = await self.db_pool.runInteraction( "get_destination_retry_timings", self._get_destination_retry_timings, destination, ) - # We don't hugely care about race conditions between getting and - # invalidating the cache, since we time out fairly quickly anyway. - self._destination_retry_cache[destination] = result return result - def _get_destination_retry_timings(self, txn, destination): + def _get_destination_retry_timings( + self, txn, destination: str + ) -> Optional[DestinationRetryTimings]: result = self.db_pool.simple_select_one_txn( txn, table="destinations", keyvalues={"destination": destination}, - retcols=("destination", "failure_ts", "retry_last_ts", "retry_interval"), + retcols=("failure_ts", "retry_last_ts", "retry_interval"), allow_none=True, ) # check we have a row and retry_last_ts is not null or zero # (retry_last_ts can't be negative) if result and result["retry_last_ts"]: - return result + return DestinationRetryTimings(**result) else: return None @@ -204,7 +205,6 @@ async def set_destination_retry_timings( retry_interval: how long until next retry in ms """ - self._destination_retry_cache.pop(destination, None) if self.database_engine.can_native_upsert: return await self.db_pool.runInteraction( "set_destination_retry_timings", @@ -252,6 +252,10 @@ def _set_destination_retry_timings_native( txn.execute(sql, (destination, failure_ts, retry_last_ts, retry_interval)) + self._invalidate_cache_and_stream( + txn, self.get_destination_retry_timings, (destination,) + ) + def _set_destination_retry_timings_emulated( self, txn, destination, failure_ts, retry_last_ts, retry_interval ): @@ -295,6 +299,10 @@ def _set_destination_retry_timings_emulated( }, ) + self._invalidate_cache_and_stream( + txn, self.get_destination_retry_timings, (destination,) + ) + async def store_destination_rooms_entries( self, destinations: Iterable[str], diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py index f9c370a814cc..129b47cd4994 100644 --- a/synapse/util/retryutils.py +++ b/synapse/util/retryutils.py @@ -82,11 +82,9 @@ async def get_retry_limiter(destination, clock, store, ignore_backoff=False, **k retry_timings = await 
store.get_destination_retry_timings(destination) if retry_timings: - failure_ts = retry_timings["failure_ts"] - retry_last_ts, retry_interval = ( - retry_timings["retry_last_ts"], - retry_timings["retry_interval"], - ) + failure_ts = retry_timings.failure_ts + retry_last_ts = retry_timings.retry_last_ts + retry_interval = retry_timings.retry_interval now = int(clock.time_msec()) diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index 0c89487eaf34..f58afbc244a8 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -89,14 +89,8 @@ def prepare(self, reactor, clock, hs): self.event_source = hs.get_event_sources().sources["typing"] self.datastore = hs.get_datastore() - retry_timings_res = { - "destination": "", - "retry_last_ts": 0, - "retry_interval": 0, - "failure_ts": None, - } self.datastore.get_destination_retry_timings = Mock( - return_value=defer.succeed(retry_timings_res) + return_value=defer.succeed(None) ) self.datastore.get_device_updates_by_remote = Mock( diff --git a/tests/storage/test_transactions.py b/tests/storage/test_transactions.py index b7f7eae8d096..bea9091d3089 100644 --- a/tests/storage/test_transactions.py +++ b/tests/storage/test_transactions.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +from synapse.storage.databases.main.transactions import DestinationRetryTimings from synapse.util.retryutils import MAX_RETRY_INTERVAL from tests.unittest import HomeserverTestCase @@ -36,8 +37,11 @@ def test_get_set_transactions(self): d = self.store.get_destination_retry_timings("example.com") r = self.get_success(d) - self.assert_dict( - {"retry_last_ts": 50, "retry_interval": 100, "failure_ts": 1000}, r + self.assertEqual( + DestinationRetryTimings( + retry_last_ts=50, retry_interval=100, failure_ts=1000 + ), + r, ) def test_initial_set_transactions(self): diff --git a/tests/util/test_retryutils.py b/tests/util/test_retryutils.py index 9b2be83a43a1..9e1bebdc8335 100644 --- a/tests/util/test_retryutils.py +++ b/tests/util/test_retryutils.py @@ -51,10 +51,12 @@ def test_limiter(self): except AssertionError: pass + self.pump() + new_timings = self.get_success(store.get_destination_retry_timings("test_dest")) - self.assertEqual(new_timings["failure_ts"], failure_ts) - self.assertEqual(new_timings["retry_last_ts"], failure_ts) - self.assertEqual(new_timings["retry_interval"], MIN_RETRY_INTERVAL) + self.assertEqual(new_timings.failure_ts, failure_ts) + self.assertEqual(new_timings.retry_last_ts, failure_ts) + self.assertEqual(new_timings.retry_interval, MIN_RETRY_INTERVAL) # now if we try again we should get a failure self.get_failure( @@ -77,14 +79,16 @@ def test_limiter(self): except AssertionError: pass + self.pump() + new_timings = self.get_success(store.get_destination_retry_timings("test_dest")) - self.assertEqual(new_timings["failure_ts"], failure_ts) - self.assertEqual(new_timings["retry_last_ts"], retry_ts) + self.assertEqual(new_timings.failure_ts, failure_ts) + self.assertEqual(new_timings.retry_last_ts, retry_ts) self.assertGreaterEqual( - new_timings["retry_interval"], MIN_RETRY_INTERVAL * RETRY_MULTIPLIER * 0.5 + new_timings.retry_interval, MIN_RETRY_INTERVAL * RETRY_MULTIPLIER * 0.5 ) self.assertLessEqual( - new_timings["retry_interval"], MIN_RETRY_INTERVAL * RETRY_MULTIPLIER * 2.0 + new_timings.retry_interval, MIN_RETRY_INTERVAL * RETRY_MULTIPLIER * 2.0 ) # From 5f1198a67ebe182db14b5f98bb4c9a19d4889918 Mon Sep 17 00:00:00 2001 From: Eric 
Eastwood Date: Mon, 24 May 2021 04:43:33 -0500 Subject: [PATCH 168/222] Fix `get_state_ids_for_event` return type typo to match what the function actually does (#10050) It looks like a typo copy/paste from `get_state_for_event` above. --- changelog.d/10050.misc | 1 + synapse/storage/state.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/10050.misc diff --git a/changelog.d/10050.misc b/changelog.d/10050.misc new file mode 100644 index 000000000000..2cac953cca1e --- /dev/null +++ b/changelog.d/10050.misc @@ -0,0 +1 @@ +Fix typo in `get_state_ids_for_event` docstring where the return type was incorrect. diff --git a/synapse/storage/state.py b/synapse/storage/state.py index cfafba22c5e0..c9dce726cb0e 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -540,7 +540,7 @@ async def get_state_ids_for_event( state_filter: The state filter used to fetch state from the database. Returns: - A dict from (type, state_key) -> state_event + A dict from (type, state_key) -> state_event_id """ state_map = await self.get_state_ids_for_events( [event_id], state_filter or StateFilter.all() From 387c297489b90bd80212ac1391666eecd01ff701 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Mon, 24 May 2021 13:37:30 +0200 Subject: [PATCH 169/222] Add missing entry to the table of contents of room admin API (#10043) --- changelog.d/10043.doc | 1 + docs/admin_api/rooms.md | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/10043.doc diff --git a/changelog.d/10043.doc b/changelog.d/10043.doc new file mode 100644 index 000000000000..a574ec0bf06c --- /dev/null +++ b/changelog.d/10043.doc @@ -0,0 +1 @@ +Add missing room state entry to the table of contents of room admin API. \ No newline at end of file diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md index 01d3882426b3..5721210fee11 100644 --- a/docs/admin_api/rooms.md +++ b/docs/admin_api/rooms.md @@ -4,6 +4,7 @@ * [Usage](#usage) - [Room Details API](#room-details-api) - [Room Members API](#room-members-api) +- [Room State API](#room-state-api) - [Delete Room API](#delete-room-api) * [Parameters](#parameters-1) * [Response](#response) From 316f89e87f462608a5e63ba567ad549330f1456a Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 24 May 2021 08:57:14 -0400 Subject: [PATCH 170/222] Enable experimental spaces by default. (#10011) The previous spaces_enabled flag now defaults to true and is exposed in the sample config. --- changelog.d/10011.feature | 1 + docs/sample_config.yaml | 15 +++++++++++++++ synapse/config/experimental.py | 19 ++++++++++++++++++- synapse/config/homeserver.py | 2 +- 4 files changed, 35 insertions(+), 2 deletions(-) create mode 100644 changelog.d/10011.feature diff --git a/changelog.d/10011.feature b/changelog.d/10011.feature new file mode 100644 index 000000000000..409140fb1367 --- /dev/null +++ b/changelog.d/10011.feature @@ -0,0 +1 @@ +Enable experimental support for [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946) (spaces summary API) and [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083) (restricted join rules) by default. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 2952f2ba3201..f0f9f06a6ee3 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -2943,3 +2943,18 @@ redis: # Optional password if configured on the Redis instance # #password: + + +# Enable experimental features in Synapse. 
+# +# Experimental features might break or be removed without a deprecation +# period. +# +experimental_features: + # Support for Spaces (MSC1772), it enables the following: + # + # * The Spaces Summary API (MSC2946). + # * Restricting room membership based on space membership (MSC3083). + # + # Uncomment to disable support for Spaces. + #spaces_enabled: false diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index a693fba87779..cc67377f0fe7 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -29,9 +29,26 @@ def read_config(self, config: JsonDict, **kwargs): self.msc2858_enabled = experimental.get("msc2858_enabled", False) # type: bool # Spaces (MSC1772, MSC2946, MSC3083, etc) - self.spaces_enabled = experimental.get("spaces_enabled", False) # type: bool + self.spaces_enabled = experimental.get("spaces_enabled", True) # type: bool if self.spaces_enabled: KNOWN_ROOM_VERSIONS[RoomVersions.MSC3083.identifier] = RoomVersions.MSC3083 # MSC3026 (busy presence state) self.msc3026_enabled = experimental.get("msc3026_enabled", False) # type: bool + + def generate_config_section(self, **kwargs): + return """\ + # Enable experimental features in Synapse. + # + # Experimental features might break or be removed without a deprecation + # period. + # + experimental_features: + # Support for Spaces (MSC1772), it enables the following: + # + # * The Spaces Summary API (MSC2946). + # * Restricting room membership based on space membership (MSC3083). + # + # Uncomment to disable support for Spaces. + #spaces_enabled: false + """ diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py index c23b66c88c76..5ae0f55bccd6 100644 --- a/synapse/config/homeserver.py +++ b/synapse/config/homeserver.py @@ -57,7 +57,6 @@ class HomeServerConfig(RootConfig): config_classes = [ ServerConfig, - ExperimentalConfig, TlsConfig, FederationConfig, CacheConfig, @@ -94,4 +93,5 @@ class HomeServerConfig(RootConfig): TracerConfig, WorkerConfig, RedisConfig, + ExperimentalConfig, ] From c0df6bae066fe818bb80d41af65503be7a07275d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 24 May 2021 14:02:01 +0100 Subject: [PATCH 171/222] Remove `keylen` from `LruCache`. (#9993) `keylen` seems to be a thing that is frequently incorrectly set, and we don't really need it. The only time it was used was to figure out if we had removed a subtree in `del_multi`, which we can do better by changing `TreeCache.pop` to return a different type (`TreeCacheNode`). Commits should be independently reviewable. --- changelog.d/9993.misc | 1 + .../replication/slave/storage/client_ips.py | 2 +- synapse/storage/databases/main/client_ips.py | 2 +- synapse/storage/databases/main/devices.py | 2 +- .../storage/databases/main/events_worker.py | 1 - synapse/util/caches/deferred_cache.py | 2 - synapse/util/caches/descriptors.py | 1 - synapse/util/caches/lrucache.py | 10 +- synapse/util/caches/treecache.py | 104 +++++++++++------- tests/util/test_lrucache.py | 4 +- tests/util/test_treecache.py | 6 +- 11 files changed, 80 insertions(+), 55 deletions(-) create mode 100644 changelog.d/9993.misc diff --git a/changelog.d/9993.misc b/changelog.d/9993.misc new file mode 100644 index 000000000000..0dd92440717d --- /dev/null +++ b/changelog.d/9993.misc @@ -0,0 +1 @@ +Remove `keylen` param on `LruCache`. 
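The `LruCache`/`TreeCache` diffs that follow hinge on one idea: when popping by a key prefix, return the removed subtree and let the caller walk its leaves, instead of the caller working out from `keylen` how much of the tree went away. A rough dict-based illustration (hypothetical helper names, not Synapse's actual `TreeCache`):

from typing import Any, Dict, Iterator, Tuple


def pop_prefix(tree: Dict[Any, Any], prefix: Tuple[Any, ...]) -> Any:
    # Remove and return whatever is stored under `prefix` (leaf or subtree).
    node = tree
    for k in prefix[:-1]:
        node = node[k]
    return node.pop(prefix[-1], None)


def iter_leaves(entry: Any) -> Iterator[Any]:
    # Anything dict-shaped is an internal node; everything else is a leaf.
    if isinstance(entry, dict):
        for child in entry.values():
            yield from iter_leaves(child)
    else:
        yield entry


tree: Dict[Any, Any] = {
    "animal": {"cat": "mew", "dog": "woof"},
    "vehicles": {"car": "vroom"},
}
removed = pop_prefix(tree, ("animal",))
assert sorted(iter_leaves(removed)) == ["mew", "woof"]
assert "animal" not in tree

Distinguishing nodes from values with `isinstance(entry, dict)` breaks down as soon as a dict is stored as a value, which is why the patch introduces a dedicated `TreeCacheNode` type for internal nodes.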
diff --git a/synapse/replication/slave/storage/client_ips.py b/synapse/replication/slave/storage/client_ips.py index 8730966380de..13ed87adc4ab 100644 --- a/synapse/replication/slave/storage/client_ips.py +++ b/synapse/replication/slave/storage/client_ips.py @@ -24,7 +24,7 @@ def __init__(self, database: DatabasePool, db_conn, hs): super().__init__(database, db_conn, hs) self.client_ip_last_seen = LruCache( - cache_name="client_ip_last_seen", keylen=4, max_size=50000 + cache_name="client_ip_last_seen", max_size=50000 ) # type: LruCache[tuple, int] async def insert_client_ip(self, user_id, access_token, ip, user_agent, device_id): diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py index d60010e942ab..074b077bef91 100644 --- a/synapse/storage/databases/main/client_ips.py +++ b/synapse/storage/databases/main/client_ips.py @@ -436,7 +436,7 @@ class ClientIpStore(ClientIpWorkerStore): def __init__(self, database: DatabasePool, db_conn, hs): self.client_ip_last_seen = LruCache( - cache_name="client_ip_last_seen", keylen=4, max_size=50000 + cache_name="client_ip_last_seen", max_size=50000 ) super().__init__(database, db_conn, hs) diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index a1f98b7e3854..fd87ba71abcb 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -1053,7 +1053,7 @@ def __init__(self, database: DatabasePool, db_conn, hs): # Map of (user_id, device_id) -> bool. If there is an entry that implies # the device exists. self.device_id_exists_cache = LruCache( - cache_name="device_id_exists", keylen=2, max_size=10000 + cache_name="device_id_exists", max_size=10000 ) async def store_device( diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 2c823e09cfa6..6963bbf7f484 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -157,7 +157,6 @@ def __init__(self, database: DatabasePool, db_conn, hs): self._get_event_cache = LruCache( cache_name="*getEvent*", - keylen=3, max_size=hs.config.caches.event_cache_size, ) diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py index 484097a48a45..371e7e4dd0de 100644 --- a/synapse/util/caches/deferred_cache.py +++ b/synapse/util/caches/deferred_cache.py @@ -70,7 +70,6 @@ def __init__( self, name: str, max_entries: int = 1000, - keylen: int = 1, tree: bool = False, iterable: bool = False, apply_cache_factor_from_config: bool = True, @@ -101,7 +100,6 @@ def metrics_cb(): # a Deferred. 
self.cache = LruCache( max_size=max_entries, - keylen=keylen, cache_name=name, cache_type=cache_type, size_callback=(lambda d: len(d) or 1) if iterable else None, diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 3a4d027095bb..2ac24a2f2536 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -270,7 +270,6 @@ def __get__(self, obj, owner): cache = DeferredCache( name=self.orig.__name__, max_entries=self.max_entries, - keylen=self.num_args, tree=self.tree, iterable=self.iterable, ) # type: DeferredCache[CacheKey, Any] diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index 1be675e014af..54df407ff70c 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -34,7 +34,7 @@ from synapse.config import cache as cache_config from synapse.util import caches from synapse.util.caches import CacheMetric, register_cache -from synapse.util.caches.treecache import TreeCache +from synapse.util.caches.treecache import TreeCache, iterate_tree_cache_entry try: from pympler.asizeof import Asizer @@ -160,7 +160,6 @@ def __init__( self, max_size: int, cache_name: Optional[str] = None, - keylen: int = 1, cache_type: Type[Union[dict, TreeCache]] = dict, size_callback: Optional[Callable] = None, metrics_collection_callback: Optional[Callable[[], None]] = None, @@ -173,9 +172,6 @@ def __init__( cache_name: The name of this cache, for the prometheus metrics. If unset, no metrics will be reported on this cache. - keylen: The length of the tuple used as the cache key. Ignored unless - cache_type is `TreeCache`. - cache_type (type): type of underlying cache to be used. Typically one of dict or TreeCache. @@ -403,7 +399,9 @@ def cache_del_multi(key: KT) -> None: popped = cache.pop(key) if popped is None: return - for leaf in enumerate_leaves(popped, keylen - len(cast(tuple, key))): + # for each deleted node, we now need to remove it from the linked list + # and run its callbacks. + for leaf in iterate_tree_cache_entry(popped): delete_node(leaf) @synchronized diff --git a/synapse/util/caches/treecache.py b/synapse/util/caches/treecache.py index eb4d98f683cb..73502a8b061b 100644 --- a/synapse/util/caches/treecache.py +++ b/synapse/util/caches/treecache.py @@ -1,18 +1,43 @@ -from typing import Dict +# Copyright 2016-2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. SENTINEL = object() +class TreeCacheNode(dict): + """The type of nodes in our tree. + + Has its own type so we can distinguish it from real dicts that are stored at the + leaves. + """ + + pass + + class TreeCache: """ Tree-based backing store for LruCache. Allows subtrees of data to be deleted efficiently. Keys must be tuples. 
+ + The data structure is a chain of TreeCacheNodes: + root = {key_1: {key_2: _value}} """ def __init__(self): self.size = 0 - self.root = {} # type: Dict + self.root = TreeCacheNode() def __setitem__(self, key, value): return self.set(key, value) @@ -21,10 +46,23 @@ def __contains__(self, key): return self.get(key, SENTINEL) is not SENTINEL def set(self, key, value): + if isinstance(value, TreeCacheNode): + # this would mean we couldn't tell where our tree ended and the value + # started. + raise ValueError("Cannot store TreeCacheNodes in a TreeCache") + node = self.root for k in key[:-1]: - node = node.setdefault(k, {}) - node[key[-1]] = _Entry(value) + next_node = node.get(k, SENTINEL) + if next_node is SENTINEL: + next_node = node[k] = TreeCacheNode() + elif not isinstance(next_node, TreeCacheNode): + # this suggests that the caller is not being consistent with its key + # length. + raise ValueError("value conflicts with an existing subtree") + node = next_node + + node[key[-1]] = value self.size += 1 def get(self, key, default=None): @@ -33,25 +71,41 @@ def get(self, key, default=None): node = node.get(k, None) if node is None: return default - return node.get(key[-1], _Entry(default)).value + return node.get(key[-1], default) def clear(self): self.size = 0 - self.root = {} + self.root = TreeCacheNode() def pop(self, key, default=None): + """Remove the given key, or subkey, from the cache + + Args: + key: key or subkey to remove. + default: value to return if key is not found + + Returns: + If the key is not found, 'default'. If the key is complete, the removed + value. If the key is partial, the TreeCacheNode corresponding to the part + of the tree that was removed. + """ + # a list of the nodes we have touched on the way down the tree nodes = [] node = self.root for k in key[:-1]: node = node.get(k, None) - nodes.append(node) # don't add the root node if node is None: return default + if not isinstance(node, TreeCacheNode): + # we've gone off the end of the tree + raise ValueError("pop() key too long") + nodes.append(node) # don't add the root node popped = node.pop(key[-1], SENTINEL) if popped is SENTINEL: return default + # working back up the tree, clear out any nodes that are now empty node_and_keys = list(zip(nodes, key)) node_and_keys.reverse() node_and_keys.append((self.root, None)) @@ -61,14 +115,15 @@ def pop(self, key, default=None): if n: break + # found an empty node: remove it from its parent, and loop. node_and_keys[i + 1][0].pop(k) - popped, cnt = _strip_and_count_entires(popped) + cnt = sum(1 for _ in iterate_tree_cache_entry(popped)) self.size -= cnt return popped def values(self): - return list(iterate_tree_cache_entry(self.root)) + return iterate_tree_cache_entry(self.root) def __len__(self): return self.size @@ -78,36 +133,9 @@ def iterate_tree_cache_entry(d): """Helper function to iterate over the leaves of a tree, i.e. a dict of that can contain dicts. """ - if isinstance(d, dict): + if isinstance(d, TreeCacheNode): for value_d in d.values(): for value in iterate_tree_cache_entry(value_d): yield value else: - if isinstance(d, _Entry): - yield d.value - else: - yield d - - -class _Entry: - __slots__ = ["value"] - - def __init__(self, value): - self.value = value - - -def _strip_and_count_entires(d): - """Takes an _Entry or dict with leaves of _Entry's, and either returns the - value or a dictionary with _Entry's replaced by their values. 
- - Also returns the count of _Entry's - """ - if isinstance(d, dict): - cnt = 0 - for key, value in d.items(): - v, n = _strip_and_count_entires(value) - d[key] = v - cnt += n - return d, cnt - else: - return d.value, 1 + yield d diff --git a/tests/util/test_lrucache.py b/tests/util/test_lrucache.py index df3e27779fd2..377904e72e6c 100644 --- a/tests/util/test_lrucache.py +++ b/tests/util/test_lrucache.py @@ -59,7 +59,7 @@ def test_pop(self): self.assertEquals(cache.pop("key"), None) def test_del_multi(self): - cache = LruCache(4, keylen=2, cache_type=TreeCache) + cache = LruCache(4, cache_type=TreeCache) cache[("animal", "cat")] = "mew" cache[("animal", "dog")] = "woof" cache[("vehicles", "car")] = "vroom" @@ -165,7 +165,7 @@ def test_del_multi(self): m2 = Mock() m3 = Mock() m4 = Mock() - cache = LruCache(4, keylen=2, cache_type=TreeCache) + cache = LruCache(4, cache_type=TreeCache) cache.set(("a", "1"), "value", callbacks=[m1]) cache.set(("a", "2"), "value", callbacks=[m2]) diff --git a/tests/util/test_treecache.py b/tests/util/test_treecache.py index 3b077af27e90..60663720536d 100644 --- a/tests/util/test_treecache.py +++ b/tests/util/test_treecache.py @@ -13,7 +13,7 @@ # limitations under the License. -from synapse.util.caches.treecache import TreeCache +from synapse.util.caches.treecache import TreeCache, iterate_tree_cache_entry from .. import unittest @@ -64,12 +64,14 @@ def test_pop_mixedlevel(self): cache[("a", "b")] = "AB" cache[("b", "a")] = "BA" self.assertEquals(cache.get(("a", "a")), "AA") - cache.pop(("a",)) + popped = cache.pop(("a",)) self.assertEquals(cache.get(("a", "a")), None) self.assertEquals(cache.get(("a", "b")), None) self.assertEquals(cache.get(("b", "a")), "BA") self.assertEquals(len(cache), 1) + self.assertEquals({"AA", "AB"}, set(iterate_tree_cache_entry(popped))) + def test_clear(self): cache = TreeCache() cache[("a",)] = "A" From daca7b2794fb86514dc01de551bb0e5db77cf914 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 24 May 2021 14:03:00 +0100 Subject: [PATCH 172/222] Fix off-by-one-error in synapse_port_db (#9991) fixes #9979 --- .buildkite/postgres-config.yaml | 6 ++---- .buildkite/scripts/test_synapse_port_db.sh | 4 ++++ .buildkite/sqlite-config.yaml | 6 ++---- changelog.d/9991.bugfix | 1 + scripts/synapse_port_db | 2 +- 5 files changed, 10 insertions(+), 9 deletions(-) create mode 100644 changelog.d/9991.bugfix diff --git a/.buildkite/postgres-config.yaml b/.buildkite/postgres-config.yaml index 2acbe66f4caf..67e17fa9d1de 100644 --- a/.buildkite/postgres-config.yaml +++ b/.buildkite/postgres-config.yaml @@ -3,7 +3,7 @@ # CI's Docker setup at the point where this file is considered. server_name: "localhost:8800" -signing_key_path: "/src/.buildkite/test.signing.key" +signing_key_path: ".buildkite/test.signing.key" report_stats: false @@ -16,6 +16,4 @@ database: database: synapse # Suppress the key server warning. 
-trusted_key_servers: - - server_name: "matrix.org" -suppress_key_server_warning: true +trusted_key_servers: [] diff --git a/.buildkite/scripts/test_synapse_port_db.sh b/.buildkite/scripts/test_synapse_port_db.sh index a7e245476937..82d7d56d4e9f 100755 --- a/.buildkite/scripts/test_synapse_port_db.sh +++ b/.buildkite/scripts/test_synapse_port_db.sh @@ -33,6 +33,10 @@ scripts-dev/update_database --database-config .buildkite/sqlite-config.yaml echo "+++ Run synapse_port_db against test database" coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml +# We should be able to run twice against the same database. +echo "+++ Run synapse_port_db a second time" +coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml + ##### # Now do the same again, on an empty database. diff --git a/.buildkite/sqlite-config.yaml b/.buildkite/sqlite-config.yaml index 6d9bf80d844b..d16459cfd947 100644 --- a/.buildkite/sqlite-config.yaml +++ b/.buildkite/sqlite-config.yaml @@ -3,7 +3,7 @@ # schema and run background updates on it. server_name: "localhost:8800" -signing_key_path: "/src/.buildkite/test.signing.key" +signing_key_path: ".buildkite/test.signing.key" report_stats: false @@ -13,6 +13,4 @@ database: database: ".buildkite/test_db.db" # Suppress the key server warning. -trusted_key_servers: - - server_name: "matrix.org" -suppress_key_server_warning: true +trusted_key_servers: [] diff --git a/changelog.d/9991.bugfix b/changelog.d/9991.bugfix new file mode 100644 index 000000000000..665ff04dea84 --- /dev/null +++ b/changelog.d/9991.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in v1.26.0 which meant that `synapse_port_db` would not correctly initialise some postgres sequences, requiring manual updates afterwards. diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index 7c7645c05a63..86eb76cbca7a 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -959,7 +959,7 @@ class Porter(object): def r(txn): txn.execute( "ALTER SEQUENCE event_auth_chain_id RESTART WITH %s", - (curr_chain_id,), + (curr_chain_id + 1,), ) if curr_chain_id is not None: From 82eacb0e071657e796952638fe8da90cdb94f2a1 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 24 May 2021 14:03:30 +0100 Subject: [PATCH 173/222] Fix --no-daemonize for synctl with workers (#9995) --- changelog.d/9995.bugfix | 1 + synctl | 102 +++++++++++++--------------------------- 2 files changed, 33 insertions(+), 70 deletions(-) create mode 100644 changelog.d/9995.bugfix diff --git a/changelog.d/9995.bugfix b/changelog.d/9995.bugfix new file mode 100644 index 000000000000..3b63e7c42aed --- /dev/null +++ b/changelog.d/9995.bugfix @@ -0,0 +1 @@ +Fix `synctl`'s `--no-daemonize` parameter to work correctly with worker processes. diff --git a/synctl b/synctl index ccf404accb45..6ce19918d212 100755 --- a/synctl +++ b/synctl @@ -24,12 +24,13 @@ import signal import subprocess import sys import time +from typing import Iterable import yaml from synapse.config import find_config_files -SYNAPSE = [sys.executable, "-m", "synapse.app.homeserver"] +MAIN_PROCESS = "synapse.app.homeserver" GREEN = "\x1b[1;32m" YELLOW = "\x1b[1;33m" @@ -68,71 +69,37 @@ def abort(message, colour=RED, stream=sys.stderr): sys.exit(1) -def start(configfile: str, daemonize: bool = True) -> bool: - """Attempts to start synapse. 
+def start(pidfile: str, app: str, config_files: Iterable[str], daemonize: bool) -> bool: + """Attempts to start a synapse main or worker process. Args: - configfile: path to a yaml synapse config file - daemonize: whether to daemonize synapse or keep it attached to the current - session + pidfile: the pidfile we expect the process to create + app: the python module to run + config_files: config files to pass to synapse + daemonize: if True, will include a --daemonize argument to synapse Returns: - True if the process started successfully + True if the process started successfully or was already running False if there was an error starting the process - - If deamonize is False it will only return once synapse exits. """ - write("Starting ...") - args = SYNAPSE - - if daemonize: - args.extend(["--daemonize", "-c", configfile]) - else: - args.extend(["-c", configfile]) - - try: - subprocess.check_call(args) - write("started synapse.app.homeserver(%r)" % (configfile,), colour=GREEN) + if os.path.exists(pidfile) and pid_running(int(open(pidfile).read())): + print(app + " already running") return True - except subprocess.CalledProcessError as e: - write( - "error starting (exit code: %d); see above for logs" % e.returncode, - colour=RED, - ) - return False - -def start_worker(app: str, configfile: str, worker_configfile: str) -> bool: - """Attempts to start a synapse worker. - Args: - app: name of the worker's appservice - configfile: path to a yaml synapse config file - worker_configfile: path to worker specific yaml synapse file - - Returns: - True if the process started successfully - False if there was an error starting the process - """ - - args = [ - sys.executable, - "-m", - app, - "-c", - configfile, - "-c", - worker_configfile, - "--daemonize", - ] + args = [sys.executable, "-m", app] + for c in config_files: + args += ["-c", c] + if daemonize: + args.append("--daemonize") try: subprocess.check_call(args) - write("started %s(%r)" % (app, worker_configfile), colour=GREEN) + write("started %s(%s)" % (app, ",".join(config_files)), colour=GREEN) return True except subprocess.CalledProcessError as e: write( - "error starting %s(%r) (exit code: %d); see above for logs" - % (app, worker_configfile, e.returncode), + "error starting %s(%s) (exit code: %d); see above for logs" + % (app, ",".join(config_files), e.returncode), colour=RED, ) return False @@ -224,10 +191,11 @@ def main(): if not os.path.exists(configfile): write( - "No config file found\n" - "To generate a config file, run '%s -c %s --generate-config" - " --server-name= --report-stats='\n" - % (" ".join(SYNAPSE), options.configfile), + f"Config file {configfile} does not exist.\n" + f"To generate a config file, run:\n" + f" {sys.executable} -m {MAIN_PROCESS}" + f" -c {configfile} --generate-config" + f" --server-name= --report-stats=\n", stream=sys.stderr, ) sys.exit(1) @@ -323,7 +291,7 @@ def main(): has_stopped = False if start_stop_synapse: - if not stop(pidfile, "synapse.app.homeserver"): + if not stop(pidfile, MAIN_PROCESS): has_stopped = False if not has_stopped and action == "stop": sys.exit(1) @@ -346,30 +314,24 @@ def main(): if action == "start" or action == "restart": error = False if start_stop_synapse: - # Check if synapse is already running - if os.path.exists(pidfile) and pid_running(int(open(pidfile).read())): - abort("synapse.app.homeserver already running") - - if not start(configfile, bool(options.daemonize)): + if not start(pidfile, MAIN_PROCESS, (configfile,), options.daemonize): error = True for worker in 
workers: env = os.environ.copy() - # Skip starting a worker if its already running - if os.path.exists(worker.pidfile) and pid_running( - int(open(worker.pidfile).read()) - ): - print(worker.app + " already running") - continue - if worker.cache_factor: os.environ["SYNAPSE_CACHE_FACTOR"] = str(worker.cache_factor) for cache_name, factor in worker.cache_factors.items(): os.environ["SYNAPSE_CACHE_FACTOR_" + cache_name.upper()] = str(factor) - if not start_worker(worker.app, configfile, worker.configfile): + if not start( + worker.pidfile, + worker.app, + (configfile, worker.configfile), + options.daemonize, + ): error = True # Reset env back to the original From 057ce7b75406dc97be8ff2c890c47fd9357b0773 Mon Sep 17 00:00:00 2001 From: Jerin J Titus <72017981+jerinjtitus@users.noreply.github.com> Date: Mon, 24 May 2021 22:13:30 +0530 Subject: [PATCH 174/222] Remove tls_fingerprints option (#9280) Signed-off-by: Jerin J Titus <72017981+jerinjtitus@users.noreply.github.com> --- changelog.d/9280.removal | 1 + docs/sample_config.yaml | 27 ------------ scripts-dev/convert_server_keys.py | 7 --- synapse/config/tls.py | 50 ---------------------- synapse/rest/key/v2/local_key_resource.py | 8 ---- synapse/rest/key/v2/remote_key_resource.py | 3 -- 6 files changed, 1 insertion(+), 95 deletions(-) create mode 100644 changelog.d/9280.removal diff --git a/changelog.d/9280.removal b/changelog.d/9280.removal new file mode 100644 index 000000000000..c2ed3d308dda --- /dev/null +++ b/changelog.d/9280.removal @@ -0,0 +1 @@ +Removed support for the deprecated `tls_fingerprints` configuration setting. Contributed by Jerin J Titus. \ No newline at end of file diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index f0f9f06a6ee3..6576b153d0eb 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -683,33 +683,6 @@ acme: # account_key_file: DATADIR/acme_account.key -# List of allowed TLS fingerprints for this server to publish along -# with the signing keys for this server. Other matrix servers that -# make HTTPS requests to this server will check that the TLS -# certificates returned by this server match one of the fingerprints. -# -# Synapse automatically adds the fingerprint of its own certificate -# to the list. So if federation traffic is handled directly by synapse -# then no modification to the list is required. -# -# If synapse is run behind a load balancer that handles the TLS then it -# will be necessary to add the fingerprints of the certificates used by -# the loadbalancers to this list if they are different to the one -# synapse is using. -# -# Homeservers are permitted to cache the list of TLS fingerprints -# returned in the key responses up to the "valid_until_ts" returned in -# key. It may be necessary to publish the fingerprints of a new -# certificate and wait until the "valid_until_ts" of the previous key -# responses have passed before deploying it. 
-# -# You can calculate a fingerprint from a given TLS listener via: -# openssl s_client -connect $host:$port < /dev/null 2> /dev/null | -# openssl x509 -outform DER | openssl sha256 -binary | base64 | tr -d '=' -# or by checking matrix.org/federationtester/api/report?server_name=$host -# -#tls_fingerprints: [{"sha256": ""}] - ## Federation ## diff --git a/scripts-dev/convert_server_keys.py b/scripts-dev/convert_server_keys.py index 961dc59f1134..d4314a054c44 100644 --- a/scripts-dev/convert_server_keys.py +++ b/scripts-dev/convert_server_keys.py @@ -1,4 +1,3 @@ -import hashlib import json import sys import time @@ -54,15 +53,9 @@ def convert_v1_to_v2(server_name, valid_until, keys, certificate): "server_name": server_name, "verify_keys": {key_id: {"key": key} for key_id, key in keys.items()}, "valid_until_ts": valid_until, - "tls_fingerprints": [fingerprint(certificate)], } -def fingerprint(certificate): - finger = hashlib.sha256(certificate) - return {"sha256": encode_base64(finger.digest())} - - def rows_v2(server, json): valid_until = json["valid_until_ts"] key_json = encode_canonical_json(json) diff --git a/synapse/config/tls.py b/synapse/config/tls.py index 7df4e4c3e6b4..26f1150ca506 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -16,11 +16,8 @@ import os import warnings from datetime import datetime -from hashlib import sha256 from typing import List, Optional, Pattern -from unpaddedbase64 import encode_base64 - from OpenSSL import SSL, crypto from twisted.internet._sslverify import Certificate, trustRootFromCertificates @@ -83,13 +80,6 @@ def read_config(self, config: dict, config_dir_path: str, **kwargs): "configured." ) - self._original_tls_fingerprints = config.get("tls_fingerprints", []) - - if self._original_tls_fingerprints is None: - self._original_tls_fingerprints = [] - - self.tls_fingerprints = list(self._original_tls_fingerprints) - # Whether to verify certificates on outbound federation traffic self.federation_verify_certificates = config.get( "federation_verify_certificates", True @@ -248,19 +238,6 @@ def read_certificate_from_disk(self, require_cert_and_key: bool): e, ) - self.tls_fingerprints = list(self._original_tls_fingerprints) - - if self.tls_certificate: - # Check that our own certificate is included in the list of fingerprints - # and include it if it is not. - x509_certificate_bytes = crypto.dump_certificate( - crypto.FILETYPE_ASN1, self.tls_certificate - ) - sha256_fingerprint = encode_base64(sha256(x509_certificate_bytes).digest()) - sha256_fingerprints = {f["sha256"] for f in self.tls_fingerprints} - if sha256_fingerprint not in sha256_fingerprints: - self.tls_fingerprints.append({"sha256": sha256_fingerprint}) - def generate_config_section( self, config_dir_path, @@ -443,33 +420,6 @@ def generate_config_section( # If unspecified, we will use CONFDIR/client.key. # account_key_file: %(default_acme_account_file)s - - # List of allowed TLS fingerprints for this server to publish along - # with the signing keys for this server. Other matrix servers that - # make HTTPS requests to this server will check that the TLS - # certificates returned by this server match one of the fingerprints. - # - # Synapse automatically adds the fingerprint of its own certificate - # to the list. So if federation traffic is handled directly by synapse - # then no modification to the list is required. 
- # - # If synapse is run behind a load balancer that handles the TLS then it - # will be necessary to add the fingerprints of the certificates used by - # the loadbalancers to this list if they are different to the one - # synapse is using. - # - # Homeservers are permitted to cache the list of TLS fingerprints - # returned in the key responses up to the "valid_until_ts" returned in - # key. It may be necessary to publish the fingerprints of a new - # certificate and wait until the "valid_until_ts" of the previous key - # responses have passed before deploying it. - # - # You can calculate a fingerprint from a given TLS listener via: - # openssl s_client -connect $host:$port < /dev/null 2> /dev/null | - # openssl x509 -outform DER | openssl sha256 -binary | base64 | tr -d '=' - # or by checking matrix.org/federationtester/api/report?server_name=$host - # - #tls_fingerprints: [{"sha256": ""}] """ # Lowercase the string representation of boolean values % { diff --git a/synapse/rest/key/v2/local_key_resource.py b/synapse/rest/key/v2/local_key_resource.py index e8dbe240d81e..a5fcd15e3af4 100644 --- a/synapse/rest/key/v2/local_key_resource.py +++ b/synapse/rest/key/v2/local_key_resource.py @@ -48,11 +48,6 @@ class LocalKey(Resource): "key": # base64 encoded NACL verification key. } }, - "tls_fingerprints": [ # Fingerprints of the TLS certs this server uses. - { - "sha256": # base64 encoded sha256 fingerprint of the X509 cert - }, - ], "signatures": { "this.server.example.com": { "algorithm:version": # NACL signature for this server @@ -89,14 +84,11 @@ def response_json_object(self): "expired_ts": key.expired_ts, } - tls_fingerprints = self.config.tls_fingerprints - json_object = { "valid_until_ts": self.valid_until_ts, "server_name": self.config.server_name, "verify_keys": verify_keys, "old_verify_keys": old_verify_keys, - "tls_fingerprints": tls_fingerprints, } for key in self.config.signing_key: json_object = sign_json(json_object, self.config.server_name, key) diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index f648678b0903..aba1734a55ac 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -73,9 +73,6 @@ class RemoteKey(DirectServeJsonResource): "expired_ts": 0, # when the key stop being used. } } - "tls_fingerprints": [ - { "sha256": # fingerprint } - ] "signatures": { "remote.server.example.com": {...} "this.server.example.com": {...} From 22a8838f626834c4ffc09761f4c5d65215cfc885 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergio=20Migu=C3=A9ns?= Date: Mon, 24 May 2021 21:23:54 +0200 Subject: [PATCH 175/222] Fix docker image to not log at `/homeserver.log` (#10045) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes #9970 Signed-off-by: Sergio Miguéns Iglesias lonyelon@lony.xyz --- changelog.d/10045.docker | 1 + docker/conf/log.config | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelog.d/10045.docker diff --git a/changelog.d/10045.docker b/changelog.d/10045.docker new file mode 100644 index 000000000000..70b65b0a01a4 --- /dev/null +++ b/changelog.d/10045.docker @@ -0,0 +1 @@ +Fix bug introduced in Synapse 1.33.0 which caused a `Permission denied: '/homeserver.log'` error when starting Synapse with the generated log configuration. Contributed by Sergio Miguéns Iglesias. 
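The fix below wraps the `file` handler in a Jinja2 conditional so that it is only defined when `LOG_FILE_PATH` is set. As a rough illustration of that template logic (a cut-down sketch with a made-up filename, not the real `docker/conf/log.config`):

```python
from jinja2 import Template

# Cut-down sketch of the conditional now used in docker/conf/log.config.
log_config = Template(
    """\
handlers:
{% if LOG_FILE_PATH %}
  file:
    class: logging.handlers.TimedRotatingFileHandler
    filename: {{ LOG_FILE_PATH }}
{% endif %}
  console:
    class: logging.StreamHandler
"""
)

# Without LOG_FILE_PATH there is no file handler at all, so nothing ever
# tries to open '/homeserver.log'.
print(log_config.render())

# With LOG_FILE_PATH set, the file handler is rendered as before.
print(log_config.render(LOG_FILE_PATH="/data/homeserver.log"))
```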
diff --git a/docker/conf/log.config b/docker/conf/log.config index 34572bc0f36a..a99462692628 100644 --- a/docker/conf/log.config +++ b/docker/conf/log.config @@ -9,10 +9,11 @@ formatters: {% endif %} handlers: +{% if LOG_FILE_PATH %} file: class: logging.handlers.TimedRotatingFileHandler formatter: precise - filename: {{ LOG_FILE_PATH or "homeserver.log" }} + filename: {{ LOG_FILE_PATH }} when: "midnight" backupCount: 6 # Does not include the current log file. encoding: utf8 @@ -29,6 +30,7 @@ handlers: # be written to disk. capacity: 10 flushLevel: 30 # Flush for WARNING logs as well +{% endif %} console: class: logging.StreamHandler From 7adcb20fc02d614b4a2b03b128b279f25633e2bd Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 24 May 2021 15:32:01 -0400 Subject: [PATCH 176/222] Add missing type hints to synapse.util (#9982) --- changelog.d/9982.misc | 1 + mypy.ini | 9 +++++++++ synapse/config/saml2.py | 8 +++++++- synapse/storage/databases/main/keys.py | 2 +- synapse/util/hash.py | 10 +++++----- synapse/util/iterutils.py | 11 ++++------- synapse/util/module_loader.py | 9 +++++---- synapse/util/msisdn.py | 10 +++++----- tests/util/test_itertools.py | 4 ++-- 9 files changed, 39 insertions(+), 25 deletions(-) create mode 100644 changelog.d/9982.misc diff --git a/changelog.d/9982.misc b/changelog.d/9982.misc new file mode 100644 index 000000000000..f3821f61a32a --- /dev/null +++ b/changelog.d/9982.misc @@ -0,0 +1 @@ +Add missing type hints to `synapse.util` module. diff --git a/mypy.ini b/mypy.ini index 1d1d1ea0f25f..062872020e4c 100644 --- a/mypy.ini +++ b/mypy.ini @@ -71,8 +71,13 @@ files = synapse/types.py, synapse/util/async_helpers.py, synapse/util/caches, + synapse/util/daemonize.py, + synapse/util/hash.py, + synapse/util/iterutils.py, synapse/util/metrics.py, synapse/util/macaroons.py, + synapse/util/module_loader.py, + synapse/util/msisdn.py, synapse/util/stringutils.py, synapse/visibility.py, tests/replication, @@ -80,6 +85,7 @@ files = tests/handlers/test_password_providers.py, tests/rest/client/v1/test_login.py, tests/rest/client/v2_alpha/test_auth.py, + tests/util/test_itertools.py, tests/util/test_stream_change_cache.py [mypy-pymacaroons.*] @@ -175,5 +181,8 @@ ignore_missing_imports = True [mypy-pympler.*] ignore_missing_imports = True +[mypy-phonenumbers.*] +ignore_missing_imports = True + [mypy-ijson.*] ignore_missing_imports = True diff --git a/synapse/config/saml2.py b/synapse/config/saml2.py index 3d1218c8d14b..05e983625dc8 100644 --- a/synapse/config/saml2.py +++ b/synapse/config/saml2.py @@ -164,7 +164,13 @@ def read_config(self, config, **kwargs): config_path = saml2_config.get("config_path", None) if config_path is not None: mod = load_python_module(config_path) - _dict_merge(merge_dict=mod.CONFIG, into_dict=saml2_config_dict) + config = getattr(mod, "CONFIG", None) + if config is None: + raise ConfigError( + "Config path specified by saml2_config.config_path does not " + "have a CONFIG property." 
+ ) + _dict_merge(merge_dict=config, into_dict=saml2_config_dict) import saml2.config diff --git a/synapse/storage/databases/main/keys.py b/synapse/storage/databases/main/keys.py index 0e8680783404..6990f3ed1d27 100644 --- a/synapse/storage/databases/main/keys.py +++ b/synapse/storage/databases/main/keys.py @@ -55,7 +55,7 @@ async def get_server_verify_keys( """ keys = {} - def _get_keys(txn: Cursor, batch: Tuple[Tuple[str, str]]) -> None: + def _get_keys(txn: Cursor, batch: Tuple[Tuple[str, str], ...]) -> None: """Processes a batch of keys to fetch, and adds the result to `keys`.""" # batch_iter always returns tuples so it's safe to do len(batch) diff --git a/synapse/util/hash.py b/synapse/util/hash.py index ba676e176240..7625ca8c2c63 100644 --- a/synapse/util/hash.py +++ b/synapse/util/hash.py @@ -17,15 +17,15 @@ import unpaddedbase64 -def sha256_and_url_safe_base64(input_text): +def sha256_and_url_safe_base64(input_text: str) -> str: """SHA256 hash an input string, encode the digest as url-safe base64, and return - :param input_text: string to hash - :type input_text: str + Args: + input_text: string to hash - :returns a sha256 hashed and url-safe base64 encoded digest - :rtype: str + returns: + A sha256 hashed and url-safe base64 encoded digest """ digest = hashlib.sha256(input_text.encode()).digest() return unpaddedbase64.encode_base64(digest, urlsafe=True) diff --git a/synapse/util/iterutils.py b/synapse/util/iterutils.py index abfdc2983261..886afa9d1931 100644 --- a/synapse/util/iterutils.py +++ b/synapse/util/iterutils.py @@ -30,12 +30,12 @@ T = TypeVar("T") -def batch_iter(iterable: Iterable[T], size: int) -> Iterator[Tuple[T]]: +def batch_iter(iterable: Iterable[T], size: int) -> Iterator[Tuple[T, ...]]: """batch an iterable up into tuples with a maximum size Args: - iterable (iterable): the iterable to slice - size (int): the maximum batch size + iterable: the iterable to slice + size: the maximum batch size Returns: an iterator over the chunks @@ -46,10 +46,7 @@ def batch_iter(iterable: Iterable[T], size: int) -> Iterator[Tuple[T]]: return iter(lambda: tuple(islice(sourceiter, size)), ()) -ISeq = TypeVar("ISeq", bound=Sequence, covariant=True) - - -def chunk_seq(iseq: ISeq, maxlen: int) -> Iterable[ISeq]: +def chunk_seq(iseq: Sequence[T], maxlen: int) -> Iterable[Sequence[T]]: """Split the given sequence into chunks of the given size The last chunk may be shorter than the given size. diff --git a/synapse/util/module_loader.py b/synapse/util/module_loader.py index 8acbe276e4bc..cbfbd097f9a1 100644 --- a/synapse/util/module_loader.py +++ b/synapse/util/module_loader.py @@ -15,6 +15,7 @@ import importlib import importlib.util import itertools +from types import ModuleType from typing import Any, Iterable, Tuple, Type import jsonschema @@ -44,8 +45,8 @@ def load_module(provider: dict, config_path: Iterable[str]) -> Tuple[Type, Any]: # We need to import the module, and then pick the class out of # that, so we split based on the last dot. - module, clz = modulename.rsplit(".", 1) - module = importlib.import_module(module) + module_name, clz = modulename.rsplit(".", 1) + module = importlib.import_module(module_name) provider_class = getattr(module, clz) # Load the module config. 
If None, pass an empty dictionary instead @@ -69,11 +70,11 @@ def load_module(provider: dict, config_path: Iterable[str]) -> Tuple[Type, Any]: return provider_class, provider_config -def load_python_module(location: str): +def load_python_module(location: str) -> ModuleType: """Load a python module, and return a reference to its global namespace Args: - location (str): path to the module + location: path to the module Returns: python module object diff --git a/synapse/util/msisdn.py b/synapse/util/msisdn.py index bbbdebf2648e..1046224f1598 100644 --- a/synapse/util/msisdn.py +++ b/synapse/util/msisdn.py @@ -17,19 +17,19 @@ from synapse.api.errors import SynapseError -def phone_number_to_msisdn(country, number): +def phone_number_to_msisdn(country: str, number: str) -> str: """ Takes an ISO-3166-1 2 letter country code and phone number and returns an msisdn representing the canonical version of that phone number. Args: - country (str): ISO-3166-1 2 letter country code - number (str): Phone number in a national or international format + country: ISO-3166-1 2 letter country code + number: Phone number in a national or international format Returns: - (str) The canonical form of the phone number, as an msisdn + The canonical form of the phone number, as an msisdn Raises: - SynapseError if the number could not be parsed. + SynapseError if the number could not be parsed. """ try: phoneNumber = phonenumbers.parse(number, country) diff --git a/tests/util/test_itertools.py b/tests/util/test_itertools.py index 1bd0b45d940a..e712eb42ea4b 100644 --- a/tests/util/test_itertools.py +++ b/tests/util/test_itertools.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Dict, List +from typing import Dict, Iterable, List, Sequence from synapse.util.iterutils import chunk_seq, sorted_topologically @@ -44,7 +44,7 @@ def test_uneven_parts(self): ) def test_empty_input(self): - parts = chunk_seq([], 5) + parts = chunk_seq([], 5) # type: Iterable[Sequence] self.assertEqual( list(parts), From 7d90d6ce9b6733bdebd4c5aca0c785e28e265e13 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 24 May 2021 15:32:45 -0400 Subject: [PATCH 177/222] Run complement with Synapse workers manually. (#10039) Adds an option to complement.sh to run Synapse in worker mode (instead of the default monolith mode). --- changelog.d/10039.misc | 1 + docker/configure_workers_and_start.py | 8 ++++---- scripts-dev/complement.sh | 25 ++++++++++++++++++++++--- 3 files changed, 27 insertions(+), 7 deletions(-) create mode 100644 changelog.d/10039.misc diff --git a/changelog.d/10039.misc b/changelog.d/10039.misc new file mode 100644 index 000000000000..8855f141d903 --- /dev/null +++ b/changelog.d/10039.misc @@ -0,0 +1 @@ +Fix running complement tests with Synapse workers. 
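The first hunk below fixes brace escaping in the nginx config templates that `configure_workers_and_start.py` feeds through Python's `str.format`: literal `{` and `}` in the generated output must be doubled, otherwise `format()` tries to parse them as replacement fields and raises a `ValueError`. A minimal sketch of the idea (the endpoint and upstream values here are made up for illustration):

```python
# Literal braces in a str.format template must be written as '{{' / '}}';
# only the single-braced names are treated as placeholders.
location_block = """\
location ~* {endpoint} {{
    proxy_pass {upstream};
}}
"""

print(location_block.format(endpoint="/_matrix/client", upstream="http://localhost:8083"))
# Renders with single braces, as nginx expects:
#   location ~* /_matrix/client {
#       proxy_pass http://localhost:8083;
#   }
```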
diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py index 4be6afc65d9a..1d22a4d5719d 100755 --- a/docker/configure_workers_and_start.py +++ b/docker/configure_workers_and_start.py @@ -184,18 +184,18 @@ """ NGINX_LOCATION_CONFIG_BLOCK = """ - location ~* {endpoint} { + location ~* {endpoint} {{ proxy_pass {upstream}; proxy_set_header X-Forwarded-For $remote_addr; proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header Host $host; - } + }} """ NGINX_UPSTREAM_CONFIG_BLOCK = """ -upstream {upstream_worker_type} { +upstream {upstream_worker_type} {{ {body} -} +}} """ diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index 1612ab522c33..004396467378 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -10,6 +10,9 @@ # checkout by setting the COMPLEMENT_DIR environment variable to the # filepath of a local Complement checkout. # +# By default Synapse is run in monolith mode. This can be overridden by +# setting the WORKERS environment variable. +# # A regular expression of test method names can be supplied as the first # argument to the script. Complement will then only run those tests. If # no regex is supplied, all tests are run. For example; @@ -32,10 +35,26 @@ if [[ -z "$COMPLEMENT_DIR" ]]; then echo "Checkout available at 'complement-master'" fi +# If we're using workers, modify the docker files slightly. +if [[ -n "$WORKERS" ]]; then + BASE_IMAGE=matrixdotorg/synapse-workers + BASE_DOCKERFILE=docker/Dockerfile-workers + export COMPLEMENT_BASE_IMAGE=complement-synapse-workers + COMPLEMENT_DOCKERFILE=SynapseWorkers.Dockerfile + # And provide some more configuration to complement. + export COMPLEMENT_CA=true + export COMPLEMENT_VERSION_CHECK_ITERATIONS=500 +else + BASE_IMAGE=matrixdotorg/synapse + BASE_DOCKERFILE=docker/Dockerfile + export COMPLEMENT_BASE_IMAGE=complement-synapse + COMPLEMENT_DOCKERFILE=Synapse.Dockerfile +fi + # Build the base Synapse image from the local checkout -docker build -t matrixdotorg/synapse -f docker/Dockerfile . +docker build -t $BASE_IMAGE -f "$BASE_DOCKERFILE" . # Build the Synapse monolith image from Complement, based on the above image we just built -docker build -t complement-synapse -f "$COMPLEMENT_DIR/dockerfiles/Synapse.Dockerfile" "$COMPLEMENT_DIR/dockerfiles" +docker build -t $COMPLEMENT_BASE_IMAGE -f "$COMPLEMENT_DIR/dockerfiles/$COMPLEMENT_DOCKERFILE" "$COMPLEMENT_DIR/dockerfiles" cd "$COMPLEMENT_DIR" @@ -46,4 +65,4 @@ if [[ -n "$1" ]]; then fi # Run the tests! 
-COMPLEMENT_BASE_IMAGE=complement-synapse go test -v -tags synapse_blacklist,msc2946,msc3083 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests +go test -v -tags synapse_blacklist,msc2946,msc3083 -count=1 $EXTRA_COMPLEMENT_ARGS ./tests From 557635f69ab734142ae5889e215ea512f7678f21 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 25 May 2021 11:00:13 +0100 Subject: [PATCH 178/222] 1.35.0rc1 --- CHANGES.md | 64 +++++++++++++++++++++++++++++++++++++++ changelog.d/10001.misc | 1 - changelog.d/10002.bugfix | 1 - changelog.d/10007.feature | 1 - changelog.d/10011.feature | 1 - changelog.d/10014.bugfix | 1 - changelog.d/10016.doc | 1 - changelog.d/10017.misc | 1 - changelog.d/10018.misc | 1 - changelog.d/10029.bugfix | 1 - changelog.d/10033.bugfix | 1 - changelog.d/10036.misc | 1 - changelog.d/10038.feature | 1 - changelog.d/10039.misc | 1 - changelog.d/10043.doc | 1 - changelog.d/10045.docker | 1 - changelog.d/10050.misc | 1 - changelog.d/9280.removal | 1 - changelog.d/9803.doc | 1 - changelog.d/9823.misc | 1 - changelog.d/9922.feature | 1 - changelog.d/9958.feature | 1 - changelog.d/9974.misc | 1 - changelog.d/9975.misc | 1 - changelog.d/9977.misc | 1 - changelog.d/9978.feature | 1 - changelog.d/9980.doc | 1 - changelog.d/9981.misc | 1 - changelog.d/9982.misc | 1 - changelog.d/9984.misc | 1 - changelog.d/9985.misc | 1 - changelog.d/9986.misc | 1 - changelog.d/9987.misc | 1 - changelog.d/9988.doc | 1 - changelog.d/9989.doc | 1 - changelog.d/9991.bugfix | 1 - changelog.d/9993.misc | 1 - changelog.d/9995.bugfix | 1 - synapse/__init__.py | 2 +- 39 files changed, 65 insertions(+), 38 deletions(-) delete mode 100644 changelog.d/10001.misc delete mode 100644 changelog.d/10002.bugfix delete mode 100644 changelog.d/10007.feature delete mode 100644 changelog.d/10011.feature delete mode 100644 changelog.d/10014.bugfix delete mode 100644 changelog.d/10016.doc delete mode 100644 changelog.d/10017.misc delete mode 100644 changelog.d/10018.misc delete mode 100644 changelog.d/10029.bugfix delete mode 100644 changelog.d/10033.bugfix delete mode 100644 changelog.d/10036.misc delete mode 100644 changelog.d/10038.feature delete mode 100644 changelog.d/10039.misc delete mode 100644 changelog.d/10043.doc delete mode 100644 changelog.d/10045.docker delete mode 100644 changelog.d/10050.misc delete mode 100644 changelog.d/9280.removal delete mode 100644 changelog.d/9803.doc delete mode 100644 changelog.d/9823.misc delete mode 100644 changelog.d/9922.feature delete mode 100644 changelog.d/9958.feature delete mode 100644 changelog.d/9974.misc delete mode 100644 changelog.d/9975.misc delete mode 100644 changelog.d/9977.misc delete mode 100644 changelog.d/9978.feature delete mode 100644 changelog.d/9980.doc delete mode 100644 changelog.d/9981.misc delete mode 100644 changelog.d/9982.misc delete mode 100644 changelog.d/9984.misc delete mode 100644 changelog.d/9985.misc delete mode 100644 changelog.d/9986.misc delete mode 100644 changelog.d/9987.misc delete mode 100644 changelog.d/9988.doc delete mode 100644 changelog.d/9989.doc delete mode 100644 changelog.d/9991.bugfix delete mode 100644 changelog.d/9993.misc delete mode 100644 changelog.d/9995.bugfix diff --git a/CHANGES.md b/CHANGES.md index 709436da978c..0e451f983c95 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,67 @@ +Synapse 1.35.0rc1 (2021-05-25) +============================== + +Features +-------- + +- Add experimental support to allow a user who could join a restricted room to view it in the spaces summary. 
([\#9922](https://github.com/matrix-org/synapse/issues/9922), [\#10007](https://github.com/matrix-org/synapse/issues/10007), [\#10038](https://github.com/matrix-org/synapse/issues/10038)) +- Reduce memory usage when joining very large rooms over federation. ([\#9958](https://github.com/matrix-org/synapse/issues/9958)) +- Add a configuration option which allows enabling opentracing by user id. ([\#9978](https://github.com/matrix-org/synapse/issues/9978)) +- Enable experimental support for [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946) (spaces summary API) and [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083) (restricted join rules) by default. ([\#10011](https://github.com/matrix-org/synapse/issues/10011)) + + +Bugfixes +-------- + +- Fix a bug introduced in v1.26.0 which meant that `synapse_port_db` would not correctly initialise some postgres sequences, requiring manual updates afterwards. ([\#9991](https://github.com/matrix-org/synapse/issues/9991)) +- Fix `synctl`'s `--no-daemonize` parameter to work correctly with worker processes. ([\#9995](https://github.com/matrix-org/synapse/issues/9995)) +- Fix a validation bug introduced in v1.34.0 in the ordering of spaces in the space summary API. ([\#10002](https://github.com/matrix-org/synapse/issues/10002)) +- Fixed deletion of new presence stream states from database. ([\#10014](https://github.com/matrix-org/synapse/issues/10014), [\#10033](https://github.com/matrix-org/synapse/issues/10033)) +- Fixed a bug with very high resolution image uploads throwing internal server errors. ([\#10029](https://github.com/matrix-org/synapse/issues/10029)) + + +Updates to the Docker image +--------------------------- + +- Fix bug introduced in Synapse 1.33.0 which caused a `Permission denied: '/homeserver.log'` error when starting Synapse with the generated log configuration. Contributed by Sergio Miguéns Iglesias. ([\#10045](https://github.com/matrix-org/synapse/issues/10045)) + + +Improved Documentation +---------------------- + +- Add hardened systemd files as proposed in [#9760](https://github.com/matrix-org/synapse/issues/9760) and added them to `contrib/`. Change the docs to reflect the presence of these files. ([\#9803](https://github.com/matrix-org/synapse/issues/9803)) +- Clarify documentation around SSO mapping providers generating unique IDs and localparts. ([\#9980](https://github.com/matrix-org/synapse/issues/9980)) +- Updates to the PostgreSQL documentation (`postgres.md`). ([\#9988](https://github.com/matrix-org/synapse/issues/9988), [\#9989](https://github.com/matrix-org/synapse/issues/9989)) +- Fix broken link in user directory documentation. Contributed by @junquera. ([\#10016](https://github.com/matrix-org/synapse/issues/10016)) +- Add missing room state entry to the table of contents of room admin API. ([\#10043](https://github.com/matrix-org/synapse/issues/10043)) + + +Deprecations and Removals +------------------------- + +- Removed support for the deprecated `tls_fingerprints` configuration setting. Contributed by Jerin J Titus. ([\#9280](https://github.com/matrix-org/synapse/issues/9280)) + + +Internal Changes +---------------- + +- Allow sending full presence to users via workers other than the one that called `ModuleApi.send_local_online_presence_to`. ([\#9823](https://github.com/matrix-org/synapse/issues/9823)) +- Update comments in the space summary handler. ([\#9974](https://github.com/matrix-org/synapse/issues/9974)) +- Minor enhancements to the `@cachedList` descriptor. 
([\#9975](https://github.com/matrix-org/synapse/issues/9975)) +- Split multipart email sending into a dedicated handler. ([\#9977](https://github.com/matrix-org/synapse/issues/9977)) +- Run `black` on files in the `scripts` directory. ([\#9981](https://github.com/matrix-org/synapse/issues/9981)) +- Add missing type hints to `synapse.util` module. ([\#9982](https://github.com/matrix-org/synapse/issues/9982)) +- Simplify a few helper functions. ([\#9984](https://github.com/matrix-org/synapse/issues/9984), [\#9985](https://github.com/matrix-org/synapse/issues/9985), [\#9986](https://github.com/matrix-org/synapse/issues/9986)) +- Remove unnecessary property from SQLBaseStore. ([\#9987](https://github.com/matrix-org/synapse/issues/9987)) +- Remove `keylen` param on `LruCache`. ([\#9993](https://github.com/matrix-org/synapse/issues/9993)) +- Update the Grafana dashboard in `contrib/`. ([\#10001](https://github.com/matrix-org/synapse/issues/10001)) +- Add a batching queue implementation. ([\#10017](https://github.com/matrix-org/synapse/issues/10017)) +- Reduce memory usage when verifying signatures on large numbers of events at once. ([\#10018](https://github.com/matrix-org/synapse/issues/10018)) +- Properly invalidate caches for destination retry timings every (instead of expiring entries every 5 minutes). ([\#10036](https://github.com/matrix-org/synapse/issues/10036)) +- Fix running complement tests with Synapse workers. ([\#10039](https://github.com/matrix-org/synapse/issues/10039)) +- Fix typo in `get_state_ids_for_event` docstring where the return type was incorrect. ([\#10050](https://github.com/matrix-org/synapse/issues/10050)) + + Synapse 1.34.0 (2021-05-17) =========================== diff --git a/changelog.d/10001.misc b/changelog.d/10001.misc deleted file mode 100644 index 8740cc478dda..000000000000 --- a/changelog.d/10001.misc +++ /dev/null @@ -1 +0,0 @@ -Update the Grafana dashboard in `contrib/`. diff --git a/changelog.d/10002.bugfix b/changelog.d/10002.bugfix deleted file mode 100644 index 1fabdad22eee..000000000000 --- a/changelog.d/10002.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a validation bug introduced in v1.34.0 in the ordering of spaces in the space summary API. diff --git a/changelog.d/10007.feature b/changelog.d/10007.feature deleted file mode 100644 index 2c655350c04f..000000000000 --- a/changelog.d/10007.feature +++ /dev/null @@ -1 +0,0 @@ -Experimental support to allow a user who could join a restricted room to view it in the spaces summary. diff --git a/changelog.d/10011.feature b/changelog.d/10011.feature deleted file mode 100644 index 409140fb1367..000000000000 --- a/changelog.d/10011.feature +++ /dev/null @@ -1 +0,0 @@ -Enable experimental support for [MSC2946](https://github.com/matrix-org/matrix-doc/pull/2946) (spaces summary API) and [MSC3083](https://github.com/matrix-org/matrix-doc/pull/3083) (restricted join rules) by default. diff --git a/changelog.d/10014.bugfix b/changelog.d/10014.bugfix deleted file mode 100644 index 7cf3603f94df..000000000000 --- a/changelog.d/10014.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fixed deletion of new presence stream states from database. diff --git a/changelog.d/10016.doc b/changelog.d/10016.doc deleted file mode 100644 index f9b615d7d7ab..000000000000 --- a/changelog.d/10016.doc +++ /dev/null @@ -1 +0,0 @@ -Fix broken link in user directory documentation. Contributed by @junquera. 
diff --git a/changelog.d/10017.misc b/changelog.d/10017.misc deleted file mode 100644 index 4777b7fb57b1..000000000000 --- a/changelog.d/10017.misc +++ /dev/null @@ -1 +0,0 @@ -Add a batching queue implementation. diff --git a/changelog.d/10018.misc b/changelog.d/10018.misc deleted file mode 100644 index eaf9f64867a6..000000000000 --- a/changelog.d/10018.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce memory usage when verifying signatures on large numbers of events at once. diff --git a/changelog.d/10029.bugfix b/changelog.d/10029.bugfix deleted file mode 100644 index c214cbdaecc1..000000000000 --- a/changelog.d/10029.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fixed a bug with very high resolution image uploads throwing internal server errors. \ No newline at end of file diff --git a/changelog.d/10033.bugfix b/changelog.d/10033.bugfix deleted file mode 100644 index 587d839b8c9d..000000000000 --- a/changelog.d/10033.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fixed deletion of new presence stream states from database. \ No newline at end of file diff --git a/changelog.d/10036.misc b/changelog.d/10036.misc deleted file mode 100644 index d2cf1e54734e..000000000000 --- a/changelog.d/10036.misc +++ /dev/null @@ -1 +0,0 @@ -Properly invalidate caches for destination retry timings every (instead of expiring entries every 5 minutes). diff --git a/changelog.d/10038.feature b/changelog.d/10038.feature deleted file mode 100644 index 2c655350c04f..000000000000 --- a/changelog.d/10038.feature +++ /dev/null @@ -1 +0,0 @@ -Experimental support to allow a user who could join a restricted room to view it in the spaces summary. diff --git a/changelog.d/10039.misc b/changelog.d/10039.misc deleted file mode 100644 index 8855f141d903..000000000000 --- a/changelog.d/10039.misc +++ /dev/null @@ -1 +0,0 @@ -Fix running complement tests with Synapse workers. diff --git a/changelog.d/10043.doc b/changelog.d/10043.doc deleted file mode 100644 index a574ec0bf06c..000000000000 --- a/changelog.d/10043.doc +++ /dev/null @@ -1 +0,0 @@ -Add missing room state entry to the table of contents of room admin API. \ No newline at end of file diff --git a/changelog.d/10045.docker b/changelog.d/10045.docker deleted file mode 100644 index 70b65b0a01a4..000000000000 --- a/changelog.d/10045.docker +++ /dev/null @@ -1 +0,0 @@ -Fix bug introduced in Synapse 1.33.0 which caused a `Permission denied: '/homeserver.log'` error when starting Synapse with the generated log configuration. Contributed by Sergio Miguéns Iglesias. diff --git a/changelog.d/10050.misc b/changelog.d/10050.misc deleted file mode 100644 index 2cac953cca1e..000000000000 --- a/changelog.d/10050.misc +++ /dev/null @@ -1 +0,0 @@ -Fix typo in `get_state_ids_for_event` docstring where the return type was incorrect. diff --git a/changelog.d/9280.removal b/changelog.d/9280.removal deleted file mode 100644 index c2ed3d308dda..000000000000 --- a/changelog.d/9280.removal +++ /dev/null @@ -1 +0,0 @@ -Removed support for the deprecated `tls_fingerprints` configuration setting. Contributed by Jerin J Titus. \ No newline at end of file diff --git a/changelog.d/9803.doc b/changelog.d/9803.doc deleted file mode 100644 index 16c7ba703394..000000000000 --- a/changelog.d/9803.doc +++ /dev/null @@ -1 +0,0 @@ -Add hardened systemd files as proposed in [#9760](https://github.com/matrix-org/synapse/issues/9760) and added them to `contrib/`. Change the docs to reflect the presence of these files. 
diff --git a/changelog.d/9823.misc b/changelog.d/9823.misc deleted file mode 100644 index bf924ab68cc4..000000000000 --- a/changelog.d/9823.misc +++ /dev/null @@ -1 +0,0 @@ -Allow sending full presence to users via workers other than the one that called `ModuleApi.send_local_online_presence_to`. \ No newline at end of file diff --git a/changelog.d/9922.feature b/changelog.d/9922.feature deleted file mode 100644 index 2c655350c04f..000000000000 --- a/changelog.d/9922.feature +++ /dev/null @@ -1 +0,0 @@ -Experimental support to allow a user who could join a restricted room to view it in the spaces summary. diff --git a/changelog.d/9958.feature b/changelog.d/9958.feature deleted file mode 100644 index d86ba36519f4..000000000000 --- a/changelog.d/9958.feature +++ /dev/null @@ -1 +0,0 @@ -Reduce memory usage when joining very large rooms over federation. diff --git a/changelog.d/9974.misc b/changelog.d/9974.misc deleted file mode 100644 index 9ddee2618e21..000000000000 --- a/changelog.d/9974.misc +++ /dev/null @@ -1 +0,0 @@ -Update comments in the space summary handler. diff --git a/changelog.d/9975.misc b/changelog.d/9975.misc deleted file mode 100644 index 28b1e40c2b99..000000000000 --- a/changelog.d/9975.misc +++ /dev/null @@ -1 +0,0 @@ -Minor enhancements to the `@cachedList` descriptor. diff --git a/changelog.d/9977.misc b/changelog.d/9977.misc deleted file mode 100644 index 093dffc6beac..000000000000 --- a/changelog.d/9977.misc +++ /dev/null @@ -1 +0,0 @@ -Split multipart email sending into a dedicated handler. diff --git a/changelog.d/9978.feature b/changelog.d/9978.feature deleted file mode 100644 index 851adb9f6ea8..000000000000 --- a/changelog.d/9978.feature +++ /dev/null @@ -1 +0,0 @@ -Add a configuration option which allows enabling opentracing by user id. diff --git a/changelog.d/9980.doc b/changelog.d/9980.doc deleted file mode 100644 index d30ed0601da6..000000000000 --- a/changelog.d/9980.doc +++ /dev/null @@ -1 +0,0 @@ -Clarify documentation around SSO mapping providers generating unique IDs and localparts. diff --git a/changelog.d/9981.misc b/changelog.d/9981.misc deleted file mode 100644 index 677c9b4cbdc1..000000000000 --- a/changelog.d/9981.misc +++ /dev/null @@ -1 +0,0 @@ -Run `black` on files in the `scripts` directory. diff --git a/changelog.d/9982.misc b/changelog.d/9982.misc deleted file mode 100644 index f3821f61a32a..000000000000 --- a/changelog.d/9982.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints to `synapse.util` module. diff --git a/changelog.d/9984.misc b/changelog.d/9984.misc deleted file mode 100644 index 97bd747f2653..000000000000 --- a/changelog.d/9984.misc +++ /dev/null @@ -1 +0,0 @@ -Simplify a few helper functions. diff --git a/changelog.d/9985.misc b/changelog.d/9985.misc deleted file mode 100644 index 97bd747f2653..000000000000 --- a/changelog.d/9985.misc +++ /dev/null @@ -1 +0,0 @@ -Simplify a few helper functions. diff --git a/changelog.d/9986.misc b/changelog.d/9986.misc deleted file mode 100644 index 97bd747f2653..000000000000 --- a/changelog.d/9986.misc +++ /dev/null @@ -1 +0,0 @@ -Simplify a few helper functions. diff --git a/changelog.d/9987.misc b/changelog.d/9987.misc deleted file mode 100644 index 02c088e3e6c2..000000000000 --- a/changelog.d/9987.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unnecessary property from SQLBaseStore. 
diff --git a/changelog.d/9988.doc b/changelog.d/9988.doc deleted file mode 100644 index 25338c44c312..000000000000 --- a/changelog.d/9988.doc +++ /dev/null @@ -1 +0,0 @@ -Updates to the PostgreSQL documentation (`postgres.md`). diff --git a/changelog.d/9989.doc b/changelog.d/9989.doc deleted file mode 100644 index 25338c44c312..000000000000 --- a/changelog.d/9989.doc +++ /dev/null @@ -1 +0,0 @@ -Updates to the PostgreSQL documentation (`postgres.md`). diff --git a/changelog.d/9991.bugfix b/changelog.d/9991.bugfix deleted file mode 100644 index 665ff04dea84..000000000000 --- a/changelog.d/9991.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in v1.26.0 which meant that `synapse_port_db` would not correctly initialise some postgres sequences, requiring manual updates afterwards. diff --git a/changelog.d/9993.misc b/changelog.d/9993.misc deleted file mode 100644 index 0dd92440717d..000000000000 --- a/changelog.d/9993.misc +++ /dev/null @@ -1 +0,0 @@ -Remove `keylen` param on `LruCache`. diff --git a/changelog.d/9995.bugfix b/changelog.d/9995.bugfix deleted file mode 100644 index 3b63e7c42aed..000000000000 --- a/changelog.d/9995.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix `synctl`'s `--no-daemonize` parameter to work correctly with worker processes. diff --git a/synapse/__init__.py b/synapse/__init__.py index 7498a6016f0b..e60e9db71ef8 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.34.0" +__version__ = "1.35.0rc1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 3e1beb75e65f48acb778a64da66a97b01f48bdd3 Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Wed, 26 May 2021 04:55:30 -0500 Subject: [PATCH 179/222] Update CAPTCHA documentation to mention turning off verify origin feature (#10046) * Update CAPTCHA documentation to mention turning off verify origin Signed-off-by: Aaron Raimist --- changelog.d/10046.doc | 1 + docs/CAPTCHA_SETUP.md | 50 ++++++++++++++++++++++++------------------- 2 files changed, 29 insertions(+), 22 deletions(-) create mode 100644 changelog.d/10046.doc diff --git a/changelog.d/10046.doc b/changelog.d/10046.doc new file mode 100644 index 000000000000..995960163b0c --- /dev/null +++ b/changelog.d/10046.doc @@ -0,0 +1 @@ +Update CAPTCHA documentation to mention turning off the verify origin feature. Contributed by @aaronraimist. diff --git a/docs/CAPTCHA_SETUP.md b/docs/CAPTCHA_SETUP.md index 331e5d059a0e..fabdd7b7265e 100644 --- a/docs/CAPTCHA_SETUP.md +++ b/docs/CAPTCHA_SETUP.md @@ -1,31 +1,37 @@ # Overview -Captcha can be enabled for this home server. This file explains how to do that. -The captcha mechanism used is Google's ReCaptcha. This requires API keys from Google. - -## Getting keys - -Requires a site/secret key pair from: - - - -Must be a reCAPTCHA v2 key using the "I'm not a robot" Checkbox option - -## Setting ReCaptcha Keys - -The keys are a config option on the home server config. If they are not -visible, you can generate them via `--generate-config`. Set the following value: - +A captcha can be enabled on your homeserver to help prevent bots from registering +accounts. Synapse currently uses Google's reCAPTCHA service which requires API keys +from Google. + +## Getting API keys + +1. Create a new site at +1. Set the label to anything you want +1. Set the type to reCAPTCHA v2 using the "I'm not a robot" Checkbox option. +This is the only type of captcha that works with Synapse. +1. 
Add the public hostname for your server, as set in `public_baseurl` +in `homeserver.yaml`, to the list of authorized domains. If you have not set +`public_baseurl`, use `server_name`. +1. Agree to the terms of service and submit. +1. Copy your site key and secret key and add them to your `homeserver.yaml` +configuration file + ``` recaptcha_public_key: YOUR_SITE_KEY recaptcha_private_key: YOUR_SECRET_KEY - -In addition, you MUST enable captchas via: - + ``` +1. Enable the CAPTCHA for new registrations + ``` enable_registration_captcha: true + ``` +1. Go to the settings page for the CAPTCHA you just created +1. Uncheck the "Verify the origin of reCAPTCHA solutions" checkbox so that the +captcha can be displayed in any client. If you do not disable this option then you +must specify the domains of every client that is allowed to display the CAPTCHA. ## Configuring IP used for auth -The ReCaptcha API requires that the IP address of the user who solved the -captcha is sent. If the client is connecting through a proxy or load balancer, +The reCAPTCHA API requires that the IP address of the user who solved the +CAPTCHA is sent. If the client is connecting through a proxy or load balancer, it may be required to use the `X-Forwarded-For` (XFF) header instead of the origin IP address. This can be configured using the `x_forwarded` directive in the -listeners section of the homeserver.yaml configuration file. +listeners section of the `homeserver.yaml` configuration file. From 65e6c64d8317d3a10527a7e422753281f3e9ec81 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Wed, 26 May 2021 12:19:47 +0200 Subject: [PATCH 180/222] Add an admin API for unprotecting local media from quarantine (#10040) Signed-off-by: Dirk Klimpel dirk@klimpel.org --- changelog.d/10040.feature | 1 + docs/admin_api/media_admin_api.md | 21 ++++ synapse/rest/admin/media.py | 28 +++++- .../databases/main/media_repository.py | 7 +- tests/rest/admin/test_media.py | 99 +++++++++++++++++++ 5 files changed, 151 insertions(+), 5 deletions(-) create mode 100644 changelog.d/10040.feature diff --git a/changelog.d/10040.feature b/changelog.d/10040.feature new file mode 100644 index 000000000000..ec78a30f0074 --- /dev/null +++ b/changelog.d/10040.feature @@ -0,0 +1 @@ +Add an admin API for unprotecting local media from quarantine. Contributed by @dklimpel. diff --git a/docs/admin_api/media_admin_api.md b/docs/admin_api/media_admin_api.md index 9dbec68c19e6..d1b7e390d5a9 100644 --- a/docs/admin_api/media_admin_api.md +++ b/docs/admin_api/media_admin_api.md @@ -7,6 +7,7 @@ * [Quarantining media in a room](#quarantining-media-in-a-room) * [Quarantining all media of a user](#quarantining-all-media-of-a-user) * [Protecting media from being quarantined](#protecting-media-from-being-quarantined) + * [Unprotecting media from being quarantined](#unprotecting-media-from-being-quarantined) - [Delete local media](#delete-local-media) * [Delete a specific local media](#delete-a-specific-local-media) * [Delete local media by date or size](#delete-local-media-by-date-or-size) @@ -159,6 +160,26 @@ Response: {} ``` +## Unprotecting media from being quarantined + +This API reverts the protection of a media. + +Request: + +``` +POST /_synapse/admin/v1/media/unprotect/ + +{} +``` + +Where `media_id` is in the form of `abcdefg12345...`. + +Response: + +```json +{} +``` + # Delete local media This API deletes the *local* media from the disk of your own server. 
This includes any local thumbnails and copies of media downloaded from diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py index 24dd46113a29..2c71af4279bf 100644 --- a/synapse/rest/admin/media.py +++ b/synapse/rest/admin/media.py @@ -137,8 +137,31 @@ async def on_POST( logging.info("Protecting local media by ID: %s", media_id) - # Quarantine this media id - await self.store.mark_local_media_as_safe(media_id) + # Protect this media id + await self.store.mark_local_media_as_safe(media_id, safe=True) + + return 200, {} + + +class UnprotectMediaByID(RestServlet): + """Unprotect local media from being quarantined.""" + + PATTERNS = admin_patterns("/media/unprotect/(?P<media_id>[^/]+)") + + def __init__(self, hs: "HomeServer"): + self.store = hs.get_datastore() + self.auth = hs.get_auth() + + async def on_POST( + self, request: SynapseRequest, media_id: str + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request) + await assert_user_is_admin(self.auth, requester.user) + + logging.info("Unprotecting local media by ID: %s", media_id) + + # Unprotect this media id + await self.store.mark_local_media_as_safe(media_id, safe=False) return 200, {} @@ -269,6 +292,7 @@ def register_servlets_for_media_repo(hs: "HomeServer", http_server): QuarantineMediaByID(hs).register(http_server) QuarantineMediaByUser(hs).register(http_server) ProtectMediaByID(hs).register(http_server) + UnprotectMediaByID(hs).register(http_server) ListMediaInRoom(hs).register(http_server) DeleteMediaByID(hs).register(http_server) DeleteMediaByDateSize(hs).register(http_server) diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py index c584868188e9..2fa945d171f0 100644 --- a/synapse/storage/databases/main/media_repository.py +++ b/synapse/storage/databases/main/media_repository.py @@ -143,6 +143,7 @@ async def get_local_media(self, media_id: str) -> Optional[Dict[str, Any]]: "created_ts", "quarantined_by", "url_cache", + "safe_from_quarantine", ), allow_none=True, desc="get_local_media", @@ -296,12 +297,12 @@ async def store_local_media( desc="store_local_media", ) - async def mark_local_media_as_safe(self, media_id: str) -> None: - """Mark a local media as safe from quarantining.""" + async def mark_local_media_as_safe(self, media_id: str, safe: bool = True) -> None: + """Mark a local media as safe or unsafe from quarantining.""" await self.db_pool.simple_update_one( table="local_media_repository", keyvalues={"media_id": media_id}, - updatevalues={"safe_from_quarantine": True}, + updatevalues={"safe_from_quarantine": safe}, desc="mark_local_media_as_safe", ) diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py index ac7b21970033..f741121ea236 100644 --- a/tests/rest/admin/test_media.py +++ b/tests/rest/admin/test_media.py @@ -16,6 +16,8 @@ import os from binascii import unhexlify +from parameterized import parameterized + import synapse.rest.admin from synapse.api.errors import Codes from synapse.rest.client.v1 import login, profile, room @@ -562,3 +564,100 @@ def _access_media(self, server_and_media_id, expect_success=True): ) # Test that the file is deleted self.assertFalse(os.path.exists(local_path)) + + +class ProtectMediaByIDTestCase(unittest.HomeserverTestCase): + + servlets = [ + synapse.rest.admin.register_servlets, + synapse.rest.admin.register_servlets_for_media_repo, + login.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + media_repo = hs.get_media_repository_resource() + self.store 
= hs.get_datastore() + + self.admin_user = self.register_user("admin", "pass", admin=True) + self.admin_user_tok = self.login("admin", "pass") + + # Create media + upload_resource = media_repo.children[b"upload"] + # file size is 67 bytes + image_data = unhexlify( + b"89504e470d0a1a0a0000000d4948445200000001000000010806" + b"0000001f15c4890000000a49444154789c63000100000500010d" + b"0a2db40000000049454e44ae426082" + ) + + # Upload some media into the room + response = self.helper.upload_media( + upload_resource, image_data, tok=self.admin_user_tok, expect_code=200 + ) + # Extract media ID from the response + server_and_media_id = response["content_uri"][6:] # Cut off 'mxc://' + self.media_id = server_and_media_id.split("/")[1] + + self.url = "/_synapse/admin/v1/media/%s/%s" + + @parameterized.expand(["protect", "unprotect"]) + def test_no_auth(self, action: str): + """ + Try to protect media without authentication. + """ + + channel = self.make_request("POST", self.url % (action, self.media_id), b"{}") + + self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) + + @parameterized.expand(["protect", "unprotect"]) + def test_requester_is_no_admin(self, action: str): + """ + If the user is not a server admin, an error is returned. + """ + self.other_user = self.register_user("user", "pass") + self.other_user_token = self.login("user", "pass") + + channel = self.make_request( + "POST", + self.url % (action, self.media_id), + access_token=self.other_user_token, + ) + + self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) + + def test_protect_media(self): + """ + Tests that protecting and unprotecting media succeeds + """ + + media_info = self.get_success(self.store.get_local_media(self.media_id)) + self.assertFalse(media_info["safe_from_quarantine"]) + + # protect + channel = self.make_request( + "POST", + self.url % ("protect", self.media_id), + access_token=self.admin_user_tok, + ) + + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertFalse(channel.json_body) + + media_info = self.get_success(self.store.get_local_media(self.media_id)) + self.assertTrue(media_info["safe_from_quarantine"]) + + # unprotect + channel = self.make_request( + "POST", + self.url % ("unprotect", self.media_id), + access_token=self.admin_user_tok, + ) + + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertFalse(channel.json_body) + + media_info = self.get_success(self.store.get_local_media(self.media_id)) + self.assertFalse(media_info["safe_from_quarantine"]) From 913a761a53640b725245408ff8f49bf54493707c Mon Sep 17 00:00:00 2001 From: Dan Callahan Date: Wed, 26 May 2021 13:16:06 +0100 Subject: [PATCH 181/222] Tell CircleCI to build Docker images from `main` (#9906) The `only` field takes a string or list of strings per the Circle docs: https://circleci.com/docs/2.0/configuration-reference/#branches Signed-off-by: Dan Callahan --- .circleci/config.yml | 2 +- changelog.d/9906.misc | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/9906.misc diff --git a/.circleci/config.yml b/.circleci/config.yml index 1ac48a71bace..cf1989eff923 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -41,7 +41,7 @@ workflows: - dockerhubuploadlatest: filters: branches: - only: master + only: [ master, main ] commands: docker_prepare: diff --git a/changelog.d/9906.misc 
b/changelog.d/9906.misc new file mode 100644 index 000000000000..667d51a4c0e5 --- /dev/null +++ b/changelog.d/9906.misc @@ -0,0 +1 @@ +Tell CircleCI to build Docker images from `main` branch. From f95e7a03fa66e00b581d11f17244bf1701559f6c Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Wed, 26 May 2021 07:29:02 -0500 Subject: [PATCH 182/222] Tweak wording of database recommendation in INSTALL.md (#10057) * Tweak wording of database recommendation in INSTALL.md Signed-off-by: Aaron Raimist --- INSTALL.md | 12 +++++++----- changelog.d/10057.doc | 1 + 2 files changed, 8 insertions(+), 5 deletions(-) create mode 100644 changelog.d/10057.doc diff --git a/INSTALL.md b/INSTALL.md index 7b4068923451..3c498edd298b 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -399,11 +399,9 @@ Once you have installed synapse as above, you will need to configure it. ### Using PostgreSQL -By default Synapse uses [SQLite](https://sqlite.org/) and in doing so trades performance for convenience. -SQLite is only recommended in Synapse for testing purposes or for servers with -very light workloads. - -Almost all installations should opt to use [PostgreSQL](https://www.postgresql.org). Advantages include: +By default Synapse uses an [SQLite](https://sqlite.org/) database and in doing so trades +performance for convenience. Almost all installations should opt to use [PostgreSQL](https://www.postgresql.org) +instead. Advantages include: - significant performance improvements due to the superior threading and caching model, smarter query optimiser - allowing the DB to be run on separate hardware @@ -412,6 +410,10 @@ Almost all installations should opt to use [PostgreSQL](https://www.postgresql.o For information on how to install and use PostgreSQL in Synapse, please see [docs/postgres.md](docs/postgres.md) +SQLite is only acceptable for testing purposes; it should not be used in +a production server. Synapse will perform poorly when using +SQLite, especially when participating in large rooms. + ### TLS certificates The default configuration exposes a single HTTP port on the local diff --git a/changelog.d/10057.doc b/changelog.d/10057.doc new file mode 100644 index 000000000000..35437cb01747 --- /dev/null +++ b/changelog.d/10057.doc @@ -0,0 +1 @@ +Tweak wording of database recommendation in `INSTALL.md`. Contributed by @aaronraimist. \ No newline at end of file From 49df2c28e3f2f11ddb29536468990d0cd3ff68d0 Mon Sep 17 00:00:00 2001 From: Dan Callahan Date: Wed, 26 May 2021 14:14:43 +0100 Subject: [PATCH 183/222] Fix GitHub Actions lint for newsfragments (#10069) * Fix GitHub Actions lint for newsfragments Signed-off-by: Dan Callahan --- .github/workflows/tests.yml | 6 ++++++ changelog.d/10069.misc | 1 + 2 files changed, 7 insertions(+) create mode 100644 changelog.d/10069.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index e7f3be1b4ea9..2ae81b5fcf49 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -34,7 +34,13 @@ jobs: if: ${{ github.base_ref == 'develop' || contains(github.base_ref, 'release-') }} runs-on: ubuntu-latest steps: + # Note: This and the script can be simplified once we drop Buildkite. 
See: + # https://github.com/actions/checkout/issues/266#issuecomment-638346893 + # https://github.com/actions/checkout/issues/416 - uses: actions/checkout@v2 + with: + ref: ${{ github.event.pull_request.head.sha }} + fetch-depth: 0 - uses: actions/setup-python@v2 - run: pip install tox - name: Patch Buildkite-specific test script diff --git a/changelog.d/10069.misc b/changelog.d/10069.misc new file mode 100644 index 000000000000..a8d2629e9b5d --- /dev/null +++ b/changelog.d/10069.misc @@ -0,0 +1 @@ +Fix GitHub Actions lint for newsfragments. From f42e4c4eb9b5b84bd1da80a4f3938c1c06305364 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 26 May 2021 14:35:16 -0400 Subject: [PATCH 184/222] Remove the experimental spaces enabled flag. (#10063) In lieu of just always enabling the unstable spaces endpoint and unstable room version. --- changelog.d/10063.removal | 1 + docs/sample_config.yaml | 15 --------------- synapse/api/room_versions.py | 2 +- synapse/config/experimental.py | 23 ----------------------- synapse/federation/transport/server.py | 13 ++++++------- synapse/rest/client/v1/room.py | 4 +--- 6 files changed, 9 insertions(+), 49 deletions(-) create mode 100644 changelog.d/10063.removal diff --git a/changelog.d/10063.removal b/changelog.d/10063.removal new file mode 100644 index 000000000000..0f8889b6b4d7 --- /dev/null +++ b/changelog.d/10063.removal @@ -0,0 +1 @@ +Remove the experimental `spaces_enabled` flag. The spaces features are always available now. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 6576b153d0eb..7b97f73a296b 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -2916,18 +2916,3 @@ redis: # Optional password if configured on the Redis instance # #password: - - -# Enable experimental features in Synapse. -# -# Experimental features might break or be removed without a deprecation -# period. -# -experimental_features: - # Support for Spaces (MSC1772), it enables the following: - # - # * The Spaces Summary API (MSC2946). - # * Restricting room membership based on space membership (MSC3083). - # - # Uncomment to disable support for Spaces. - #spaces_enabled: false diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index c9f9596ada92..373a4669d019 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -181,6 +181,6 @@ class RoomVersions: RoomVersions.V5, RoomVersions.V6, RoomVersions.MSC2176, + RoomVersions.MSC3083, ) - # Note that we do not include MSC3083 here unless it is enabled in the config. } # type: Dict[str, RoomVersion] diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index cc67377f0fe7..6ebce4b2f75d 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions from synapse.config._base import Config from synapse.types import JsonDict @@ -28,27 +27,5 @@ def read_config(self, config: JsonDict, **kwargs): # MSC2858 (multiple SSO identity providers) self.msc2858_enabled = experimental.get("msc2858_enabled", False) # type: bool - # Spaces (MSC1772, MSC2946, MSC3083, etc) - self.spaces_enabled = experimental.get("spaces_enabled", True) # type: bool - if self.spaces_enabled: - KNOWN_ROOM_VERSIONS[RoomVersions.MSC3083.identifier] = RoomVersions.MSC3083 - # MSC3026 (busy presence state) self.msc3026_enabled = experimental.get("msc3026_enabled", False) # type: bool - - def generate_config_section(self, **kwargs): - return """\ - # Enable experimental features in Synapse. - # - # Experimental features might break or be removed without a deprecation - # period. - # - experimental_features: - # Support for Spaces (MSC1772), it enables the following: - # - # * The Spaces Summary API (MSC2946). - # * Restricting room membership based on space membership (MSC3083). - # - # Uncomment to disable support for Spaces. - #spaces_enabled: false - """ diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 9d50b05d0152..00ff02c7cbb5 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -1562,13 +1562,12 @@ def register_servlets( server_name=hs.hostname, ).register(resource) - if hs.config.experimental.spaces_enabled: - FederationSpaceSummaryServlet( - handler=hs.get_space_summary_handler(), - authenticator=authenticator, - ratelimiter=ratelimiter, - server_name=hs.hostname, - ).register(resource) + FederationSpaceSummaryServlet( + handler=hs.get_space_summary_handler(), + authenticator=authenticator, + ratelimiter=ratelimiter, + server_name=hs.hostname, + ).register(resource) if "openid" in servlet_groups: for servletclass in OPENID_SERVLET_CLASSES: diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 51813cccbe72..d6d55893af8a 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -1060,9 +1060,7 @@ def register_servlets(hs: "HomeServer", http_server, is_worker=False): RoomRedactEventRestServlet(hs).register(http_server) RoomTypingRestServlet(hs).register(http_server) RoomEventContextServlet(hs).register(http_server) - - if hs.config.experimental.spaces_enabled: - RoomSpaceSummaryRestServlet(hs).register(http_server) + RoomSpaceSummaryRestServlet(hs).register(http_server) # Some servlets only get registered for the main process. if not is_worker: From 224f2f949b1661094a64d1105efb64159ddf4aa0 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 27 May 2021 10:33:56 +0100 Subject: [PATCH 185/222] Combine `LruCache.invalidate` and `invalidate_many` (#9973) * Make `invalidate` and `invalidate_many` do the same thing ... so that we can do either over the invalidation replication stream, and also because they always confused me a bit. 
* Kill off `invalidate_many` * changelog --- changelog.d/9973.misc | 1 + synapse/replication/slave/storage/devices.py | 2 +- synapse/storage/databases/main/cache.py | 6 +-- synapse/storage/databases/main/devices.py | 2 +- .../databases/main/event_push_actions.py | 2 +- synapse/storage/databases/main/events.py | 8 ++-- synapse/storage/databases/main/receipts.py | 6 +-- synapse/util/caches/deferred_cache.py | 42 +++++++------------ synapse/util/caches/descriptors.py | 8 +++- synapse/util/caches/lrucache.py | 18 ++++---- synapse/util/caches/treecache.py | 3 ++ tests/util/caches/test_descriptors.py | 6 +-- 12 files changed, 52 insertions(+), 52 deletions(-) create mode 100644 changelog.d/9973.misc diff --git a/changelog.d/9973.misc b/changelog.d/9973.misc new file mode 100644 index 000000000000..7f22d42291b2 --- /dev/null +++ b/changelog.d/9973.misc @@ -0,0 +1 @@ +Make `LruCache.invalidate` support tree invalidation, and remove `invalidate_many`. diff --git a/synapse/replication/slave/storage/devices.py b/synapse/replication/slave/storage/devices.py index 70207420a6b0..26bdead5651e 100644 --- a/synapse/replication/slave/storage/devices.py +++ b/synapse/replication/slave/storage/devices.py @@ -68,7 +68,7 @@ def _invalidate_caches_for_devices(self, token, rows): if row.entity.startswith("@"): self._device_list_stream_cache.entity_has_changed(row.entity, token) self.get_cached_devices_for_user.invalidate((row.entity,)) - self._get_cached_user_device.invalidate_many((row.entity,)) + self._get_cached_user_device.invalidate((row.entity,)) self.get_device_list_last_stream_id_for_remote.invalidate((row.entity,)) else: diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index ecc1f935e228..f7872501a06c 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -171,7 +171,7 @@ def _invalidate_caches_for_event( self.get_latest_event_ids_in_room.invalidate((room_id,)) - self.get_unread_event_push_actions_by_room_for_user.invalidate_many((room_id,)) + self.get_unread_event_push_actions_by_room_for_user.invalidate((room_id,)) if not backfilled: self._events_stream_cache.entity_has_changed(room_id, stream_ordering) @@ -184,8 +184,8 @@ def _invalidate_caches_for_event( self.get_invited_rooms_for_local_user.invalidate((state_key,)) if relates_to: - self.get_relations_for_event.invalidate_many((relates_to,)) - self.get_aggregation_groups_for_event.invalidate_many((relates_to,)) + self.get_relations_for_event.invalidate((relates_to,)) + self.get_aggregation_groups_for_event.invalidate((relates_to,)) self.get_applicable_edit.invalidate((relates_to,)) async def invalidate_cache_and_stream(self, cache_name: str, keys: Tuple[Any, ...]): diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index fd87ba71abcb..18f07d96dcd0 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -1282,7 +1282,7 @@ def _update_remote_device_list_cache_txn( ) txn.call_after(self.get_cached_devices_for_user.invalidate, (user_id,)) - txn.call_after(self._get_cached_user_device.invalidate_many, (user_id,)) + txn.call_after(self._get_cached_user_device.invalidate, (user_id,)) txn.call_after( self.get_device_list_last_stream_id_for_remote.invalidate, (user_id,) ) diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index 584532211837..d1237c65cc85 100644 --- 
a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -860,7 +860,7 @@ def _remove_old_push_actions_before_txn( not be deleted. """ txn.call_after( - self.get_unread_event_push_actions_by_room_for_user.invalidate_many, + self.get_unread_event_push_actions_by_room_for_user.invalidate, (room_id, user_id), ) diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index fd25c8112d73..897fa06639cb 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1748,9 +1748,9 @@ def _handle_event_relations(self, txn, event): }, ) - txn.call_after(self.store.get_relations_for_event.invalidate_many, (parent_id,)) + txn.call_after(self.store.get_relations_for_event.invalidate, (parent_id,)) txn.call_after( - self.store.get_aggregation_groups_for_event.invalidate_many, (parent_id,) + self.store.get_aggregation_groups_for_event.invalidate, (parent_id,) ) if rel_type == RelationTypes.REPLACE: @@ -1903,7 +1903,7 @@ def _set_push_actions_for_event_and_users_txn( for user_id in user_ids: txn.call_after( - self.store.get_unread_event_push_actions_by_room_for_user.invalidate_many, + self.store.get_unread_event_push_actions_by_room_for_user.invalidate, (room_id, user_id), ) @@ -1917,7 +1917,7 @@ def _set_push_actions_for_event_and_users_txn( def _remove_push_actions_for_event_id_txn(self, txn, room_id, event_id): # Sad that we have to blow away the cache for the whole room here txn.call_after( - self.store.get_unread_event_push_actions_by_room_for_user.invalidate_many, + self.store.get_unread_event_push_actions_by_room_for_user.invalidate, (room_id,), ) txn.execute( diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 3647276acb95..edeaacd7a65a 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -460,7 +460,7 @@ def _invalidate_get_users_with_receipts_in_room( def invalidate_caches_for_receipt(self, room_id, receipt_type, user_id): self.get_receipts_for_user.invalidate((user_id, receipt_type)) - self._get_linearized_receipts_for_room.invalidate_many((room_id,)) + self._get_linearized_receipts_for_room.invalidate((room_id,)) self.get_last_receipt_event_id_for_user.invalidate( (user_id, room_id, receipt_type) ) @@ -659,9 +659,7 @@ def insert_graph_receipt_txn( ) txn.call_after(self.get_receipts_for_user.invalidate, (user_id, receipt_type)) # FIXME: This shouldn't invalidate the whole cache - txn.call_after( - self._get_linearized_receipts_for_room.invalidate_many, (room_id,) - ) + txn.call_after(self._get_linearized_receipts_for_room.invalidate, (room_id,)) self.db_pool.simple_delete_txn( txn, diff --git a/synapse/util/caches/deferred_cache.py b/synapse/util/caches/deferred_cache.py index 371e7e4dd0de..1044139119c5 100644 --- a/synapse/util/caches/deferred_cache.py +++ b/synapse/util/caches/deferred_cache.py @@ -16,16 +16,7 @@ import enum import threading -from typing import ( - Callable, - Generic, - Iterable, - MutableMapping, - Optional, - TypeVar, - Union, - cast, -) +from typing import Callable, Generic, Iterable, MutableMapping, Optional, TypeVar, Union from prometheus_client import Gauge @@ -91,7 +82,7 @@ def __init__( # _pending_deferred_cache maps from the key value to a `CacheEntry` object. 
self._pending_deferred_cache = ( cache_type() - ) # type: MutableMapping[KT, CacheEntry] + ) # type: Union[TreeCache, MutableMapping[KT, CacheEntry]] def metrics_cb(): cache_pending_metric.labels(name).set(len(self._pending_deferred_cache)) @@ -287,8 +278,17 @@ def prefill( self.cache.set(key, value, callbacks=callbacks) def invalidate(self, key): + """Delete a key, or tree of entries + + If the cache is backed by a regular dict, then "key" must be of + the right type for this cache + + If the cache is backed by a TreeCache, then "key" must be a tuple, but + may be of lower cardinality than the TreeCache - in which case the whole + subtree is deleted. + """ self.check_thread() - self.cache.pop(key, None) + self.cache.del_multi(key) # if we have a pending lookup for this key, remove it from the # _pending_deferred_cache, which will (a) stop it being returned @@ -299,20 +299,10 @@ def invalidate(self, key): # run the invalidation callbacks now, rather than waiting for the # deferred to resolve. if entry: - entry.invalidate() - - def invalidate_many(self, key: KT): - self.check_thread() - if not isinstance(key, tuple): - raise TypeError("The cache key must be a tuple not %r" % (type(key),)) - key = cast(KT, key) - self.cache.del_multi(key) - - # if we have a pending lookup for this key, remove it from the - # _pending_deferred_cache, as above - entry_dict = self._pending_deferred_cache.pop(key, None) - if entry_dict is not None: - for entry in iterate_tree_cache_entry(entry_dict): + # _pending_deferred_cache.pop should either return a CacheEntry, or, in the + # case of a TreeCache, a dict of keys to cache entries. Either way calling + # iterate_tree_cache_entry on it will do the right thing. + for entry in iterate_tree_cache_entry(entry): entry.invalidate() def invalidate_all(self): diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 2ac24a2f2536..d77e8edeeadf 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -48,7 +48,6 @@ class _CachedFunction(Generic[F]): invalidate = None # type: Any invalidate_all = None # type: Any - invalidate_many = None # type: Any prefill = None # type: Any cache = None # type: Any num_args = None # type: Any @@ -262,6 +261,11 @@ def __init__( ): super().__init__(orig, num_args=num_args, cache_context=cache_context) + if tree and self.num_args < 2: + raise RuntimeError( + "tree=True is nonsensical for cached functions with a single parameter" + ) + self.max_entries = max_entries self.tree = tree self.iterable = iterable @@ -302,11 +306,11 @@ def _wrapped(*args, **kwargs): wrapped = cast(_CachedFunction, _wrapped) if self.num_args == 1: + assert not self.tree wrapped.invalidate = lambda key: cache.invalidate(key[0]) wrapped.prefill = lambda key, val: cache.prefill(key[0], val) else: wrapped.invalidate = cache.invalidate - wrapped.invalidate_many = cache.invalidate_many wrapped.prefill = cache.prefill wrapped.invalidate_all = cache.invalidate_all diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index 54df407ff70c..d89e9d9b1dfc 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -152,7 +152,6 @@ class LruCache(Generic[KT, VT]): """ Least-recently-used cache, supporting prometheus metrics and invalidation callbacks. - Supports del_multi only if cache_type=TreeCache If cache_type=TreeCache, all keys must be tuples. 
""" @@ -393,10 +392,16 @@ def cache_pop(key: KT, default: Optional[T] = None): @synchronized def cache_del_multi(key: KT) -> None: + """Delete an entry, or tree of entries + + If the LruCache is backed by a regular dict, then "key" must be of + the right type for this cache + + If the LruCache is backed by a TreeCache, then "key" must be a tuple, but + may be of lower cardinality than the TreeCache - in which case the whole + subtree is deleted. """ - This will only work if constructed with cache_type=TreeCache - """ - popped = cache.pop(key) + popped = cache.pop(key, None) if popped is None: return # for each deleted node, we now need to remove it from the linked list @@ -430,11 +435,10 @@ def cache_contains(key: KT) -> bool: self.set = cache_set self.setdefault = cache_set_default self.pop = cache_pop + self.del_multi = cache_del_multi # `invalidate` is exposed for consistency with DeferredCache, so that it can be # invalidated by the cache invalidation replication stream. - self.invalidate = cache_pop - if cache_type is TreeCache: - self.del_multi = cache_del_multi + self.invalidate = cache_del_multi self.len = synchronized(cache_len) self.contains = cache_contains self.clear = cache_clear diff --git a/synapse/util/caches/treecache.py b/synapse/util/caches/treecache.py index 73502a8b061b..a6df81ebffdc 100644 --- a/synapse/util/caches/treecache.py +++ b/synapse/util/caches/treecache.py @@ -89,6 +89,9 @@ def pop(self, key, default=None): value. If the key is partial, the TreeCacheNode corresponding to the part of the tree that was removed. """ + if not isinstance(key, tuple): + raise TypeError("The cache key must be a tuple not %r" % (type(key),)) + # a list of the nodes we have touched on the way down the tree nodes = [] diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index bbbc276697e2..0277998cbe59 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -622,17 +622,17 @@ def func2(self, key, cache_context): self.assertEquals(callcount2[0], 1) a.func2.invalidate(("foo",)) - self.assertEquals(a.func2.cache.cache.pop.call_count, 1) + self.assertEquals(a.func2.cache.cache.del_multi.call_count, 1) yield a.func2("foo") a.func2.invalidate(("foo",)) - self.assertEquals(a.func2.cache.cache.pop.call_count, 2) + self.assertEquals(a.func2.cache.cache.del_multi.call_count, 2) self.assertEquals(callcount[0], 1) self.assertEquals(callcount2[0], 2) a.func.invalidate(("foo",)) - self.assertEquals(a.func2.cache.cache.pop.call_count, 3) + self.assertEquals(a.func2.cache.cache.del_multi.call_count, 3) yield a.func("foo") self.assertEquals(callcount[0], 2) From fe5dad46b0da00e9757ed54eb23304ed3c6ceadf Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 27 May 2021 10:34:24 +0100 Subject: [PATCH 186/222] Remove redundant code to reload tls cert (#10054) we don't need to reload the tls cert if we don't have any tls listeners. Follow-up to #9280. --- changelog.d/10054.misc | 1 + synapse/app/_base.py | 5 +---- synapse/config/tls.py | 22 +++------------------- tests/config/test_tls.py | 3 +-- 4 files changed, 6 insertions(+), 25 deletions(-) create mode 100644 changelog.d/10054.misc diff --git a/changelog.d/10054.misc b/changelog.d/10054.misc new file mode 100644 index 000000000000..cebe39ce540f --- /dev/null +++ b/changelog.d/10054.misc @@ -0,0 +1 @@ +Remove some dead code regarding TLS certificate handling. 
diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 59918d789ec5..1329af2e2be7 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -261,13 +261,10 @@ def refresh_certificate(hs): Refresh the TLS certificates that Synapse is using by re-reading them from disk and updating the TLS context factories to use them. """ - if not hs.config.has_tls_listener(): - # attempt to reload the certs for the good of the tls_fingerprints - hs.config.read_certificate_from_disk(require_cert_and_key=False) return - hs.config.read_certificate_from_disk(require_cert_and_key=True) + hs.config.read_certificate_from_disk() hs.tls_server_context_factory = context_factory.ServerContextFactory(hs.config) if hs._listening_services: diff --git a/synapse/config/tls.py b/synapse/config/tls.py index 26f1150ca506..0e9bba53c915 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -215,28 +215,12 @@ def is_disk_cert_valid(self, allow_self_signed=True): days_remaining = (expires_on - now).days return days_remaining - def read_certificate_from_disk(self, require_cert_and_key: bool): + def read_certificate_from_disk(self): """ Read the certificates and private key from disk. - - Args: - require_cert_and_key: set to True to throw an error if the certificate - and key file are not given """ - if require_cert_and_key: - self.tls_private_key = self.read_tls_private_key() - self.tls_certificate = self.read_tls_certificate() - elif self.tls_certificate_file: - # we only need the certificate for the tls_fingerprints. Reload it if we - # can, but it's not a fatal error if we can't. - try: - self.tls_certificate = self.read_tls_certificate() - except Exception as e: - logger.info( - "Unable to read TLS certificate (%s). Ignoring as no " - "tls listeners enabled.", - e, - ) + self.tls_private_key = self.read_tls_private_key() + self.tls_certificate = self.read_tls_certificate() def generate_config_section( self, diff --git a/tests/config/test_tls.py b/tests/config/test_tls.py index 183034f7d4aa..dcf336416c60 100644 --- a/tests/config/test_tls.py +++ b/tests/config/test_tls.py @@ -74,12 +74,11 @@ def test_warn_self_signed(self): config = { "tls_certificate_path": os.path.join(config_dir, "cert.pem"), - "tls_fingerprints": [], } t = TestConfig() t.read_config(config, config_dir_path="", data_dir_path="") - t.read_certificate_from_disk(require_cert_and_key=False) + t.read_tls_certificate() warnings = self.flushWarnings() self.assertEqual(len(warnings), 1) From 5447a763327c37f07cd4135418e991a3b4346896 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 27 May 2021 10:34:55 +0100 Subject: [PATCH 187/222] Remove redundant, unmaintained `convert_server_keys` script. (#10055) --- changelog.d/10055.misc | 1 + scripts-dev/convert_server_keys.py | 108 ----------------------------- 2 files changed, 1 insertion(+), 108 deletions(-) create mode 100644 changelog.d/10055.misc delete mode 100644 scripts-dev/convert_server_keys.py diff --git a/changelog.d/10055.misc b/changelog.d/10055.misc new file mode 100644 index 000000000000..da84a2dde89b --- /dev/null +++ b/changelog.d/10055.misc @@ -0,0 +1 @@ +Remove redundant, unmaintained `convert_server_keys` script. 
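As background for the deletion that follows: the script's core job was re-signing server key JSON with the `signedjson` library. A minimal sketch of that signing pattern, assuming only the `signedjson` package; the key version and server name are invented examples, not values taken from the script:

```python
# Hedged illustration of the sign_json pattern the removed script relied on.
from signedjson.key import generate_signing_key, get_verify_key
from signedjson.sign import sign_json, verify_signed_json

# An ed25519 signing key; "key1" is an arbitrary, invented key version.
signing_key = generate_signing_key("key1")

signed = sign_json(
    {"server_name": "example.com", "old_verify_keys": {}},
    "example.com",
    signing_key,
)

# Raises SignatureVerifyException if the signature does not match.
verify_signed_json(signed, "example.com", get_verify_key(signing_key))
```
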
diff --git a/scripts-dev/convert_server_keys.py b/scripts-dev/convert_server_keys.py deleted file mode 100644 index d4314a054c44..000000000000 --- a/scripts-dev/convert_server_keys.py +++ /dev/null @@ -1,108 +0,0 @@ -import json -import sys -import time - -import psycopg2 -import yaml -from canonicaljson import encode_canonical_json -from signedjson.key import read_signing_keys -from signedjson.sign import sign_json -from unpaddedbase64 import encode_base64 - -db_binary_type = memoryview - - -def select_v1_keys(connection): - cursor = connection.cursor() - cursor.execute("SELECT server_name, key_id, verify_key FROM server_signature_keys") - rows = cursor.fetchall() - cursor.close() - results = {} - for server_name, key_id, verify_key in rows: - results.setdefault(server_name, {})[key_id] = encode_base64(verify_key) - return results - - -def select_v1_certs(connection): - cursor = connection.cursor() - cursor.execute("SELECT server_name, tls_certificate FROM server_tls_certificates") - rows = cursor.fetchall() - cursor.close() - results = {} - for server_name, tls_certificate in rows: - results[server_name] = tls_certificate - return results - - -def select_v2_json(connection): - cursor = connection.cursor() - cursor.execute("SELECT server_name, key_id, key_json FROM server_keys_json") - rows = cursor.fetchall() - cursor.close() - results = {} - for server_name, key_id, key_json in rows: - results.setdefault(server_name, {})[key_id] = json.loads( - str(key_json).decode("utf-8") - ) - return results - - -def convert_v1_to_v2(server_name, valid_until, keys, certificate): - return { - "old_verify_keys": {}, - "server_name": server_name, - "verify_keys": {key_id: {"key": key} for key_id, key in keys.items()}, - "valid_until_ts": valid_until, - } - - -def rows_v2(server, json): - valid_until = json["valid_until_ts"] - key_json = encode_canonical_json(json) - for key_id in json["verify_keys"]: - yield (server, key_id, "-", valid_until, valid_until, db_binary_type(key_json)) - - -def main(): - config = yaml.safe_load(open(sys.argv[1])) - valid_until = int(time.time() / (3600 * 24)) * 1000 * 3600 * 24 - - server_name = config["server_name"] - signing_key = read_signing_keys(open(config["signing_key_path"]))[0] - - database = config["database"] - assert database["name"] == "psycopg2", "Can only convert for postgresql" - args = database["args"] - args.pop("cp_max") - args.pop("cp_min") - connection = psycopg2.connect(**args) - keys = select_v1_keys(connection) - certificates = select_v1_certs(connection) - json = select_v2_json(connection) - - result = {} - for server in keys: - if server not in json: - v2_json = convert_v1_to_v2( - server, valid_until, keys[server], certificates[server] - ) - v2_json = sign_json(v2_json, server_name, signing_key) - result[server] = v2_json - - yaml.safe_dump(result, sys.stdout, default_flow_style=False) - - rows = [row for server, json in result.items() for row in rows_v2(server, json)] - - cursor = connection.cursor() - cursor.executemany( - "INSERT INTO server_keys_json (" - " server_name, key_id, from_server," - " ts_added_ms, ts_valid_until_ms, key_json" - ") VALUES (%s, %s, %s, %s, %s, %s)", - rows, - ) - connection.commit() - - -if __name__ == "__main__": - main() From dcbfec919ba27da970849cae73c69cacd78432d5 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 27 May 2021 10:35:06 +0100 Subject: [PATCH 188/222] Improve the error message printed by synctl when synapse fails to start. 
(#10059) --- changelog.d/10059.misc | 1 + synctl | 12 ++++++++---- 2 files changed, 9 insertions(+), 4 deletions(-) create mode 100644 changelog.d/10059.misc diff --git a/changelog.d/10059.misc b/changelog.d/10059.misc new file mode 100644 index 000000000000..ca6e0e8a5aff --- /dev/null +++ b/changelog.d/10059.misc @@ -0,0 +1 @@ +Improve the error message printed by synctl when synapse fails to start. diff --git a/synctl b/synctl index 6ce19918d212..90559ded62e6 100755 --- a/synctl +++ b/synctl @@ -97,11 +97,15 @@ def start(pidfile: str, app: str, config_files: Iterable[str], daemonize: bool) write("started %s(%s)" % (app, ",".join(config_files)), colour=GREEN) return True except subprocess.CalledProcessError as e: - write( - "error starting %s(%s) (exit code: %d); see above for logs" - % (app, ",".join(config_files), e.returncode), - colour=RED, + err = "%s(%s) failed to start (exit code: %d). Check the Synapse logfile" % ( + app, + ",".join(config_files), + e.returncode, ) + if daemonize: + err += ", or run synctl with --no-daemonize" + err += "." + write(err, colour=RED, stream=sys.stderr) return False From d9f44fd0b9214e09caa9c8dd46e651bbf0fffad8 Mon Sep 17 00:00:00 2001 From: Denis Kasak Date: Thu, 27 May 2021 11:41:16 +0000 Subject: [PATCH 189/222] Clarify security note regarding the domain Synapse is hosted on. (#9221) --- README.rst | 46 +++++++++++++++++++++++++++++++++----------- changelog.d/9221.doc | 1 + 2 files changed, 36 insertions(+), 11 deletions(-) create mode 100644 changelog.d/9221.doc diff --git a/README.rst b/README.rst index 1a5503572ee4..a14a687fd1b4 100644 --- a/README.rst +++ b/README.rst @@ -149,21 +149,45 @@ For details on having Synapse manage your federation TLS certificates automatically, please see ``_. -Security Note +Security note ============= -Matrix serves raw user generated data in some APIs - specifically the `content -repository endpoints `_. +Matrix serves raw, user-supplied data in some APIs -- specifically the `content +repository endpoints`_. -Whilst we have tried to mitigate against possible XSS attacks (e.g. -https://github.com/matrix-org/synapse/pull/1021) we recommend running -matrix homeservers on a dedicated domain name, to limit any malicious user generated -content served to web browsers a matrix API from being able to attack webapps hosted -on the same domain. This is particularly true of sharing a matrix webclient and -server on the same domain. +.. _content repository endpoints: https://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid -See https://github.com/vector-im/riot-web/issues/1977 and -https://developer.github.com/changes/2014-04-25-user-content-security for more details. +Whilst we make a reasonable effort to mitigate against XSS attacks (for +instance, by using `CSP`_), a Matrix homeserver should not be hosted on a +domain hosting other web applications. This especially applies to sharing +the domain with Matrix web clients and other sensitive applications like +webmail. See +https://developer.github.com/changes/2014-04-25-user-content-security for more +information. + +.. _CSP: https://github.com/matrix-org/synapse/pull/1021 + +Ideally, the homeserver should not simply be on a different subdomain, but on +a completely different `registered domain`_ (also known as top-level site or +eTLD+1). This is because `some attacks`_ are still possible as long as the two +applications share the same registered domain. + +.. 
_registered domain: https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-03#section-2.3 + +.. _some attacks: https://en.wikipedia.org/wiki/Session_fixation#Attacks_using_cross-subdomain_cookie + +To illustrate this with an example, if your Element Web or other sensitive web +application is hosted on ``A.example1.com``, you should ideally host Synapse on +``example2.com``. Some amount of protection is offered by hosting on +``B.example1.com`` instead, so this is also acceptable in some scenarios. +However, you should *not* host your Synapse on ``A.example1.com``. + +Note that all of the above refers exclusively to the domain used in Synapse's +``public_baseurl`` setting. In particular, it has no bearing on the domain +mentioned in MXIDs hosted on that server. + +Following this advice ensures that even if an XSS is found in Synapse, the +impact on other applications will be minimal. Upgrading an existing Synapse diff --git a/changelog.d/9221.doc b/changelog.d/9221.doc new file mode 100644 index 000000000000..9b3476064b39 --- /dev/null +++ b/changelog.d/9221.doc @@ -0,0 +1 @@ +Clarify security note regarding hosting Synapse on the same domain as other web applications. From 8e15c92c2f9e581e59ff68495fa99e998849bb4d Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 27 May 2021 08:52:28 -0400 Subject: [PATCH 190/222] Pass the origin when calculating the spaces summary over GET. (#10079) Fixes a bug due to conflicting PRs which were merged. (One added a new caller to a method, the other added a new parameter to the same method.) --- changelog.d/10079.bugfix | 1 + synapse/federation/transport/server.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/10079.bugfix diff --git a/changelog.d/10079.bugfix b/changelog.d/10079.bugfix new file mode 100644 index 000000000000..2b93c4534abc --- /dev/null +++ b/changelog.d/10079.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in v1.35.0rc1 when calling the spaces summary API via a GET request. diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 9d50b05d0152..40eab4554926 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -1398,7 +1398,7 @@ async def on_GET( ) return 200, await self.handler.federation_space_summary( - room_id, suggested_only, max_rooms_per_space, exclude_rooms + origin, room_id, suggested_only, max_rooms_per_space, exclude_rooms ) # TODO When switching to the stable endpoint, remove the POST handler. From 78b5102ae71f828deb851eca8e677381710bf716 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 27 May 2021 14:32:31 +0100 Subject: [PATCH 191/222] Fix up `BatchingQueue` (#10078) Fixes #10068 --- changelog.d/10078.misc | 1 + synapse/util/batching_queue.py | 70 ++++++++++++++++++--------- tests/util/test_batching_queue.py | 78 ++++++++++++++++++++++++++++++- 3 files changed, 125 insertions(+), 24 deletions(-) create mode 100644 changelog.d/10078.misc diff --git a/changelog.d/10078.misc b/changelog.d/10078.misc new file mode 100644 index 000000000000..a4b089d0fdcd --- /dev/null +++ b/changelog.d/10078.misc @@ -0,0 +1 @@ +Fix up `BatchingQueue` implementation. 
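Before the diff, a usage sketch may help: callers hand values to `BatchingQueue.add_to_queue`, and every caller in a batch gets that batch's single result back. This is an illustration only; the processing function, queue name and key are invented:

```python
# Hedged usage sketch of BatchingQueue; not taken from the patch itself.
from synapse.util.batching_queue import BatchingQueue


async def _flush(values):
    # Called with everything queued up since the last batch; the return
    # value is handed back to every caller waiting on this batch.
    return {"processed": len(values)}


clock = hs.get_clock()  # assumes a HomeServer object `hs` is in scope
queue = BatchingQueue("example_queue", clock, _flush)


async def submit(value, room_id):
    # Calls made while a batch is in flight are queued and coalesced;
    # per-key queues run independently of each other.
    return await queue.add_to_queue(value, key=room_id)
```
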
diff --git a/synapse/util/batching_queue.py b/synapse/util/batching_queue.py index 44bbb7b1a8ef..8fd5bfb69b75 100644 --- a/synapse/util/batching_queue.py +++ b/synapse/util/batching_queue.py @@ -25,10 +25,11 @@ TypeVar, ) +from prometheus_client import Gauge + from twisted.internet import defer from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable -from synapse.metrics import LaterGauge from synapse.metrics.background_process_metrics import run_as_background_process from synapse.util import Clock @@ -38,6 +39,24 @@ V = TypeVar("V") R = TypeVar("R") +number_queued = Gauge( + "synapse_util_batching_queue_number_queued", + "The number of items waiting in the queue across all keys", + labelnames=("name",), +) + +number_in_flight = Gauge( + "synapse_util_batching_queue_number_pending", + "The number of items across all keys either being processed or waiting in a queue", + labelnames=("name",), +) + +number_of_keys = Gauge( + "synapse_util_batching_queue_number_of_keys", + "The number of distinct keys that have items queued", + labelnames=("name",), +) + class BatchingQueue(Generic[V, R]): """A queue that batches up work, calling the provided processing function @@ -48,10 +67,20 @@ class BatchingQueue(Generic[V, R]): called, and will keep being called until the queue has been drained (for the given key). + If the processing function raises an exception then the exception is proxied + through to the callers waiting on that batch of work. + Note that the return value of `add_to_queue` will be the return value of the processing function that processed the given item. This means that the returned value will likely include data for other items that were in the batch. + + Args: + name: A name for the queue, used for logging contexts and metrics. + This must be unique, otherwise the metrics will be wrong. + clock: The clock to use to schedule work. + process_batch_callback: The callback to be run to process a batch of + work. """ def __init__( @@ -73,19 +102,15 @@ def __init__( # The function to call with batches of values. self._process_batch_callback = process_batch_callback - LaterGauge( - "synapse_util_batching_queue_number_queued", - "The number of items waiting in the queue across all keys", - labels=("name",), - caller=lambda: sum(len(v) for v in self._next_values.values()), + number_queued.labels(self._name).set_function( + lambda: sum(len(q) for q in self._next_values.values()) ) - LaterGauge( - "synapse_util_batching_queue_number_of_keys", - "The number of distinct keys that have items queued", - labels=("name",), - caller=lambda: len(self._next_values), - ) + number_of_keys.labels(self._name).set_function(lambda: len(self._next_values)) + + self._number_in_flight_metric = number_in_flight.labels( + self._name + ) # type: Gauge async def add_to_queue(self, value: V, key: Hashable = ()) -> R: """Adds the value to the queue with the given key, returning the result @@ -107,17 +132,18 @@ async def add_to_queue(self, value: V, key: Hashable = ()) -> R: if key not in self._processing_keys: run_as_background_process(self._name, self._process_queue, key) - return await make_deferred_yieldable(d) + with self._number_in_flight_metric.track_inprogress(): + return await make_deferred_yieldable(d) async def _process_queue(self, key: Hashable) -> None: """A background task to repeatedly pull things off the queue for the given key and call the `self._process_batch_callback` with the values. 
""" - try: - if key in self._processing_keys: - return + if key in self._processing_keys: + return + try: self._processing_keys.add(key) while True: @@ -137,16 +163,16 @@ async def _process_queue(self, key: Hashable) -> None: values = [value for value, _ in next_values] results = await self._process_batch_callback(values) - for _, deferred in next_values: - with PreserveLoggingContext(): + with PreserveLoggingContext(): + for _, deferred in next_values: deferred.callback(results) except Exception as e: - for _, deferred in next_values: - if deferred.called: - continue + with PreserveLoggingContext(): + for _, deferred in next_values: + if deferred.called: + continue - with PreserveLoggingContext(): deferred.errback(e) finally: diff --git a/tests/util/test_batching_queue.py b/tests/util/test_batching_queue.py index 5def1e56c9bb..edf29e5b96ae 100644 --- a/tests/util/test_batching_queue.py +++ b/tests/util/test_batching_queue.py @@ -14,7 +14,12 @@ from twisted.internet import defer from synapse.logging.context import make_deferred_yieldable -from synapse.util.batching_queue import BatchingQueue +from synapse.util.batching_queue import ( + BatchingQueue, + number_in_flight, + number_of_keys, + number_queued, +) from tests.server import get_clock from tests.unittest import TestCase @@ -24,6 +29,14 @@ class BatchingQueueTestCase(TestCase): def setUp(self): self.clock, hs_clock = get_clock() + # We ensure that we remove any existing metrics for "test_queue". + try: + number_queued.remove("test_queue") + number_of_keys.remove("test_queue") + number_in_flight.remove("test_queue") + except KeyError: + pass + self._pending_calls = [] self.queue = BatchingQueue("test_queue", hs_clock, self._process_queue) @@ -32,6 +45,41 @@ async def _process_queue(self, values): self._pending_calls.append((values, d)) return await make_deferred_yieldable(d) + def _assert_metrics(self, queued, keys, in_flight): + """Assert that the metrics are correct""" + + self.assertEqual(len(number_queued.collect()), 1) + self.assertEqual(len(number_queued.collect()[0].samples), 1) + self.assertEqual( + number_queued.collect()[0].samples[0].labels, + {"name": self.queue._name}, + ) + self.assertEqual( + number_queued.collect()[0].samples[0].value, + queued, + "number_queued", + ) + + self.assertEqual(len(number_of_keys.collect()), 1) + self.assertEqual(len(number_of_keys.collect()[0].samples), 1) + self.assertEqual( + number_queued.collect()[0].samples[0].labels, {"name": self.queue._name} + ) + self.assertEqual( + number_of_keys.collect()[0].samples[0].value, keys, "number_of_keys" + ) + + self.assertEqual(len(number_in_flight.collect()), 1) + self.assertEqual(len(number_in_flight.collect()[0].samples), 1) + self.assertEqual( + number_queued.collect()[0].samples[0].labels, {"name": self.queue._name} + ) + self.assertEqual( + number_in_flight.collect()[0].samples[0].value, + in_flight, + "number_in_flight", + ) + def test_simple(self): """Tests the basic case of calling `add_to_queue` once and having `_process_queue` return. @@ -41,6 +89,8 @@ def test_simple(self): queue_d = defer.ensureDeferred(self.queue.add_to_queue("foo")) + self._assert_metrics(queued=1, keys=1, in_flight=1) + # The queue should wait a reactor tick before calling the processing # function. 
self.assertFalse(self._pending_calls) @@ -52,12 +102,15 @@ def test_simple(self): self.assertEqual(len(self._pending_calls), 1) self.assertEqual(self._pending_calls[0][0], ["foo"]) self.assertFalse(queue_d.called) + self._assert_metrics(queued=0, keys=0, in_flight=1) # Return value of the `_process_queue` should be propagated back. self._pending_calls.pop()[1].callback("bar") self.assertEqual(self.successResultOf(queue_d), "bar") + self._assert_metrics(queued=0, keys=0, in_flight=0) + def test_batching(self): """Test that multiple calls at the same time get batched up into one call to `_process_queue`. @@ -68,6 +121,8 @@ def test_batching(self): queue_d1 = defer.ensureDeferred(self.queue.add_to_queue("foo1")) queue_d2 = defer.ensureDeferred(self.queue.add_to_queue("foo2")) + self._assert_metrics(queued=2, keys=1, in_flight=2) + self.clock.pump([0]) # We should see only *one* call to `_process_queue` @@ -75,12 +130,14 @@ def test_batching(self): self.assertEqual(self._pending_calls[0][0], ["foo1", "foo2"]) self.assertFalse(queue_d1.called) self.assertFalse(queue_d2.called) + self._assert_metrics(queued=0, keys=0, in_flight=2) # Return value of the `_process_queue` should be propagated back to both. self._pending_calls.pop()[1].callback("bar") self.assertEqual(self.successResultOf(queue_d1), "bar") self.assertEqual(self.successResultOf(queue_d2), "bar") + self._assert_metrics(queued=0, keys=0, in_flight=0) def test_queuing(self): """Test that we queue up requests while a `_process_queue` is being @@ -92,13 +149,20 @@ def test_queuing(self): queue_d1 = defer.ensureDeferred(self.queue.add_to_queue("foo1")) self.clock.pump([0]) + self.assertEqual(len(self._pending_calls), 1) + + # We queue up work after the process function has been called, testing + # that they get correctly queued up. queue_d2 = defer.ensureDeferred(self.queue.add_to_queue("foo2")) + queue_d3 = defer.ensureDeferred(self.queue.add_to_queue("foo3")) # We should see only *one* call to `_process_queue` self.assertEqual(len(self._pending_calls), 1) self.assertEqual(self._pending_calls[0][0], ["foo1"]) self.assertFalse(queue_d1.called) self.assertFalse(queue_d2.called) + self.assertFalse(queue_d3.called) + self._assert_metrics(queued=2, keys=1, in_flight=3) # Return value of the `_process_queue` should be propagated back to the # first. @@ -106,18 +170,24 @@ def test_queuing(self): self.assertEqual(self.successResultOf(queue_d1), "bar1") self.assertFalse(queue_d2.called) + self.assertFalse(queue_d3.called) + self._assert_metrics(queued=2, keys=1, in_flight=2) # We should now see a second call to `_process_queue` self.clock.pump([0]) self.assertEqual(len(self._pending_calls), 1) - self.assertEqual(self._pending_calls[0][0], ["foo2"]) + self.assertEqual(self._pending_calls[0][0], ["foo2", "foo3"]) self.assertFalse(queue_d2.called) + self.assertFalse(queue_d3.called) + self._assert_metrics(queued=0, keys=0, in_flight=2) # Return value of the `_process_queue` should be propagated back to the # second. 
self._pending_calls.pop()[1].callback("bar2") self.assertEqual(self.successResultOf(queue_d2), "bar2") + self.assertEqual(self.successResultOf(queue_d3), "bar2") + self._assert_metrics(queued=0, keys=0, in_flight=0) def test_different_keys(self): """Test that calls to different keys get processed in parallel.""" @@ -140,6 +210,7 @@ def test_different_keys(self): self.assertFalse(queue_d1.called) self.assertFalse(queue_d2.called) self.assertFalse(queue_d3.called) + self._assert_metrics(queued=1, keys=1, in_flight=3) # Return value of the `_process_queue` should be propagated back to the # first. @@ -148,6 +219,7 @@ def test_different_keys(self): self.assertEqual(self.successResultOf(queue_d1), "bar1") self.assertFalse(queue_d2.called) self.assertFalse(queue_d3.called) + self._assert_metrics(queued=1, keys=1, in_flight=2) # Return value of the `_process_queue` should be propagated back to the # second. @@ -161,9 +233,11 @@ def test_different_keys(self): self.assertEqual(len(self._pending_calls), 1) self.assertEqual(self._pending_calls[0][0], ["foo3"]) self.assertFalse(queue_d3.called) + self._assert_metrics(queued=0, keys=0, in_flight=1) # Return value of the `_process_queue` should be propagated back to the # third deferred. self._pending_calls.pop()[1].callback("bar4") self.assertEqual(self.successResultOf(queue_d3), "bar4") + self._assert_metrics(queued=0, keys=0, in_flight=0) From b1bc26a909f4f9af137e72ab27952a8d2dfc1cb3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 27 May 2021 14:46:24 +0100 Subject: [PATCH 192/222] 1.35.0rc2 --- CHANGES.md | 9 +++++++++ changelog.d/10079.bugfix | 1 - synapse/__init__.py | 2 +- 3 files changed, 10 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/10079.bugfix diff --git a/CHANGES.md b/CHANGES.md index 0e451f983c95..1fac16580d13 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.35.0rc2 (2021-05-27) +============================== + +Bugfixes +-------- + +- Fix a bug introduced in v1.35.0rc1 when calling the spaces summary API via a GET request. ([\#10079](https://github.com/matrix-org/synapse/issues/10079)) + + Synapse 1.35.0rc1 (2021-05-25) ============================== diff --git a/changelog.d/10079.bugfix b/changelog.d/10079.bugfix deleted file mode 100644 index 2b93c4534abc..000000000000 --- a/changelog.d/10079.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in v1.35.0rc1 when calling the spaces summary API via a GET request. diff --git a/synapse/__init__.py b/synapse/__init__.py index e60e9db71ef8..6faf31dbbce5 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.35.0rc1" +__version__ = "1.35.0rc2" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From f828a70be331105c98ebfbe3738ef57d9d54df5b Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 27 May 2021 18:10:58 +0200 Subject: [PATCH 193/222] Limit the number of events sent over replication when persisting events. (#10082) --- changelog.d/10082.bugfix | 1 + synapse/handlers/federation.py | 17 ++++++++++------- 2 files changed, 11 insertions(+), 7 deletions(-) create mode 100644 changelog.d/10082.bugfix diff --git a/changelog.d/10082.bugfix b/changelog.d/10082.bugfix new file mode 100644 index 000000000000..b4f8bcc4fa3c --- /dev/null +++ b/changelog.d/10082.bugfix @@ -0,0 +1 @@ +Fixed a bug causing replication requests to fail when receiving a lot of events via federation. 
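The fix chunks the events before sending them over replication, using a `batch_iter` helper. A self-contained sketch of what such a helper does follows; it mirrors `synapse.util.iterutils.batch_iter` in spirit but is rewritten here for illustration and may differ in detail from the real one:

```python
# Illustrative chunking helper; the real one lives in synapse.util.iterutils.
from itertools import islice
from typing import Iterable, Iterator, Tuple, TypeVar

T = TypeVar("T")


def batch_iter(iterable: Iterable[T], size: int) -> Iterator[Tuple[T, ...]]:
    """Yield tuples of at most `size` consecutive items from `iterable`."""
    sourceiter = iter(iterable)
    # iter(callable, sentinel) keeps calling until the empty tuple appears.
    return iter(lambda: tuple(islice(sourceiter, size)), ())


# e.g. 2500 events become three replication requests of 1000, 1000 and 500:
assert [len(b) for b in batch_iter(range(2500), 1000)] == [1000, 1000, 500]
```
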
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 678f6b770797..bf113152517f 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -91,6 +91,7 @@ get_domain_from_id, ) from synapse.util.async_helpers import Linearizer, concurrently_execute +from synapse.util.iterutils import batch_iter from synapse.util.retryutils import NotRetryingDestination from synapse.util.stringutils import shortstr from synapse.visibility import filter_events_for_server @@ -3053,13 +3054,15 @@ async def persist_events_and_notify( """ instance = self.config.worker.events_shard_config.get_instance(room_id) if instance != self._instance_name: - result = await self._send_events( - instance_name=instance, - store=self.store, - room_id=room_id, - event_and_contexts=event_and_contexts, - backfilled=backfilled, - ) + # Limit the number of events sent over federation. + for batch in batch_iter(event_and_contexts, 1000): + result = await self._send_events( + instance_name=instance, + store=self.store, + room_id=room_id, + event_and_contexts=batch, + backfilled=backfilled, + ) return result["max_stream_id"] else: assert self.storage.persistence From 8fb9af570f942d2057e8acb4a047d61ed7048f58 Mon Sep 17 00:00:00 2001 From: Callum Brown Date: Thu, 27 May 2021 18:42:23 +0100 Subject: [PATCH 194/222] Make reason and score optional for report_event (#10077) Implements MSC2414: https://github.com/matrix-org/matrix-doc/pull/2414 See #8551 Signed-off-by: Callum Brown --- changelog.d/10077.feature | 1 + docs/admin_api/event_reports.md | 4 +- synapse/rest/client/v2_alpha/report_event.py | 13 +-- synapse/storage/databases/main/room.py | 2 +- tests/rest/admin/test_event_reports.py | 15 +++- .../rest/client/v2_alpha/test_report_event.py | 83 +++++++++++++++++++ 6 files changed, 105 insertions(+), 13 deletions(-) create mode 100644 changelog.d/10077.feature create mode 100644 tests/rest/client/v2_alpha/test_report_event.py diff --git a/changelog.d/10077.feature b/changelog.d/10077.feature new file mode 100644 index 000000000000..808feb22150a --- /dev/null +++ b/changelog.d/10077.feature @@ -0,0 +1 @@ +Make reason and score parameters optional for reporting content. Implements [MSC2414](https://github.com/matrix-org/matrix-doc/pull/2414). Contributed by Callum Brown. diff --git a/docs/admin_api/event_reports.md b/docs/admin_api/event_reports.md index 0159098138ef..bfec06f75504 100644 --- a/docs/admin_api/event_reports.md +++ b/docs/admin_api/event_reports.md @@ -75,9 +75,9 @@ The following fields are returned in the JSON response body: * `name`: string - The name of the room. * `event_id`: string - The ID of the reported event. * `user_id`: string - This is the user who reported the event and wrote the reason. -* `reason`: string - Comment made by the `user_id` in this report. May be blank. +* `reason`: string - Comment made by the `user_id` in this report. May be blank or `null`. * `score`: integer - Content is reported based upon a negative score, where -100 is - "most offensive" and 0 is "inoffensive". + "most offensive" and 0 is "inoffensive". May be `null`. * `sender`: string - This is the ID of the user who sent the original message/event that was reported. * `canonical_alias`: string - The canonical alias of the room. 
`null` if the room does not diff --git a/synapse/rest/client/v2_alpha/report_event.py b/synapse/rest/client/v2_alpha/report_event.py index 2c169abbf313..07ea39a8a3e4 100644 --- a/synapse/rest/client/v2_alpha/report_event.py +++ b/synapse/rest/client/v2_alpha/report_event.py @@ -16,11 +16,7 @@ from http import HTTPStatus from synapse.api.errors import Codes, SynapseError -from synapse.http.servlet import ( - RestServlet, - assert_params_in_dict, - parse_json_object_from_request, -) +from synapse.http.servlet import RestServlet, parse_json_object_from_request from ._base import client_patterns @@ -42,15 +38,14 @@ async def on_POST(self, request, room_id, event_id): user_id = requester.user.to_string() body = parse_json_object_from_request(request) - assert_params_in_dict(body, ("reason", "score")) - if not isinstance(body["reason"], str): + if not isinstance(body.get("reason", ""), str): raise SynapseError( HTTPStatus.BAD_REQUEST, "Param 'reason' must be a string", Codes.BAD_JSON, ) - if not isinstance(body["score"], int): + if not isinstance(body.get("score", 0), int): raise SynapseError( HTTPStatus.BAD_REQUEST, "Param 'score' must be an integer", @@ -61,7 +56,7 @@ async def on_POST(self, request, room_id, event_id): room_id=room_id, event_id=event_id, user_id=user_id, - reason=body["reason"], + reason=body.get("reason"), content=body, received_ts=self.clock.time_msec(), ) diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 5f38634f48da..0cf450f81d73 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -1498,7 +1498,7 @@ async def add_event_report( room_id: str, event_id: str, user_id: str, - reason: str, + reason: Optional[str], content: JsonDict, received_ts: int, ) -> None: diff --git a/tests/rest/admin/test_event_reports.py b/tests/rest/admin/test_event_reports.py index 29341bc6e98f..f15d1cf6f7c8 100644 --- a/tests/rest/admin/test_event_reports.py +++ b/tests/rest/admin/test_event_reports.py @@ -64,7 +64,7 @@ def prepare(self, reactor, clock, hs): user_tok=self.admin_user_tok, ) for _ in range(5): - self._create_event_and_report( + self._create_event_and_report_without_parameters( room_id=self.room_id2, user_tok=self.admin_user_tok, ) @@ -378,6 +378,19 @@ def _create_event_and_report(self, room_id, user_tok): ) self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + def _create_event_and_report_without_parameters(self, room_id, user_tok): + """Create and report an event, but omit reason and score""" + resp = self.helper.send(room_id, tok=user_tok) + event_id = resp["event_id"] + + channel = self.make_request( + "POST", + "rooms/%s/report/%s" % (room_id, event_id), + json.dumps({}), + access_token=user_tok, + ) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + def _check_fields(self, content): """Checks that all attributes are present in an event report""" for c in content: diff --git a/tests/rest/client/v2_alpha/test_report_event.py b/tests/rest/client/v2_alpha/test_report_event.py new file mode 100644 index 000000000000..1ec6b05e5bd2 --- /dev/null +++ b/tests/rest/client/v2_alpha/test_report_event.py @@ -0,0 +1,83 @@ +# Copyright 2021 Callum Brown +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json + +import synapse.rest.admin +from synapse.rest.client.v1 import login, room +from synapse.rest.client.v2_alpha import report_event + +from tests import unittest + + +class ReportEventTestCase(unittest.HomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + room.register_servlets, + report_event.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + self.admin_user = self.register_user("admin", "pass", admin=True) + self.admin_user_tok = self.login("admin", "pass") + self.other_user = self.register_user("user", "pass") + self.other_user_tok = self.login("user", "pass") + + self.room_id = self.helper.create_room_as( + self.other_user, tok=self.other_user_tok, is_public=True + ) + self.helper.join(self.room_id, user=self.admin_user, tok=self.admin_user_tok) + resp = self.helper.send(self.room_id, tok=self.admin_user_tok) + self.event_id = resp["event_id"] + self.report_path = "rooms/{}/report/{}".format(self.room_id, self.event_id) + + def test_reason_str_and_score_int(self): + data = {"reason": "this makes me sad", "score": -100} + self._assert_status(200, data) + + def test_no_reason(self): + data = {"score": 0} + self._assert_status(200, data) + + def test_no_score(self): + data = {"reason": "this makes me sad"} + self._assert_status(200, data) + + def test_no_reason_and_no_score(self): + data = {} + self._assert_status(200, data) + + def test_reason_int_and_score_str(self): + data = {"reason": 10, "score": "string"} + self._assert_status(400, data) + + def test_reason_zero_and_score_blank(self): + data = {"reason": 0, "score": ""} + self._assert_status(400, data) + + def test_reason_and_score_null(self): + data = {"reason": None, "score": None} + self._assert_status(400, data) + + def _assert_status(self, response_status, data): + channel = self.make_request( + "POST", + self.report_path, + json.dumps(data), + access_token=self.other_user_tok, + ) + self.assertEqual( + response_status, int(channel.result["code"]), msg=channel.result["body"] + ) From 5eed6348ce747b26883ffe812d69ec4d0fbde5fd Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 27 May 2021 22:45:43 +0100 Subject: [PATCH 195/222] Move some more endpoints off master (#10084) --- changelog.d/10084.feature | 1 + docs/workers.md | 3 +++ synapse/app/generic_worker.py | 4 ++-- synapse/rest/client/v1/room.py | 8 ++++---- 4 files changed, 10 insertions(+), 6 deletions(-) create mode 100644 changelog.d/10084.feature diff --git a/changelog.d/10084.feature b/changelog.d/10084.feature new file mode 100644 index 000000000000..602cb6ff5104 --- /dev/null +++ b/changelog.d/10084.feature @@ -0,0 +1 @@ +Add support for routing more requests to workers. 
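The servlet moves in the `room.py` hunk further below hinge on the `is_worker` flag passed to `register_servlets`: read-only endpoints are registered on every process, while endpoints with write side effects stay behind a `not is_worker` guard. A toy version of that gating pattern (the `register` callable and the abridged paths are illustrative assumptions):

    from typing import Callable

    def register_servlets(
        register: Callable[[str, str], None], is_worker: bool = False
    ) -> None:
        # Read-only endpoints are safe to serve from any process.
        register("GET", "/_matrix/client/r0/joined_rooms")
        register("POST", "/_matrix/client/r0/search")
        if not is_worker:
            # Endpoints with write side effects stay on the main process.
            register("POST", "/_matrix/client/r0/createRoom")

The regular expressions added to `docs/workers.md` below are the reverse-proxy counterpart of the same split: only requests matching them may be routed to a generic worker.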
diff --git a/docs/workers.md b/docs/workers.md index c6282165b085..46b5e4b7374b 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -228,6 +228,9 @@ expressions: ^/_matrix/client/(api/v1|r0|unstable)/joined_groups$ ^/_matrix/client/(api/v1|r0|unstable)/publicised_groups$ ^/_matrix/client/(api/v1|r0|unstable)/publicised_groups/ + ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/event/ + ^/_matrix/client/(api/v1|r0|unstable)/joined_rooms$ + ^/_matrix/client/(api/v1|r0|unstable)/search$ # Registration/login requests ^/_matrix/client/(api/v1|r0|unstable)/login$ diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 91ad326f193f..57c2fc2e88b2 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -109,7 +109,7 @@ MonthlyActiveUsersWorkerStore, ) from synapse.storage.databases.main.presence import PresenceStore -from synapse.storage.databases.main.search import SearchWorkerStore +from synapse.storage.databases.main.search import SearchStore from synapse.storage.databases.main.stats import StatsStore from synapse.storage.databases.main.transactions import TransactionWorkerStore from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore @@ -242,7 +242,7 @@ class GenericWorkerSlavedStore( MonthlyActiveUsersWorkerStore, MediaRepositoryStore, ServerMetricsStore, - SearchWorkerStore, + SearchStore, TransactionWorkerStore, BaseSlavedStore, ): diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index d6d55893af8a..70286b0ff7f2 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -1061,15 +1061,15 @@ def register_servlets(hs: "HomeServer", http_server, is_worker=False): RoomTypingRestServlet(hs).register(http_server) RoomEventContextServlet(hs).register(http_server) RoomSpaceSummaryRestServlet(hs).register(http_server) + RoomEventServlet(hs).register(http_server) + JoinedRoomsRestServlet(hs).register(http_server) + RoomAliasListServlet(hs).register(http_server) + SearchRestServlet(hs).register(http_server) # Some servlets only get registered for the main process. 
if not is_worker: RoomCreateRestServlet(hs).register(http_server) RoomForgetRestServlet(hs).register(http_server) - SearchRestServlet(hs).register(http_server) - JoinedRoomsRestServlet(hs).register(http_server) - RoomEventServlet(hs).register(http_server) - RoomAliasListServlet(hs).register(http_server) def register_deprecated_servlets(hs, http_server): From ac3e02d0892c267aea02c637df9761c5820478e6 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Fri, 28 May 2021 08:19:06 -0500 Subject: [PATCH 196/222] Add `parse_strings_from_args` to get `prev_events` array (#10048) Split out from https://github.com/matrix-org/synapse/pull/9247 Strings: - `parse_string` - `parse_string_from_args` - `parse_strings_from_args` For comparison with ints: - `parse_integer` - `parse_integer_from_args` Previous discussions: - https://github.com/matrix-org/synapse/pull/9247#discussion_r573195687 - https://github.com/matrix-org/synapse/pull/9247#discussion_r574214156 - https://github.com/matrix-org/synapse/pull/9247#discussion_r573264791 Signed-off-by: Eric Eastwood --- changelog.d/10048.misc | 1 + synapse/http/servlet.py | 196 +++++++++++++++++++++++++++++++--------- 2 files changed, 154 insertions(+), 43 deletions(-) create mode 100644 changelog.d/10048.misc diff --git a/changelog.d/10048.misc b/changelog.d/10048.misc new file mode 100644 index 000000000000..a901f8431e9e --- /dev/null +++ b/changelog.d/10048.misc @@ -0,0 +1 @@ +Add `parse_strings_from_args` for parsing an array from query parameters. diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index 31897546a967..3f4f2411fc6c 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -15,6 +15,9 @@ """ This module contains base REST classes for constructing REST servlets. """ import logging +from typing import Iterable, List, Optional, Union, overload + +from typing_extensions import Literal from synapse.api.errors import Codes, SynapseError from synapse.util import json_decoder @@ -107,12 +110,11 @@ def parse_boolean_from_args(args, name, default=None, required=False): def parse_string( request, - name, - default=None, - required=False, - allowed_values=None, - param_type="string", - encoding="ascii", + name: Union[bytes, str], + default: Optional[str] = None, + required: bool = False, + allowed_values: Optional[Iterable[str]] = None, + encoding: Optional[str] = "ascii", ): """ Parse a string parameter from the request query string. @@ -122,18 +124,17 @@ def parse_string( Args: request: the twisted HTTP request. - name (bytes|unicode): the name of the query parameter. - default (bytes|unicode|None): value to use if the parameter is absent, + name: the name of the query parameter. + default: value to use if the parameter is absent, defaults to None. Must be bytes if encoding is None. - required (bool): whether to raise a 400 SynapseError if the + required: whether to raise a 400 SynapseError if the parameter is absent, defaults to False. - allowed_values (list[bytes|unicode]): List of allowed values for the + allowed_values: List of allowed values for the string, or None if any value is allowed, defaults to None. Must be the same type as name, if given. - encoding (str|None): The encoding to decode the string content with. - + encoding : The encoding to decode the string content with. Returns: - bytes/unicode|None: A string value or the default. Unicode if encoding + A string value or the default. Unicode if encoding was given, bytes otherwise. Raises: @@ -142,45 +143,105 @@ def parse_string( is not one of those allowed values. 
""" return parse_string_from_args( - request.args, name, default, required, allowed_values, param_type, encoding + request.args, name, default, required, allowed_values, encoding ) -def parse_string_from_args( - args, - name, - default=None, - required=False, - allowed_values=None, - param_type="string", - encoding="ascii", -): +def _parse_string_value( + value: Union[str, bytes], + allowed_values: Optional[Iterable[str]], + name: str, + encoding: Optional[str], +) -> Union[str, bytes]: + if encoding: + try: + value = value.decode(encoding) + except ValueError: + raise SynapseError(400, "Query parameter %r must be %s" % (name, encoding)) + + if allowed_values is not None and value not in allowed_values: + message = "Query parameter %r must be one of [%s]" % ( + name, + ", ".join(repr(v) for v in allowed_values), + ) + raise SynapseError(400, message) + else: + return value + + +@overload +def parse_strings_from_args( + args: List[str], + name: Union[bytes, str], + default: Optional[List[str]] = None, + required: bool = False, + allowed_values: Optional[Iterable[str]] = None, + encoding: Literal[None] = None, +) -> Optional[List[bytes]]: + ... + + +@overload +def parse_strings_from_args( + args: List[str], + name: Union[bytes, str], + default: Optional[List[str]] = None, + required: bool = False, + allowed_values: Optional[Iterable[str]] = None, + encoding: str = "ascii", +) -> Optional[List[str]]: + ... + + +def parse_strings_from_args( + args: List[str], + name: Union[bytes, str], + default: Optional[List[str]] = None, + required: bool = False, + allowed_values: Optional[Iterable[str]] = None, + encoding: Optional[str] = "ascii", +) -> Optional[List[Union[bytes, str]]]: + """ + Parse a string parameter from the request query string list. + + If encoding is not None, the content of the query param will be + decoded to Unicode using the encoding, otherwise it will be encoded + + Args: + args: the twisted HTTP request.args list. + name: the name of the query parameter. + default: value to use if the parameter is absent, + defaults to None. Must be bytes if encoding is None. + required : whether to raise a 400 SynapseError if the + parameter is absent, defaults to False. + allowed_values (list[bytes|unicode]): List of allowed values for the + string, or None if any value is allowed, defaults to None. Must be + the same type as name, if given. + encoding: The encoding to decode the string content with. + + Returns: + A string value or the default. Unicode if encoding + was given, bytes otherwise. + + Raises: + SynapseError if the parameter is absent and required, or if the + parameter is present, must be one of a list of allowed values and + is not one of those allowed values. 
+ """ if not isinstance(name, bytes): name = name.encode("ascii") if name in args: - value = args[name][0] - - if encoding: - try: - value = value.decode(encoding) - except ValueError: - raise SynapseError( - 400, "Query parameter %r must be %s" % (name, encoding) - ) - - if allowed_values is not None and value not in allowed_values: - message = "Query parameter %r must be one of [%s]" % ( - name, - ", ".join(repr(v) for v in allowed_values), - ) - raise SynapseError(400, message) - else: - return value + values = args[name] + + return [ + _parse_string_value(value, allowed_values, name=name, encoding=encoding) + for value in values + ] else: if required: - message = "Missing %s query parameter %r" % (param_type, name) + message = "Missing string query parameter %r" % (name) raise SynapseError(400, message, errcode=Codes.MISSING_PARAM) else: @@ -190,6 +251,55 @@ def parse_string_from_args( return default +def parse_string_from_args( + args: List[str], + name: Union[bytes, str], + default: Optional[str] = None, + required: bool = False, + allowed_values: Optional[Iterable[str]] = None, + encoding: Optional[str] = "ascii", +) -> Optional[Union[bytes, str]]: + """ + Parse the string parameter from the request query string list + and return the first result. + + If encoding is not None, the content of the query param will be + decoded to Unicode using the encoding, otherwise it will be encoded + + Args: + args: the twisted HTTP request.args list. + name: the name of the query parameter. + default: value to use if the parameter is absent, + defaults to None. Must be bytes if encoding is None. + required: whether to raise a 400 SynapseError if the + parameter is absent, defaults to False. + allowed_values: List of allowed values for the + string, or None if any value is allowed, defaults to None. Must be + the same type as name, if given. + encoding: The encoding to decode the string content with. + + Returns: + A string value or the default. Unicode if encoding + was given, bytes otherwise. + + Raises: + SynapseError if the parameter is absent and required, or if the + parameter is present, must be one of a list of allowed values and + is not one of those allowed values. + """ + + strings = parse_strings_from_args( + args, + name, + default=[default], + required=required, + allowed_values=allowed_values, + encoding=encoding, + ) + + return strings[0] + + def parse_json_value_from_request(request, allow_empty_body=False): """Parse a JSON value from the body of a twisted HTTP request. @@ -215,7 +325,7 @@ def parse_json_value_from_request(request, allow_empty_body=False): try: content = json_decoder.decode(content_bytes.decode("utf-8")) except Exception as e: - logger.warning("Unable to parse JSON: %s", e) + logger.warning("Unable to parse JSON: %s (%s)", e, content_bytes) raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON) return content From 3f96dbbda7696bf1e2d6ec93ce66bbfece8fdc91 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 28 May 2021 15:57:53 +0100 Subject: [PATCH 197/222] Log method and path when dropping request due to size limit (#10091) --- changelog.d/10091.misc | 1 + synapse/http/site.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelog.d/10091.misc diff --git a/changelog.d/10091.misc b/changelog.d/10091.misc new file mode 100644 index 000000000000..dbe310fd17a1 --- /dev/null +++ b/changelog.d/10091.misc @@ -0,0 +1 @@ +Log method and path when dropping request due to size limit. 
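Stepping back to the helper introduced in the previous patch: a short usage sketch for `parse_strings_from_args`, showing how repeated query parameters come back as a decoded list (the parameter values here are invented):

    from urllib.parse import parse_qs

    from synapse.http.servlet import parse_strings_from_args

    # Twisted's request.args maps bytes keys to lists of bytes values;
    # rebuild that shape for the sketch.
    args = {
        k.encode("ascii"): [v.encode("ascii") for v in vs]
        for k, vs in parse_qs("prev_event=$a:one&prev_event=$b:two").items()
    }

    prev_events = parse_strings_from_args(args, "prev_event", default=[])
    # -> ["$a:one", "$b:two"], decoded with the default "ascii" encoding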
diff --git a/synapse/http/site.py b/synapse/http/site.py index 671fd3fbcc29..40754b7bea7e 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -105,8 +105,10 @@ def handleContentChunk(self, data): assert self.content, "handleContentChunk() called before gotLength()" if self.content.tell() + len(data) > self._max_request_body_size: logger.warning( - "Aborting connection from %s because the request exceeds maximum size", + "Aborting connection from %s because the request exceeds maximum size: %s %s", self.client, + self.get_method(), + self.get_redacted_uri(), ) self.transport.abortConnection() return From ed53bf314fee25d79d349beae409caf81a2d677f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 28 May 2021 16:14:08 +0100 Subject: [PATCH 198/222] Set opentracing priority before setting other tags (#10092) ... because tags on spans which aren't being sampled get thrown away. --- changelog.d/10092.bugfix | 1 + synapse/api/auth.py | 8 +++---- synapse/federation/transport/server.py | 3 ++- synapse/logging/opentracing.py | 21 +++++++++++++++---- synapse/metrics/background_process_metrics.py | 10 +++++++-- 5 files changed, 32 insertions(+), 11 deletions(-) create mode 100644 changelog.d/10092.bugfix diff --git a/changelog.d/10092.bugfix b/changelog.d/10092.bugfix new file mode 100644 index 000000000000..09b2aba7ffa0 --- /dev/null +++ b/changelog.d/10092.bugfix @@ -0,0 +1 @@ +Fix a bug in the `force_tracing_for_users` option introduced in Synapse v1.35 which meant that the OpenTracing spans produced were missing most tags. diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 458306eba5ec..26a3b38918b5 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -206,11 +206,11 @@ async def get_user_by_req( requester = create_requester(user_id, app_service=app_service) request.requester = user_id + if user_id in self._force_tracing_for_users: + opentracing.set_tag(opentracing.tags.SAMPLING_PRIORITY, 1) opentracing.set_tag("authenticated_entity", user_id) opentracing.set_tag("user_id", user_id) opentracing.set_tag("appservice_id", app_service.id) - if user_id in self._force_tracing_for_users: - opentracing.set_tag(opentracing.tags.SAMPLING_PRIORITY, 1) return requester @@ -259,12 +259,12 @@ async def get_user_by_req( ) request.requester = requester + if user_info.token_owner in self._force_tracing_for_users: + opentracing.set_tag(opentracing.tags.SAMPLING_PRIORITY, 1) opentracing.set_tag("authenticated_entity", user_info.token_owner) opentracing.set_tag("user_id", user_info.user_id) if device_id: opentracing.set_tag("device_id", device_id) - if user_info.token_owner in self._force_tracing_for_users: - opentracing.set_tag(opentracing.tags.SAMPLING_PRIORITY, 1) return requester except KeyError: diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 59e0a434dce8..fdeaa0f37c65 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -37,6 +37,7 @@ ) from synapse.logging.context import run_in_background from synapse.logging.opentracing import ( + SynapseTags, start_active_span, start_active_span_from_request, tags, @@ -314,7 +315,7 @@ async def new_func(request, *args, **kwargs): raise request_tags = { - "request_id": request.get_request_id(), + SynapseTags.REQUEST_ID: request.get_request_id(), tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER, tags.HTTP_METHOD: request.get_method(), tags.HTTP_URL: request.get_redacted_uri(), diff --git 
a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index fba2fa390434..428831dad6ad 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -265,6 +265,12 @@ class SynapseTags: # Whether the sync response has new data to be returned to the client. SYNC_RESULT = "sync.new_data" + # incoming HTTP request ID (as written in the logs) + REQUEST_ID = "request_id" + + # HTTP request tag (used to distinguish full vs incremental syncs, etc) + REQUEST_TAG = "request_tag" + # Block everything by default # A regex which matches the server_names to expose traces for. @@ -824,7 +830,7 @@ def trace_servlet(request: "SynapseRequest", extract_context: bool = False): return request_tags = { - "request_id": request.get_request_id(), + SynapseTags.REQUEST_ID: request.get_request_id(), tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER, tags.HTTP_METHOD: request.get_method(), tags.HTTP_URL: request.get_redacted_uri(), @@ -833,9 +839,9 @@ def trace_servlet(request: "SynapseRequest", extract_context: bool = False): request_name = request.request_metrics.name if extract_context: - scope = start_active_span_from_request(request, request_name, tags=request_tags) + scope = start_active_span_from_request(request, request_name) else: - scope = start_active_span(request_name, tags=request_tags) + scope = start_active_span(request_name) with scope: try: @@ -845,4 +851,11 @@ def trace_servlet(request: "SynapseRequest", extract_context: bool = False): # with JsonResource). scope.span.set_operation_name(request.request_metrics.name) - scope.span.set_tag("request_tag", request.request_metrics.start_context.tag) + # set the tags *after* the servlet completes, in case it decided to + # prioritise the span (tags will get dropped on unprioritised spans) + request_tags[ + SynapseTags.REQUEST_TAG + ] = request.request_metrics.start_context.tag + + for k, v in request_tags.items(): + scope.span.set_tag(k, v) diff --git a/synapse/metrics/background_process_metrics.py b/synapse/metrics/background_process_metrics.py index 714caf84c379..0d6d643d3527 100644 --- a/synapse/metrics/background_process_metrics.py +++ b/synapse/metrics/background_process_metrics.py @@ -22,7 +22,11 @@ from twisted.internet import defer from synapse.logging.context import LoggingContext, PreserveLoggingContext -from synapse.logging.opentracing import noop_context_manager, start_active_span +from synapse.logging.opentracing import ( + SynapseTags, + noop_context_manager, + start_active_span, +) from synapse.util.async_helpers import maybe_awaitable if TYPE_CHECKING: @@ -202,7 +206,9 @@ async def run(): try: ctx = noop_context_manager() if bg_start_span: - ctx = start_active_span(desc, tags={"request_id": str(context)}) + ctx = start_active_span( + desc, tags={SynapseTags.REQUEST_ID: str(context)} + ) with ctx: return await maybe_awaitable(func(*args, **kwargs)) except Exception: From 84cf3e47a0318aba51d9f830d5e724182c5d93c4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 28 May 2021 16:28:01 +0100 Subject: [PATCH 199/222] Allow response of `/send_join` to be larger. (#10093) Fixes #10087. 
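The cap is raised only for `/send_join`, whose response for a very large room can run to hundreds of megabytes; because the body is fed to a streaming parser, raising the cap does not mean buffering 500 MB in one piece. A toy illustration of capped streaming (not Synapse's actual `read_body_with_max_size`; the parser's `write`/`finish` interface is an assumption):

    from typing import AsyncIterator

    async def read_with_cap(chunks: AsyncIterator[bytes], parser, max_size: int):
        # Feed chunks to the parser as they arrive, enforcing the byte cap
        # incrementally so the full body never has to sit in memory at once.
        total = 0
        async for chunk in chunks:
            total += len(chunk)
            if total > max_size:
                raise ValueError("response exceeded %d bytes" % max_size)
            parser.write(chunk)
        return parser.finish()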
--- changelog.d/10093.bugfix | 1 + synapse/federation/transport/client.py | 7 +++++++ synapse/http/matrixfederationclient.py | 14 +++++++++++++- 3 files changed, 21 insertions(+), 1 deletion(-) create mode 100644 changelog.d/10093.bugfix diff --git a/changelog.d/10093.bugfix b/changelog.d/10093.bugfix new file mode 100644 index 000000000000..e50de4b2ea2c --- /dev/null +++ b/changelog.d/10093.bugfix @@ -0,0 +1 @@ +Fix HTTP response size limit to allow joining very large rooms over federation. diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index e93ab83f7ff4..5b4f5d17f774 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -35,6 +35,11 @@ logger = logging.getLogger(__name__) +# Send join responses can be huge, so we set a separate limit here. The response +# is parsed in a streaming manner, which helps alleviate the issue of memory +# usage a bit. +MAX_RESPONSE_SIZE_SEND_JOIN = 500 * 1024 * 1024 + class TransportLayerClient: """Sends federation HTTP requests to other servers""" @@ -261,6 +266,7 @@ async def send_join_v1( path=path, data=content, parser=SendJoinParser(room_version, v1_api=True), + max_response_size=MAX_RESPONSE_SIZE_SEND_JOIN, ) return response @@ -276,6 +282,7 @@ async def send_join_v2( path=path, data=content, parser=SendJoinParser(room_version, v1_api=False), + max_response_size=MAX_RESPONSE_SIZE_SEND_JOIN, ) return response diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index f5503b394b37..1998990a144e 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -205,6 +205,7 @@ async def _handle_response( response: IResponse, start_ms: int, parser: ByteParser[T], + max_response_size: Optional[int] = None, ) -> T: """ Reads the body of a response with a timeout and sends it to a parser @@ -216,15 +217,20 @@ async def _handle_response( response: response to the request start_ms: Timestamp when request was made parser: The parser for the response + max_response_size: The maximum size to read from the response, if None + uses the default. Returns: The parsed response """ + if max_response_size is None: + max_response_size = MAX_RESPONSE_SIZE + try: check_content_type_is(response.headers, parser.CONTENT_TYPE) - d = read_body_with_max_size(response, parser, MAX_RESPONSE_SIZE) + d = read_body_with_max_size(response, parser, max_response_size) d = timeout_deferred(d, timeout=timeout_sec, reactor=reactor) length = await make_deferred_yieldable(d) @@ -735,6 +741,7 @@ async def put_json( backoff_on_404: bool = False, try_trailing_slash_on_400: bool = False, parser: Literal[None] = None, + max_response_size: Optional[int] = None, ) -> Union[JsonDict, list]: ... @@ -752,6 +759,7 @@ async def put_json( backoff_on_404: bool = False, try_trailing_slash_on_400: bool = False, parser: Optional[ByteParser[T]] = None, + max_response_size: Optional[int] = None, ) -> T: ... @@ -768,6 +776,7 @@ async def put_json( backoff_on_404: bool = False, try_trailing_slash_on_400: bool = False, parser: Optional[ByteParser] = None, + max_response_size: Optional[int] = None, ): """Sends the specified json data using PUT @@ -803,6 +812,8 @@ async def put_json( enabled. parser: The parser to use to decode the response. Defaults to parsing as JSON. + max_response_size: The maximum size to read from the response, if None + uses the default. Returns: Succeeds when we get a 2xx HTTP response. 
The @@ -853,6 +864,7 @@ async def put_json( response, start_ms, parser=parser, + max_response_size=max_response_size, ) return body From 1641c5c707fe9cac5f68589863082409c8979da6 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 28 May 2021 15:57:53 +0100 Subject: [PATCH 200/222] Log method and path when dropping request due to size limit (#10091) --- changelog.d/10091.misc | 1 + synapse/http/site.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelog.d/10091.misc diff --git a/changelog.d/10091.misc b/changelog.d/10091.misc new file mode 100644 index 000000000000..dbe310fd17a1 --- /dev/null +++ b/changelog.d/10091.misc @@ -0,0 +1 @@ +Log method and path when dropping request due to size limit. diff --git a/synapse/http/site.py b/synapse/http/site.py index 671fd3fbcc29..40754b7bea7e 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -105,8 +105,10 @@ def handleContentChunk(self, data): assert self.content, "handleContentChunk() called before gotLength()" if self.content.tell() + len(data) > self._max_request_body_size: logger.warning( - "Aborting connection from %s because the request exceeds maximum size", + "Aborting connection from %s because the request exceeds maximum size: %s %s", self.client, + self.get_method(), + self.get_redacted_uri(), ) self.transport.abortConnection() return From 9408b86f5c3616e8cfaa2c183e787780a3a64f95 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 27 May 2021 18:10:58 +0200 Subject: [PATCH 201/222] Limit the number of events sent over replication when persisting events. (#10082) --- changelog.d/10082.bugfix | 1 + synapse/handlers/federation.py | 17 ++++++++++------- 2 files changed, 11 insertions(+), 7 deletions(-) create mode 100644 changelog.d/10082.bugfix diff --git a/changelog.d/10082.bugfix b/changelog.d/10082.bugfix new file mode 100644 index 000000000000..b4f8bcc4fa3c --- /dev/null +++ b/changelog.d/10082.bugfix @@ -0,0 +1 @@ +Fixed a bug causing replication requests to fail when receiving a lot of events via federation. diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 678f6b770797..bf113152517f 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -91,6 +91,7 @@ get_domain_from_id, ) from synapse.util.async_helpers import Linearizer, concurrently_execute +from synapse.util.iterutils import batch_iter from synapse.util.retryutils import NotRetryingDestination from synapse.util.stringutils import shortstr from synapse.visibility import filter_events_for_server @@ -3053,13 +3054,15 @@ async def persist_events_and_notify( """ instance = self.config.worker.events_shard_config.get_instance(room_id) if instance != self._instance_name: - result = await self._send_events( - instance_name=instance, - store=self.store, - room_id=room_id, - event_and_contexts=event_and_contexts, - backfilled=backfilled, - ) + # Limit the number of events sent over federation. 
+ for batch in batch_iter(event_and_contexts, 1000): + result = await self._send_events( + instance_name=instance, + store=self.store, + room_id=room_id, + event_and_contexts=batch, + backfilled=backfilled, + ) return result["max_stream_id"] else: assert self.storage.persistence From 258a9a9e8bea851493fc2275ac6b81639c997afb Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 28 May 2021 17:06:05 +0100 Subject: [PATCH 202/222] 1.35.0rc3 --- CHANGES.md | 16 ++++++++++++++++ changelog.d/10082.bugfix | 1 - changelog.d/10091.misc | 1 - changelog.d/10093.bugfix | 1 - synapse/__init__.py | 2 +- 5 files changed, 17 insertions(+), 4 deletions(-) delete mode 100644 changelog.d/10082.bugfix delete mode 100644 changelog.d/10091.misc delete mode 100644 changelog.d/10093.bugfix diff --git a/CHANGES.md b/CHANGES.md index 1fac16580d13..8bd05c318dcf 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,19 @@ +Synapse 1.35.0rc3 (2021-05-28) +============================== + +Bugfixes +-------- + +- Fixed a bug causing replication requests to fail when receiving a lot of events via federation. ([\#10082](https://github.com/matrix-org/synapse/issues/10082)) +- Fix HTTP response size limit to allow joining very large rooms over federation. ([\#10093](https://github.com/matrix-org/synapse/issues/10093)) + + +Internal Changes +---------------- + +- Log method and path when dropping request due to size limit. ([\#10091](https://github.com/matrix-org/synapse/issues/10091)) + + Synapse 1.35.0rc2 (2021-05-27) ============================== diff --git a/changelog.d/10082.bugfix b/changelog.d/10082.bugfix deleted file mode 100644 index b4f8bcc4fa3c..000000000000 --- a/changelog.d/10082.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fixed a bug causing replication requests to fail when receiving a lot of events via federation. diff --git a/changelog.d/10091.misc b/changelog.d/10091.misc deleted file mode 100644 index dbe310fd17a1..000000000000 --- a/changelog.d/10091.misc +++ /dev/null @@ -1 +0,0 @@ -Log method and path when dropping request due to size limit. diff --git a/changelog.d/10093.bugfix b/changelog.d/10093.bugfix deleted file mode 100644 index e50de4b2ea2c..000000000000 --- a/changelog.d/10093.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix HTTP response size limit to allow joining very large rooms over federation. diff --git a/synapse/__init__.py b/synapse/__init__.py index 6faf31dbbce5..4591246bd187 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.35.0rc2" +__version__ = "1.35.0rc3" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 4f41b711d8b37da3403ce67c88d62133f732a459 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 28 May 2021 17:13:57 +0100 Subject: [PATCH 203/222] CHANGELOG --- CHANGES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 8bd05c318dcf..7e6f478d425e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -4,8 +4,8 @@ Synapse 1.35.0rc3 (2021-05-28) Bugfixes -------- -- Fixed a bug causing replication requests to fail when receiving a lot of events via federation. ([\#10082](https://github.com/matrix-org/synapse/issues/10082)) -- Fix HTTP response size limit to allow joining very large rooms over federation. ([\#10093](https://github.com/matrix-org/synapse/issues/10093)) +- Fixed a bug causing replication requests to fail when receiving a lot of events via federation. Introduced in v1.33.0. 
([\#10082](https://github.com/matrix-org/synapse/issues/10082)) +- Fix HTTP response size limit to allow joining very large rooms over federation. Introduced in v1.33.0. ([\#10093](https://github.com/matrix-org/synapse/issues/10093)) Internal Changes From 10e6d2abce644d5b6d6b59516061562f54382b94 Mon Sep 17 00:00:00 2001 From: Brad Murray Date: Tue, 1 Jun 2021 03:40:26 -0400 Subject: [PATCH 204/222] Fix opentracing inject to use the SpanContext, not the Span (#10074) Signed-off-by: Brad Murray brad@beeper.com --- changelog.d/10074.misc | 1 + synapse/logging/opentracing.py | 10 +++++----- 2 files changed, 6 insertions(+), 5 deletions(-) create mode 100644 changelog.d/10074.misc diff --git a/changelog.d/10074.misc b/changelog.d/10074.misc new file mode 100644 index 000000000000..8dbe2cd2bcad --- /dev/null +++ b/changelog.d/10074.misc @@ -0,0 +1 @@ +Update opentracing to inject the right context into the carrier. diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 428831dad6ad..f64845b80cc0 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -594,7 +594,7 @@ def inject_active_span_twisted_headers(headers, destination, check_destination=T span = opentracing.tracer.active_span carrier = {} # type: Dict[str, str] - opentracing.tracer.inject(span, opentracing.Format.HTTP_HEADERS, carrier) + opentracing.tracer.inject(span.context, opentracing.Format.HTTP_HEADERS, carrier) for key, value in carrier.items(): headers.addRawHeaders(key, value) @@ -631,7 +631,7 @@ def inject_active_span_byte_dict(headers, destination, check_destination=True): span = opentracing.tracer.active_span carrier = {} # type: Dict[str, str] - opentracing.tracer.inject(span, opentracing.Format.HTTP_HEADERS, carrier) + opentracing.tracer.inject(span.context, opentracing.Format.HTTP_HEADERS, carrier) for key, value in carrier.items(): headers[key.encode()] = [value.encode()] @@ -665,7 +665,7 @@ def inject_active_span_text_map(carrier, destination, check_destination=True): return opentracing.tracer.inject( - opentracing.tracer.active_span, opentracing.Format.TEXT_MAP, carrier + opentracing.tracer.active_span.context, opentracing.Format.TEXT_MAP, carrier ) @@ -687,7 +687,7 @@ def get_active_span_text_map(destination=None): carrier = {} # type: Dict[str, str] opentracing.tracer.inject( - opentracing.tracer.active_span, opentracing.Format.TEXT_MAP, carrier + opentracing.tracer.active_span.context, opentracing.Format.TEXT_MAP, carrier ) return carrier @@ -702,7 +702,7 @@ def active_span_context_as_string(): carrier = {} # type: Dict[str, str] if opentracing: opentracing.tracer.inject( - opentracing.tracer.active_span, opentracing.Format.TEXT_MAP, carrier + opentracing.tracer.active_span.context, opentracing.Format.TEXT_MAP, carrier ) return json_encoder.encode(carrier) From b4b2fd2ecee26214fa6b322bcb62bec1ea324c1a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 1 Jun 2021 12:04:47 +0100 Subject: [PATCH 205/222] add a cache to have_seen_event (#9953) Empirically, this helped my server considerably when handling gaps in Matrix HQ. The problem was that we would repeatedly call have_seen_events for the same set of (50K or so) auth_events, each of which would take many minutes to complete, even though it's only an index scan. 
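The shape of the change is a two-tier lookup: answer what we can from a cache keyed on `(room_id, event_id)`, then resolve the misses against the database in large chunks so that each batch costs a single index-only scan. A self-contained sketch of that flow (the `db_lookup` callable stands in for the real query, and the plain dict stands in for Synapse's cache machinery):

    from itertools import islice
    from typing import Awaitable, Callable, Dict, Iterable, List, Set, Tuple

    async def have_seen_events(
        room_id: str,
        event_ids: Iterable[str],
        cache: Dict[Tuple[str, str], bool],
        db_lookup: Callable[[List[str]], Awaitable[Set[str]]],
    ) -> Set[str]:
        keys = [(room_id, eid) for eid in event_ids]
        results = {k: cache[k] for k in keys if k in cache}
        misses = iter([k for k in keys if k not in cache])
        while True:
            # Each chunk is one database round trip, so make chunks large.
            chunk = list(islice(misses, 500))
            if not chunk:
                break
            found = await db_lookup([eid for _rid, eid in chunk])
            for key in chunk:
                results[key] = cache[key] = key[1] in found
        return {eid for (_rid, eid), seen in results.items() if seen}

Keying the cache on the room as well as the event is what makes the purge-time invalidation in the diff below possible: `tree=True` lets a whole room's entries be dropped with a single `(room_id,)` invalidation.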
--- changelog.d/9953.feature | 1 + changelog.d/9973.feature | 1 + changelog.d/9973.misc | 1 - synapse/handlers/federation.py | 12 ++- synapse/storage/databases/main/cache.py | 1 + .../storage/databases/main/events_worker.py | 61 ++++++++++-- .../storage/databases/main/purge_events.py | 26 ++++- tests/storage/databases/__init__.py | 13 +++ tests/storage/databases/main/__init__.py | 13 +++ .../databases/main/test_events_worker.py | 96 +++++++++++++++++++ 10 files changed, 205 insertions(+), 20 deletions(-) create mode 100644 changelog.d/9953.feature create mode 100644 changelog.d/9973.feature delete mode 100644 changelog.d/9973.misc create mode 100644 tests/storage/databases/__init__.py create mode 100644 tests/storage/databases/main/__init__.py create mode 100644 tests/storage/databases/main/test_events_worker.py diff --git a/changelog.d/9953.feature b/changelog.d/9953.feature new file mode 100644 index 000000000000..6b3d1adc707f --- /dev/null +++ b/changelog.d/9953.feature @@ -0,0 +1 @@ +Improve performance of incoming federation transactions in large rooms. diff --git a/changelog.d/9973.feature b/changelog.d/9973.feature new file mode 100644 index 000000000000..6b3d1adc707f --- /dev/null +++ b/changelog.d/9973.feature @@ -0,0 +1 @@ +Improve performance of incoming federation transactions in large rooms. diff --git a/changelog.d/9973.misc b/changelog.d/9973.misc deleted file mode 100644 index 7f22d42291b2..000000000000 --- a/changelog.d/9973.misc +++ /dev/null @@ -1 +0,0 @@ -Make `LruCache.invalidate` support tree invalidation, and remove `invalidate_many`. diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index bf113152517f..49ed7cabcc8d 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -577,7 +577,9 @@ async def _get_state_for_room( # Fetch the state events from the DB, and check we have the auth events. event_map = await self.store.get_events(state_event_ids, allow_rejected=True) - auth_events_in_store = await self.store.have_seen_events(auth_event_ids) + auth_events_in_store = await self.store.have_seen_events( + room_id, auth_event_ids + ) # Check for missing events. We handle state and auth event seperately, # as we want to pull the state from the DB, but we don't for the auth @@ -610,7 +612,7 @@ async def _get_state_for_room( if missing_auth_events: auth_events_in_store = await self.store.have_seen_events( - missing_auth_events + room_id, missing_auth_events ) missing_auth_events.difference_update(auth_events_in_store) @@ -710,7 +712,7 @@ async def _get_state_after_missing_prev_event( missing_auth_events = set(auth_event_ids) - fetched_events.keys() missing_auth_events.difference_update( - await self.store.have_seen_events(missing_auth_events) + await self.store.have_seen_events(room_id, missing_auth_events) ) logger.debug("We are also missing %i auth events", len(missing_auth_events)) @@ -2475,7 +2477,7 @@ async def _update_auth_events_and_context_for_auth( # # we start by checking if they are in the store, and then try calling /event_auth/. 
if missing_auth: - have_events = await self.store.have_seen_events(missing_auth) + have_events = await self.store.have_seen_events(event.room_id, missing_auth) logger.debug("Events %s are in the store", have_events) missing_auth.difference_update(have_events) @@ -2494,7 +2496,7 @@ async def _update_auth_events_and_context_for_auth( return context seen_remotes = await self.store.have_seen_events( - [e.event_id for e in remote_auth_chain] + event.room_id, [e.event_id for e in remote_auth_chain] ) for e in remote_auth_chain: diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index f7872501a06c..c57ae5ef15c6 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -168,6 +168,7 @@ def _invalidate_caches_for_event( backfilled, ): self._invalidate_get_event_cache(event_id) + self.have_seen_event.invalidate((room_id, event_id)) self.get_latest_event_ids_in_room.invalidate((room_id,)) diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 6963bbf7f484..403a5ddaba3e 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -22,6 +22,7 @@ Iterable, List, Optional, + Set, Tuple, overload, ) @@ -55,7 +56,7 @@ from synapse.storage.util.id_generators import MultiWriterIdGenerator, StreamIdGenerator from synapse.storage.util.sequence import build_sequence_generator from synapse.types import JsonDict, get_domain_from_id -from synapse.util.caches.descriptors import cached +from synapse.util.caches.descriptors import cached, cachedList from synapse.util.caches.lrucache import LruCache from synapse.util.iterutils import batch_iter from synapse.util.metrics import Measure @@ -1045,32 +1046,74 @@ async def have_events_in_timeline(self, event_ids): return {r["event_id"] for r in rows} - async def have_seen_events(self, event_ids): + async def have_seen_events( + self, room_id: str, event_ids: Iterable[str] + ) -> Set[str]: """Given a list of event ids, check if we have already processed them. + The room_id is only used to structure the cache (so that it can later be + invalidated by room_id) - there is no guarantee that the events are actually + in the room in question. + Args: - event_ids (iterable[str]): + room_id: Room we are polling + event_ids: events we are looking for Returns: set[str]: The events we have already seen. """ + res = await self._have_seen_events_dict( + (room_id, event_id) for event_id in event_ids + ) + return {eid for ((_rid, eid), have_event) in res.items() if have_event} + + @cachedList("have_seen_event", "keys") + async def _have_seen_events_dict( + self, keys: Iterable[Tuple[str, str]] + ) -> Dict[Tuple[str, str], bool]: + """Helper for have_seen_events + + Returns: + a dict {(room_id, event_id)-> bool} + """ # if the event cache contains the event, obviously we've seen it. - results = {x for x in event_ids if self._get_event_cache.contains(x)} - def have_seen_events_txn(txn, chunk): - sql = "SELECT event_id FROM events as e WHERE " + cache_results = { + (rid, eid) for (rid, eid) in keys if self._get_event_cache.contains((eid,)) + } + results = {x: True for x in cache_results} + + def have_seen_events_txn(txn, chunk: Tuple[Tuple[str, str], ...]): + # we deliberately do *not* query the database for room_id, to make the + # query an index-only lookup on `events_event_id_key`. + # + # We therefore pull the events from the database into a set... 
+ + sql = "SELECT event_id FROM events AS e WHERE " clause, args = make_in_list_sql_clause( - txn.database_engine, "e.event_id", chunk + txn.database_engine, "e.event_id", [eid for (_rid, eid) in chunk] ) txn.execute(sql + clause, args) - results.update(row[0] for row in txn) + found_events = {eid for eid, in txn} - for chunk in batch_iter((x for x in event_ids if x not in results), 100): + # ... and then we can update the results for each row in the batch + results.update({(rid, eid): (eid in found_events) for (rid, eid) in chunk}) + + # each batch requires its own index scan, so we make the batches as big as + # possible. + for chunk in batch_iter((k for k in keys if k not in cache_results), 500): await self.db_pool.runInteraction( "have_seen_events", have_seen_events_txn, chunk ) + return results + @cached(max_entries=100000, tree=True) + async def have_seen_event(self, room_id: str, event_id: str): + # this only exists for the benefit of the @cachedList descriptor on + # _have_seen_events_dict + raise NotImplementedError() + def _get_current_state_event_counts_txn(self, txn, room_id): """ See get_current_state_event_counts. diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py index 8f83748b5edb..7fb7780d0ffd 100644 --- a/synapse/storage/databases/main/purge_events.py +++ b/synapse/storage/databases/main/purge_events.py @@ -16,14 +16,14 @@ from typing import Any, List, Set, Tuple from synapse.api.errors import SynapseError -from synapse.storage._base import SQLBaseStore +from synapse.storage.databases.main import CacheInvalidationWorkerStore from synapse.storage.databases.main.state import StateGroupWorkerStore from synapse.types import RoomStreamToken logger = logging.getLogger(__name__) -class PurgeEventsStore(StateGroupWorkerStore, SQLBaseStore): +class PurgeEventsStore(StateGroupWorkerStore, CacheInvalidationWorkerStore): async def purge_history( self, room_id: str, token: str, delete_local_events: bool ) -> Set[int]: @@ -203,8 +203,6 @@ def _purge_history_txn( "DELETE FROM event_to_state_groups " "WHERE event_id IN (SELECT event_id from events_to_purge)" ) - for event_id, _ in event_rows: - txn.call_after(self._get_state_group_for_event.invalidate, (event_id,)) # Delete all remote non-state events for table in ( @@ -283,6 +281,20 @@ def _purge_history_txn( # so make sure to keep this actually last. txn.execute("DROP TABLE events_to_purge") + for event_id, should_delete in event_rows: + self._invalidate_cache_and_stream( + txn, self._get_state_group_for_event, (event_id,) + ) + + # XXX: This is racy, since have_seen_events could be called between the + # transaction completing and the invalidation running. On the other hand, + # that's no different to calling `have_seen_events` just before the + # event is deleted from the database. + if should_delete: + self._invalidate_cache_and_stream( + txn, self.have_seen_event, (room_id, event_id) + ) + logger.info("[purge] done") return referenced_state_groups @@ -422,7 +434,11 @@ def _purge_room_txn(self, txn, room_id: str) -> List[int]: # index on them. In any case we should be clearing out 'stream' tables # periodically anyway (#5888) - # TODO: we could probably usefully do a bunch of cache invalidation here + # TODO: we could probably usefully do a bunch more cache invalidation here + + # XXX: as with purge_history, this is racy, but no worse than other races + # that already exist. 
+ self._invalidate_cache_and_stream(txn, self.have_seen_event, (room_id,)) logger.info("[purge] done") diff --git a/tests/storage/databases/__init__.py b/tests/storage/databases/__init__.py new file mode 100644 index 000000000000..c24c7ecd92c2 --- /dev/null +++ b/tests/storage/databases/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/storage/databases/main/__init__.py b/tests/storage/databases/main/__init__.py new file mode 100644 index 000000000000..c24c7ecd92c2 --- /dev/null +++ b/tests/storage/databases/main/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/storage/databases/main/test_events_worker.py b/tests/storage/databases/main/test_events_worker.py new file mode 100644 index 000000000000..932970fd9ad1 --- /dev/null +++ b/tests/storage/databases/main/test_events_worker.py @@ -0,0 +1,96 @@ +# Copyright 2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import json + +from synapse.logging.context import LoggingContext +from synapse.storage.databases.main.events_worker import EventsWorkerStore + +from tests import unittest + + +class HaveSeenEventsTestCase(unittest.HomeserverTestCase): + def prepare(self, reactor, clock, hs): + self.store: EventsWorkerStore = hs.get_datastore() + + # insert some test data + for rid in ("room1", "room2"): + self.get_success( + self.store.db_pool.simple_insert( + "rooms", + {"room_id": rid, "room_version": 4}, + ) + ) + + for idx, (rid, eid) in enumerate( + ( + ("room1", "event10"), + ("room1", "event11"), + ("room1", "event12"), + ("room2", "event20"), + ) + ): + self.get_success( + self.store.db_pool.simple_insert( + "events", + { + "event_id": eid, + "room_id": rid, + "topological_ordering": idx, + "stream_ordering": idx, + "type": "test", + "processed": True, + "outlier": False, + }, + ) + ) + self.get_success( + self.store.db_pool.simple_insert( + "event_json", + { + "event_id": eid, + "room_id": rid, + "json": json.dumps({"type": "test", "room_id": rid}), + "internal_metadata": "{}", + "format_version": 3, + }, + ) + ) + + def test_simple(self): + with LoggingContext(name="test") as ctx: + res = self.get_success( + self.store.have_seen_events("room1", ["event10", "event19"]) + ) + self.assertEquals(res, {"event10"}) + + # that should result in a single db query + self.assertEquals(ctx.get_resource_usage().db_txn_count, 1) + + # a second lookup of the same events should cause no queries + with LoggingContext(name="test") as ctx: + res = self.get_success( + self.store.have_seen_events("room1", ["event10", "event19"]) + ) + self.assertEquals(res, {"event10"}) + self.assertEquals(ctx.get_resource_usage().db_txn_count, 0) + + def test_query_via_event_cache(self): + # fetch an event into the event cache + self.get_success(self.store.get_event("event10")) + + # looking it up should now cause no db hits + with LoggingContext(name="test") as ctx: + res = self.get_success(self.store.have_seen_events("room1", ["event10"])) + self.assertEquals(res, {"event10"}) + self.assertEquals(ctx.get_resource_usage().db_txn_count, 0) From 408ecf8ece397bcf08564031379b461d5c9b0de5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 1 Jun 2021 13:19:50 +0100 Subject: [PATCH 206/222] Announce deprecation of experimental `msc2858_enabled` option. (#10101) c.f. https://github.com/matrix-org/synapse/pull/9617 and https://github.com/matrix-org/matrix-doc/blob/master/proposals/2858-Multiple-SSO-Identity-Providers.md Fixes #9627. --- changelog.d/10101.removal | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/10101.removal diff --git a/changelog.d/10101.removal b/changelog.d/10101.removal new file mode 100644 index 000000000000..f2020e9ddf38 --- /dev/null +++ b/changelog.d/10101.removal @@ -0,0 +1 @@ +The core Synapse development team plan to drop support for the [unstable API of MSC2858](https://github.com/matrix-org/matrix-doc/blob/master/proposals/2858-Multiple-SSO-Identity-Providers.md#unstable-prefix), including the undocumented `experimental.msc2858_enabled` config option, in August 2021. Client authors should ensure that their clients are updated to use the stable API (which has been supported since Synapse 1.30) well before that time, to give their users time to upgrade. 
From a8372ad591e07fa76e194a22732a5301d9e55b6f Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 1 Jun 2021 13:23:55 +0100 Subject: [PATCH 207/222] 1.35.0 --- CHANGES.md | 9 +++++++++ changelog.d/10101.removal | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/10101.removal diff --git a/CHANGES.md b/CHANGES.md index 7e6f478d425e..09f0be8e176e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.35.0 (2021-06-01) +=========================== + +Deprecations and Removals +------------------------- + +- The core Synapse development team plan to drop support for the [unstable API of MSC2858](https://github.com/matrix-org/matrix-doc/blob/master/proposals/2858-Multiple-SSO-Identity-Providers.md#unstable-prefix), including the undocumented `experimental.msc2858_enabled` config option, in August 2021. Client authors should ensure that their clients are updated to use the stable API (which has been supported since Synapse 1.30) well before that time, to give their users time to upgrade. ([\#10101](https://github.com/matrix-org/synapse/issues/10101)) + + Synapse 1.35.0rc3 (2021-05-28) ============================== diff --git a/changelog.d/10101.removal b/changelog.d/10101.removal deleted file mode 100644 index f2020e9ddf38..000000000000 --- a/changelog.d/10101.removal +++ /dev/null @@ -1 +0,0 @@ -The core Synapse development team plan to drop support for the [unstable API of MSC2858](https://github.com/matrix-org/matrix-doc/blob/master/proposals/2858-Multiple-SSO-Identity-Providers.md#unstable-prefix), including the undocumented `experimental.msc2858_enabled` config option, in August 2021. Client authors should ensure that their clients are updated to use the stable API (which has been supported since Synapse 1.30) well before that time, to give their users time to upgrade. diff --git a/debian/changelog b/debian/changelog index bf99ae772c9e..d5efb8ccba63 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.35.0) stable; urgency=medium + + * New synapse release 1.35.0. + + -- Synapse Packaging team Tue, 01 Jun 2021 13:23:35 +0100 + matrix-synapse-py3 (1.34.0) stable; urgency=medium * New synapse release 1.34.0. diff --git a/synapse/__init__.py b/synapse/__init__.py index 4591246bd187..d9843a170860 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.35.0rc3" +__version__ = "1.35.0" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 08e54345b1332889cd9e88a778bc13caca7b556f Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 1 Jun 2021 13:25:18 +0100 Subject: [PATCH 208/222] Indicate that there were no functional changes since v1.35.0rc3 --- CHANGES.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 09f0be8e176e..c969fe1ebddc 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,8 @@ Synapse 1.35.0 (2021-06-01) =========================== +No changes since v1.35.0rc3. 
+ Deprecations and Removals ------------------------- From 3fdaf4df55f52ccf283cf6b0ca73a3f98cd5e8f0 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 1 Jun 2021 13:40:46 +0100 Subject: [PATCH 209/222] Merge v1.35.0rc3 into v1.35.0 due to incorrect tagging --- CHANGES.md | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index c969fe1ebddc..f03a53affc1a 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,17 +1,13 @@ Synapse 1.35.0 (2021-06-01) =========================== -No changes since v1.35.0rc3. +Note that [the tag](https://github.com/matrix-org/synapse/releases/tag/v1.35.0rc3) and [docker images](https://hub.docker.com/layers/matrixdotorg/synapse/v1.35.0rc3/images/sha256-34ccc87bd99a17e2cbc0902e678b5937d16bdc1991ead097eee6096481ecf2c4?context=explore) for `v1.35.0rc3` were incorrectly built. If you are experiencing issues with either, it is recommended to upgrade to the equivalent tag or docker image for the `v1.35.0` release. Deprecations and Removals ------------------------- - The core Synapse development team plan to drop support for the [unstable API of MSC2858](https://github.com/matrix-org/matrix-doc/blob/master/proposals/2858-Multiple-SSO-Identity-Providers.md#unstable-prefix), including the undocumented `experimental.msc2858_enabled` config option, in August 2021. Client authors should ensure that their clients are updated to use the stable API (which has been supported since Synapse 1.30) well before that time, to give their users time to upgrade. ([\#10101](https://github.com/matrix-org/synapse/issues/10101)) - -Synapse 1.35.0rc3 (2021-05-28) -============================== - Bugfixes -------- From 4deaebfe00e5416b408f5822b521fc9c55f09494 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 2 Jun 2021 15:48:17 +0100 Subject: [PATCH 210/222] Make /sync do less state res (#10102) --- changelog.d/10102.misc | 1 + synapse/handlers/sync.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/10102.misc diff --git a/changelog.d/10102.misc b/changelog.d/10102.misc new file mode 100644 index 000000000000..87672ee295c7 --- /dev/null +++ b/changelog.d/10102.misc @@ -0,0 +1 @@ +Make `/sync` do fewer state resolutions. diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 0fcc1532da8d..069ffc76f73b 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -463,7 +463,7 @@ async def _load_filtered_recents( # ensure that we always include current state in the timeline current_state_ids = frozenset() # type: FrozenSet[str] if any(e.is_state() for e in recents): - current_state_ids_map = await self.state.get_current_state_ids( + current_state_ids_map = await self.store.get_current_state_ids( room_id ) current_state_ids = frozenset(current_state_ids_map.values()) @@ -523,7 +523,7 @@ async def _load_filtered_recents( # ensure that we always include current state in the timeline current_state_ids = frozenset() if any(e.is_state() for e in loaded_recents): - current_state_ids_map = await self.state.get_current_state_ids( + current_state_ids_map = await self.store.get_current_state_ids( room_id ) current_state_ids = frozenset(current_state_ids_map.values()) From 3cf6b34b4e203dcda803ab3ac88c9dadc591e4a1 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 2 Jun 2021 11:31:41 -0400 Subject: [PATCH 211/222] Do not show invite-only rooms in spaces summary (unless joined/invited). 
(#10109) --- changelog.d/10109.bugfix | 1 + synapse/handlers/space_summary.py | 19 +++++++++---------- 2 files changed, 10 insertions(+), 10 deletions(-) create mode 100644 changelog.d/10109.bugfix diff --git a/changelog.d/10109.bugfix b/changelog.d/10109.bugfix new file mode 100644 index 000000000000..bc41bf9e5e3c --- /dev/null +++ b/changelog.d/10109.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in v1.35.0 where invite-only rooms would be shown to users in a space who were not invited. diff --git a/synapse/handlers/space_summary.py b/synapse/handlers/space_summary.py index abd9ddecca20..046dba6fd87a 100644 --- a/synapse/handlers/space_summary.py +++ b/synapse/handlers/space_summary.py @@ -26,7 +26,6 @@ HistoryVisibility, Membership, ) -from synapse.api.errors import AuthError from synapse.events import EventBase from synapse.events.utils import format_event_for_client_v2 from synapse.types import JsonDict @@ -456,16 +455,16 @@ async def _is_room_accessible( return True # Otherwise, check if they should be allowed access via membership in a space. - try: - await self._event_auth_handler.check_restricted_join_rules( - state_ids, room_version, requester, member_event + if self._event_auth_handler.has_restricted_join_rules( + state_ids, room_version + ): + allowed_spaces = ( + await self._event_auth_handler.get_spaces_that_allow_join(state_ids) ) - except AuthError: - # The user doesn't have access due to spaces, but might have access - # another way. Keep trying. - pass - else: - return True + if await self._event_auth_handler.is_user_in_rooms( + allowed_spaces, requester + ): + return True # If this is a request over federation, check if the host is in the room or # is in one of the spaces specified via the join rules. From fc3d2dc269a79e0404d0a9867e5042354d59147f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 2 Jun 2021 16:37:59 +0100 Subject: [PATCH 212/222] Rewrite the KeyRing (#10035) --- changelog.d/10035.feature | 1 + synapse/crypto/keyring.py | 642 ++++++++---------- synapse/federation/transport/server.py | 4 +- synapse/groups/attestations.py | 4 +- synapse/rest/key/v2/remote_key_resource.py | 9 +- tests/crypto/test_keyring.py | 170 ++--- tests/rest/key/v2/test_remote_key_resource.py | 18 +- tests/util/test_batching_queue.py | 37 +- 8 files changed, 393 insertions(+), 492 deletions(-) create mode 100644 changelog.d/10035.feature diff --git a/changelog.d/10035.feature b/changelog.d/10035.feature new file mode 100644 index 000000000000..68052b5a7eaa --- /dev/null +++ b/changelog.d/10035.feature @@ -0,0 +1 @@ +Rewrite logic around verifying JSON object and fetching server keys to be more performant and use less memory. 
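The heart of the rewrite below is that concurrent verification requests are funneled through a `BatchingQueue` and deduplicated per server/key ID, keeping only the largest `minimum_valid_until_ts` so that a single fetch can satisfy every caller. A standalone sketch of that deduplication step, mirroring `_inner_fetch_key_requests` in the diff below (the `FetchKeyRequest` tuple here is a simplified stand-in for Synapse's `_FetchKeyRequest`):

```python
from typing import Dict, List, NamedTuple

class FetchKeyRequest(NamedTuple):
    server_name: str
    minimum_valid_until_ts: int
    key_ids: List[str]

def dedupe_requests(requests: List[FetchKeyRequest]) -> List[FetchKeyRequest]:
    # server name -> key ID -> largest minimum_valid_until_ts requested
    server_to_key_to_ts: Dict[str, Dict[str, int]] = {}
    for request in requests:
        by_server = server_to_key_to_ts.setdefault(request.server_name, {})
        for key_id in request.key_ids:
            existing_ts = by_server.get(key_id, 0)
            # one fetch with the *maximum* validity satisfies every caller
            by_server[key_id] = max(request.minimum_valid_until_ts, existing_ts)
    return [
        FetchKeyRequest(server_name, ts, [key_id])
        for server_name, by_server in server_to_key_to_ts.items()
        for key_id, ts in by_server.items()
    ]

reqs = [
    FetchKeyRequest("remote.example", 1000, ["ed25519:a"]),
    FetchKeyRequest("remote.example", 1500, ["ed25519:a", "ed25519:b"]),
]
assert dedupe_requests(reqs) == [
    FetchKeyRequest("remote.example", 1500, ["ed25519:a"]),
    FetchKeyRequest("remote.example", 1500, ["ed25519:b"]),
]
```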
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 6fc07129788f..c840ffca7141 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -16,8 +16,7 @@ import abc import logging import urllib -from collections import defaultdict -from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Optional, Set, Tuple +from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Optional, Tuple import attr from signedjson.key import ( @@ -44,17 +43,12 @@ from synapse.config.key import TrustedKeyServer from synapse.events import EventBase from synapse.events.utils import prune_event_dict -from synapse.logging.context import ( - PreserveLoggingContext, - make_deferred_yieldable, - preserve_fn, - run_in_background, -) +from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.storage.keys import FetchKeyResult from synapse.types import JsonDict from synapse.util import unwrapFirstError from synapse.util.async_helpers import yieldable_gather_results -from synapse.util.metrics import Measure +from synapse.util.batching_queue import BatchingQueue from synapse.util.retryutils import NotRetryingDestination if TYPE_CHECKING: @@ -80,32 +74,19 @@ class VerifyJsonRequest: minimum_valid_until_ts: time at which we require the signing key to be valid. (0 implies we don't care) - request_name: The name of the request. - key_ids: The set of key_ids to that could be used to verify the JSON object - - key_ready (Deferred[str, str, nacl.signing.VerifyKey]): - A deferred (server_name, key_id, verify_key) tuple that resolves when - a verify key has been fetched. The deferreds' callbacks are run with no - logcontext. - - If we are unable to find a key which satisfies the request, the deferred - errbacks with an M_UNAUTHORIZED SynapseError. """ server_name = attr.ib(type=str) get_json_object = attr.ib(type=Callable[[], JsonDict]) minimum_valid_until_ts = attr.ib(type=int) - request_name = attr.ib(type=str) key_ids = attr.ib(type=List[str]) - key_ready = attr.ib(default=attr.Factory(defer.Deferred), type=defer.Deferred) @staticmethod def from_json_object( server_name: str, json_object: JsonDict, minimum_valid_until_ms: int, - request_name: str, ): """Create a VerifyJsonRequest to verify all signatures on a signed JSON object for the given server. @@ -115,7 +96,6 @@ def from_json_object( server_name, lambda: json_object, minimum_valid_until_ms, - request_name=request_name, key_ids=key_ids, ) @@ -135,16 +115,48 @@ def from_event( # memory than the Event object itself. lambda: prune_event_dict(event.room_version, event.get_pdu_json()), minimum_valid_until_ms, - request_name=event.event_id, key_ids=key_ids, ) + def to_fetch_key_request(self) -> "_FetchKeyRequest": + """Create a key fetch request for all keys needed to satisfy the + verification request. + """ + return _FetchKeyRequest( + server_name=self.server_name, + minimum_valid_until_ts=self.minimum_valid_until_ts, + key_ids=self.key_ids, + ) + class KeyLookupError(ValueError): pass +@attr.s(slots=True) +class _FetchKeyRequest: + """A request for keys for a given server. + + We will continue to try and fetch until we have all the keys listed under + `key_ids` (with an appropriate `valid_until_ts` property) or we run out of + places to fetch keys from. + + Attributes: + server_name: The name of the server that owns the keys. + minimum_valid_until_ts: The timestamp which the keys must be valid until. 
+        key_ids: The IDs of the keys to attempt to fetch
+    """
+
+    server_name = attr.ib(type=str)
+    minimum_valid_until_ts = attr.ib(type=int)
+    key_ids = attr.ib(type=List[str])
+
+
 class Keyring:
+    """Handles verifying signed JSON objects and fetching the keys needed to do
+    so.
+    """
+
     def __init__(
         self, hs: "HomeServer", key_fetchers: "Optional[Iterable[KeyFetcher]]" = None
     ):
@@ -158,22 +170,22 @@ def __init__(
             )
         self._key_fetchers = key_fetchers

-        # map from server name to Deferred. Has an entry for each server with
-        # an ongoing key download; the Deferred completes once the download
-        # completes.
-        #
-        # These are regular, logcontext-agnostic Deferreds.
-        self.key_downloads = {}  # type: Dict[str, defer.Deferred]
+        self._server_queue = BatchingQueue(
+            "keyring_server",
+            clock=hs.get_clock(),
+            process_batch_callback=self._inner_fetch_key_requests,
+        )  # type: BatchingQueue[_FetchKeyRequest, Dict[str, Dict[str, FetchKeyResult]]]

-    def verify_json_for_server(
+    async def verify_json_for_server(
         self,
         server_name: str,
         json_object: JsonDict,
         validity_time: int,
-        request_name: str,
-    ) -> defer.Deferred:
+    ) -> None:
         """Verify that a JSON object has been signed by a given server

+        Completes if the object was correctly signed, otherwise raises.
+
         Args:
             server_name: name of the server which must have signed this object
@@ -181,52 +193,45 @@ def verify_json_for_server(

             validity_time: timestamp at which we require the signing key to
                 be valid. (0 implies we don't care)
-
-            request_name: an identifier for this json object (eg, an event id)
-                for logging.
-
-        Returns:
-            Deferred[None]: completes if the the object was correctly signed, otherwise
-                errbacks with an error
         """
         request = VerifyJsonRequest.from_json_object(
             server_name,
             json_object,
             validity_time,
-            request_name,
         )
-        requests = (request,)
-        return make_deferred_yieldable(self._verify_objects(requests)[0])
+        return await self.process_request(request)

     def verify_json_objects_for_server(
-        self, server_and_json: Iterable[Tuple[str, dict, int, str]]
+        self, server_and_json: Iterable[Tuple[str, dict, int]]
     ) -> List[defer.Deferred]:
         """Bulk verifies signatures of json objects, bulk fetching keys as
         necessary.

         Args:
             server_and_json:
-                Iterable of (server_name, json_object, validity_time, request_name)
+                Iterable of (server_name, json_object, validity_time)
                 tuples.

                 validity_time is a timestamp at which the signing key must be
                 valid.

-                request_name is an identifier for this json object (eg, an event id)
-                for logging.
-
         Returns:
             List: for each input triplet, a deferred indicating success
                 or failure to verify each json object's signature for the given
                 server_name. The deferreds run their callbacks in the sentinel
                 logcontext.
         """
-        return self._verify_objects(
-            VerifyJsonRequest.from_json_object(
-                server_name, json_object, validity_time, request_name
+        return [
+            run_in_background(
+                self.process_request,
+                VerifyJsonRequest.from_json_object(
+                    server_name,
+                    json_object,
+                    validity_time,
+                ),
             )
-            for server_name, json_object, validity_time, request_name in server_and_json
-        )
+            for server_name, json_object, validity_time in server_and_json
+        ]

     def verify_events_for_server(
         self, server_and_events: Iterable[Tuple[str, EventBase, int]]
@@ -252,321 +257,223 @@ def verify_events_for_server(
                 server_name. The deferreds run their callbacks in the sentinel
                 logcontext.
""" - return self._verify_objects( - VerifyJsonRequest.from_event(server_name, event, validity_time) + return [ + run_in_background( + self.process_request, + VerifyJsonRequest.from_event( + server_name, + event, + validity_time, + ), + ) for server_name, event, validity_time in server_and_events - ) - - def _verify_objects( - self, verify_requests: Iterable[VerifyJsonRequest] - ) -> List[defer.Deferred]: - """Does the work of verify_json_[objects_]for_server - - - Args: - verify_requests: Iterable of verification requests. + ] - Returns: - List: for each input item, a deferred indicating success - or failure to verify each json object's signature for the given - server_name. The deferreds run their callbacks in the sentinel - logcontext. + async def process_request(self, verify_request: VerifyJsonRequest) -> None: + """Processes the `VerifyJsonRequest`. Raises if the object is not signed + by the server, the signatures don't match or we failed to fetch the + necessary keys. """ - # a list of VerifyJsonRequests which are awaiting a key lookup - key_lookups = [] - handle = preserve_fn(_handle_key_deferred) - - def process(verify_request: VerifyJsonRequest) -> defer.Deferred: - """Process an entry in the request list - - Adds a key request to key_lookups, and returns a deferred which - will complete or fail (in the sentinel context) when verification completes. - """ - if not verify_request.key_ids: - return defer.fail( - SynapseError( - 400, - "Not signed by %s" % (verify_request.server_name,), - Codes.UNAUTHORIZED, - ) - ) - logger.debug( - "Verifying %s for %s with key_ids %s, min_validity %i", - verify_request.request_name, - verify_request.server_name, - verify_request.key_ids, - verify_request.minimum_valid_until_ts, + if not verify_request.key_ids: + raise SynapseError( + 400, + f"Not signed by {verify_request.server_name}", + Codes.UNAUTHORIZED, ) - # add the key request to the queue, but don't start it off yet. - key_lookups.append(verify_request) - - # now run _handle_key_deferred, which will wait for the key request - # to complete and then do the verification. - # - # We want _handle_key_request to log to the right context, so we - # wrap it with preserve_fn (aka run_in_background) - return handle(verify_request) - - results = [process(r) for r in verify_requests] - - if key_lookups: - run_in_background(self._start_key_lookups, key_lookups) - - return results - - async def _start_key_lookups( - self, verify_requests: List[VerifyJsonRequest] - ) -> None: - """Sets off the key fetches for each verify request - - Once each fetch completes, verify_request.key_ready will be resolved. - - Args: - verify_requests: - """ - - try: - # map from server name to a set of outstanding request ids - server_to_request_ids = {} # type: Dict[str, Set[int]] - - for verify_request in verify_requests: - server_name = verify_request.server_name - request_id = id(verify_request) - server_to_request_ids.setdefault(server_name, set()).add(request_id) - - # Wait for any previous lookups to complete before proceeding. - await self.wait_for_previous_lookups(server_to_request_ids.keys()) - - # take out a lock on each of the servers by sticking a Deferred in - # key_downloads - for server_name in server_to_request_ids.keys(): - self.key_downloads[server_name] = defer.Deferred() - logger.debug("Got key lookup lock on %s", server_name) - - # When we've finished fetching all the keys for a given server_name, - # drop the lock by resolving the deferred in key_downloads. 
-            def drop_server_lock(server_name):
-                d = self.key_downloads.pop(server_name)
-                d.callback(None)
-
-            def lookup_done(res, verify_request):
-                server_name = verify_request.server_name
-                server_requests = server_to_request_ids[server_name]
-                server_requests.remove(id(verify_request))
-
-                # if there are no more requests for this server, we can drop the lock.
-                if not server_requests:
-                    logger.debug("Releasing key lookup lock on %s", server_name)
-                    drop_server_lock(server_name)
-
-                return res
+        # Add the keys we need to verify to the queue for retrieval. We queue
+        # up requests for the same server so we don't end up with many in flight
+        # requests for the same keys.
+        key_request = verify_request.to_fetch_key_request()
+        found_keys_by_server = await self._server_queue.add_to_queue(
+            key_request, key=verify_request.server_name
+        )

-            for verify_request in verify_requests:
-                verify_request.key_ready.addBoth(lookup_done, verify_request)
+        # Since we batch up requests the returned set of keys may contain keys
+        # from other servers, so we pull out only the ones we care about.
+        found_keys = found_keys_by_server.get(verify_request.server_name, {})

-            # Actually start fetching keys.
-            self._get_server_verify_keys(verify_requests)
-        except Exception:
-            logger.exception("Error starting key lookups")
+        # Verify each signature we got valid keys for, raising if we can't
+        # verify any of them.
+        verified = False
+        for key_id in verify_request.key_ids:
+            key_result = found_keys.get(key_id)
+            if not key_result:
+                continue

-    async def wait_for_previous_lookups(self, server_names: Iterable[str]) -> None:
-        """Waits for any previous key lookups for the given servers to finish.
+            if key_result.valid_until_ts < verify_request.minimum_valid_until_ts:
+                continue

-        Args:
-            server_names: list of servers which we want to look up
+            verify_key = key_result.verify_key
+            json_object = verify_request.get_json_object()
+            try:
+                verify_signed_json(
+                    json_object,
+                    verify_request.server_name,
+                    verify_key,
+                )
+                verified = True
+            except SignatureVerifyException as e:
+                logger.debug(
+                    "Error verifying signature for %s:%s:%s with key %s: %s",
+                    verify_request.server_name,
+                    verify_key.alg,
+                    verify_key.version,
+                    encode_verify_key_base64(verify_key),
+                    str(e),
+                )
+                raise SynapseError(
+                    401,
+                    "Invalid signature for server %s with key %s:%s: %s"
+                    % (
+                        verify_request.server_name,
+                        verify_key.alg,
+                        verify_key.version,
+                        str(e),
+                    ),
+                    Codes.UNAUTHORIZED,
+                )

-        Returns:
-            Resolves once all key lookups for the given servers have
-                completed. Follows the synapse rules of logcontext preservation.
-        """
-        loop_count = 1
-        while True:
-            wait_on = [
-                (server_name, self.key_downloads[server_name])
-                for server_name in server_names
-                if server_name in self.key_downloads
-            ]
-            if not wait_on:
-                break
-            logger.info(
-                "Waiting for existing lookups for %s to complete [loop %i]",
-                [w[0] for w in wait_on],
-                loop_count,
+        if not verified:
+            raise SynapseError(
+                401,
+                f"Failed to find any key to satisfy: {key_request}",
+                Codes.UNAUTHORIZED,
             )
-            with PreserveLoggingContext():
-                await defer.DeferredList((w[1] for w in wait_on))

-            loop_count += 1
+    async def _inner_fetch_key_requests(
+        self, requests: List[_FetchKeyRequest]
+    ) -> Dict[str, Dict[str, FetchKeyResult]]:
+        """Processing function for the queue of `_FetchKeyRequest`."""
+
+        logger.debug("Starting fetch for %s", requests)
+
+        # First we need to deduplicate requests for the same key. We do this by
+        # taking the *maximum* requested `minimum_valid_until_ts` for each pair
+        # of server name/key ID.
+        server_to_key_to_ts = {}  # type: Dict[str, Dict[str, int]]
+        for request in requests:
+            by_server = server_to_key_to_ts.setdefault(request.server_name, {})
+            for key_id in request.key_ids:
+                existing_ts = by_server.get(key_id, 0)
+                by_server[key_id] = max(request.minimum_valid_until_ts, existing_ts)
+
+        deduped_requests = [
+            _FetchKeyRequest(server_name, minimum_valid_ts, [key_id])
+            for server_name, by_server in server_to_key_to_ts.items()
+            for key_id, minimum_valid_ts in by_server.items()
+        ]
+
+        logger.debug("Deduplicated key requests to %s", deduped_requests)
+
+        # For each key we call `_inner_fetch_key_request`, which will handle
+        # fetching each key. Note these shouldn't throw if we fail to contact
+        # other servers etc.
+        results_per_request = await yieldable_gather_results(
+            self._inner_fetch_key_request,
+            deduped_requests,
+        )

-    def _get_server_verify_keys(self, verify_requests: List[VerifyJsonRequest]) -> None:
-        """Tries to find at least one key for each verify request
+        # We now convert the returned list of results into a map from server
+        # name to key ID to FetchKeyResult, to return.
+        to_return = {}  # type: Dict[str, Dict[str, FetchKeyResult]]
+        for (request, results) in zip(deduped_requests, results_per_request):
+            to_return_by_server = to_return.setdefault(request.server_name, {})
+            for key_id, key_result in results.items():
+                existing = to_return_by_server.get(key_id)
+                if not existing or existing.valid_until_ts < key_result.valid_until_ts:
+                    to_return_by_server[key_id] = key_result

-        For each verify_request, verify_request.key_ready is called back with
-        params (server_name, key_id, VerifyKey) if a key is found, or errbacked
-        with a SynapseError if none of the keys are found.
+        return to_return

-        Args:
-            verify_requests: list of verify requests
+    async def _inner_fetch_key_request(
+        self, verify_request: _FetchKeyRequest
+    ) -> Dict[str, FetchKeyResult]:
+        """Attempt to fetch the given key by calling each key fetcher one by
+        one.
         """
+        logger.debug("Starting fetch for %s", verify_request)

-        remaining_requests = {rq for rq in verify_requests if not rq.key_ready.called}
+        found_keys: Dict[str, FetchKeyResult] = {}
+        missing_key_ids = set(verify_request.key_ids)

-        async def do_iterations():
-            try:
-                with Measure(self.clock, "get_server_verify_keys"):
-                    for f in self._key_fetchers:
-                        if not remaining_requests:
-                            return
-                        await self._attempt_key_fetches_with_fetcher(
-                            f, remaining_requests
-                        )
-
-                # look for any requests which weren't satisfied
-                while remaining_requests:
-                    verify_request = remaining_requests.pop()
-                    rq_str = (
-                        "VerifyJsonRequest(server=%s, key_ids=%s, min_valid=%i)"
-                        % (
-                            verify_request.server_name,
-                            verify_request.key_ids,
-                            verify_request.minimum_valid_until_ts,
-                        )
-                    )
-
-                    # If we run the errback immediately, it may cancel our
-                    # loggingcontext while we are still in it, so instead we
-                    # schedule it for the next time round the reactor.
-                    #
-                    # (this also ensures that we don't get a stack overflow if we
-                    # has a massive queue of lookups waiting for this server).
-                    self.clock.call_later(
-                        0,
-                        verify_request.key_ready.errback,
-                        SynapseError(
-                            401,
-                            "Failed to find any key to satisfy %s" % (rq_str,),
-                            Codes.UNAUTHORIZED,
-                        ),
-                    )
-            except Exception as err:
-                # we don't really expect to get here, because any errors should already
-                # have been caught and logged. But if we do, let's log the error and make
-                # sure that all of the deferreds are resolved.
-                logger.error("Unexpected error in _get_server_verify_keys: %s", err)
-                with PreserveLoggingContext():
-                    for verify_request in remaining_requests:
-                        if not verify_request.key_ready.called:
-                            verify_request.key_ready.errback(err)
-
-        run_in_background(do_iterations)
-
-    async def _attempt_key_fetches_with_fetcher(
-        self, fetcher: "KeyFetcher", remaining_requests: Set[VerifyJsonRequest]
-    ):
-        """Use a key fetcher to attempt to satisfy some key requests
+        for fetcher in self._key_fetchers:
+            if not missing_key_ids:
+                break

-        Args:
-            fetcher: fetcher to use to fetch the keys
-            remaining_requests: outstanding key requests.
-                Any successfully-completed requests will be removed from the list.
-        """
-        # The keys to fetch.
-        # server_name -> key_id -> min_valid_ts
-        missing_keys = defaultdict(dict)  # type: Dict[str, Dict[str, int]]
-
-        for verify_request in remaining_requests:
-            # any completed requests should already have been removed
-            assert not verify_request.key_ready.called
-            keys_for_server = missing_keys[verify_request.server_name]
-
-            for key_id in verify_request.key_ids:
-                # If we have several requests for the same key, then we only need to
-                # request that key once, but we should do so with the greatest
-                # min_valid_until_ts of the requests, so that we can satisfy all of
-                # the requests.
-                keys_for_server[key_id] = max(
-                    keys_for_server.get(key_id, -1),
-                    verify_request.minimum_valid_until_ts,
-                )
+            logger.debug("Getting keys from %s for %s", fetcher, verify_request)
+            keys = await fetcher.get_keys(
+                verify_request.server_name,
+                list(missing_key_ids),
+                verify_request.minimum_valid_until_ts,
+            )

-        results = await fetcher.get_keys(missing_keys)
+            for key_id, key in keys.items():
+                if not key:
+                    continue

-        completed = []
-        for verify_request in remaining_requests:
-            server_name = verify_request.server_name
+                # If we already have a result for the given key ID, we keep the
+                # one with the highest `valid_until_ts`.
+                existing_key = found_keys.get(key_id)
+                if existing_key:
+                    if key.valid_until_ts <= existing_key.valid_until_ts:
+                        continue

-            # see if any of the keys we got this time are sufficient to
-            # complete this VerifyJsonRequest.
-            result_keys = results.get(server_name, {})
-            for key_id in verify_request.key_ids:
-                fetch_key_result = result_keys.get(key_id)
-                if not fetch_key_result:
-                    # we didn't get a result for this key
-                    continue
+                # We always store the returned key, even if it doesn't meet the
+                # `minimum_valid_until_ts` requirement, as some verification
+                # requests may still be able to be satisfied by it.
+                #
+                # We still keep looking for the key from other fetchers in that
+                # case though.
+                found_keys[key_id] = key

-                if (
-                    fetch_key_result.valid_until_ts
-                    < verify_request.minimum_valid_until_ts
-                ):
-                    # key was not valid at this point
+                if key.valid_until_ts < verify_request.minimum_valid_until_ts:
                     continue

-                # we have a valid key for this request. If we run the callback
-                # immediately, it may cancel our loggingcontext while we are still in
-                # it, so instead we schedule it for the next time round the reactor.
-                #
-                # (this also ensures that we don't get a stack overflow if we had
-                # a massive queue of lookups waiting for this server).
- logger.debug( - "Found key %s:%s for %s", - server_name, - key_id, - verify_request.request_name, - ) - self.clock.call_later( - 0, - verify_request.key_ready.callback, - (server_name, key_id, fetch_key_result.verify_key), - ) - completed.append(verify_request) - break + missing_key_ids.discard(key_id) - remaining_requests.difference_update(completed) + return found_keys class KeyFetcher(metaclass=abc.ABCMeta): - @abc.abstractmethod + def __init__(self, hs: "HomeServer"): + self._queue = BatchingQueue( + self.__class__.__name__, hs.get_clock(), self._fetch_keys + ) + async def get_keys( - self, keys_to_fetch: Dict[str, Dict[str, int]] - ) -> Dict[str, Dict[str, FetchKeyResult]]: - """ - Args: - keys_to_fetch: - the keys to be fetched. server_name -> key_id -> min_valid_ts + self, server_name: str, key_ids: List[str], minimum_valid_until_ts: int + ) -> Dict[str, FetchKeyResult]: + results = await self._queue.add_to_queue( + _FetchKeyRequest( + server_name=server_name, + key_ids=key_ids, + minimum_valid_until_ts=minimum_valid_until_ts, + ) + ) + return results.get(server_name, {}) - Returns: - Map from server_name -> key_id -> FetchKeyResult - """ - raise NotImplementedError + @abc.abstractmethod + async def _fetch_keys( + self, keys_to_fetch: List[_FetchKeyRequest] + ) -> Dict[str, Dict[str, FetchKeyResult]]: + pass class StoreKeyFetcher(KeyFetcher): """KeyFetcher impl which fetches keys from our data store""" def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastore() + super().__init__(hs) - async def get_keys( - self, keys_to_fetch: Dict[str, Dict[str, int]] - ) -> Dict[str, Dict[str, FetchKeyResult]]: - """see KeyFetcher.get_keys""" + self.store = hs.get_datastore() + async def _fetch_keys(self, keys_to_fetch: List[_FetchKeyRequest]): key_ids_to_fetch = ( - (server_name, key_id) - for server_name, keys_for_server in keys_to_fetch.items() - for key_id in keys_for_server.keys() + (queue_value.server_name, key_id) + for queue_value in keys_to_fetch + for key_id in queue_value.key_ids ) res = await self.store.get_server_verify_keys(key_ids_to_fetch) @@ -578,6 +485,8 @@ async def get_keys( class BaseV2KeyFetcher(KeyFetcher): def __init__(self, hs: "HomeServer"): + super().__init__(hs) + self.store = hs.get_datastore() self.config = hs.config @@ -685,10 +594,10 @@ def __init__(self, hs: "HomeServer"): self.client = hs.get_federation_http_client() self.key_servers = self.config.key_servers - async def get_keys( - self, keys_to_fetch: Dict[str, Dict[str, int]] + async def _fetch_keys( + self, keys_to_fetch: List[_FetchKeyRequest] ) -> Dict[str, Dict[str, FetchKeyResult]]: - """see KeyFetcher.get_keys""" + """see KeyFetcher._fetch_keys""" async def get_key(key_server: TrustedKeyServer) -> Dict: try: @@ -724,12 +633,12 @@ async def get_key(key_server: TrustedKeyServer) -> Dict: return union_of_keys async def get_server_verify_key_v2_indirect( - self, keys_to_fetch: Dict[str, Dict[str, int]], key_server: TrustedKeyServer + self, keys_to_fetch: List[_FetchKeyRequest], key_server: TrustedKeyServer ) -> Dict[str, Dict[str, FetchKeyResult]]: """ Args: keys_to_fetch: - the keys to be fetched. server_name -> key_id -> min_valid_ts + the keys to be fetched. 
key_server: notary server to query for the keys @@ -743,7 +652,7 @@ async def get_server_verify_key_v2_indirect( perspective_name = key_server.server_name logger.info( "Requesting keys %s from notary server %s", - keys_to_fetch.items(), + keys_to_fetch, perspective_name, ) @@ -753,11 +662,13 @@ async def get_server_verify_key_v2_indirect( path="/_matrix/key/v2/query", data={ "server_keys": { - server_name: { - key_id: {"minimum_valid_until_ts": min_valid_ts} - for key_id, min_valid_ts in server_keys.items() + queue_value.server_name: { + key_id: { + "minimum_valid_until_ts": queue_value.minimum_valid_until_ts, + } + for key_id in queue_value.key_ids } - for server_name, server_keys in keys_to_fetch.items() + for queue_value in keys_to_fetch } }, ) @@ -858,7 +769,20 @@ def __init__(self, hs: "HomeServer"): self.client = hs.get_federation_http_client() async def get_keys( - self, keys_to_fetch: Dict[str, Dict[str, int]] + self, server_name: str, key_ids: List[str], minimum_valid_until_ts: int + ) -> Dict[str, FetchKeyResult]: + results = await self._queue.add_to_queue( + _FetchKeyRequest( + server_name=server_name, + key_ids=key_ids, + minimum_valid_until_ts=minimum_valid_until_ts, + ), + key=server_name, + ) + return results.get(server_name, {}) + + async def _fetch_keys( + self, keys_to_fetch: List[_FetchKeyRequest] ) -> Dict[str, Dict[str, FetchKeyResult]]: """ Args: @@ -871,8 +795,10 @@ async def get_keys( results = {} - async def get_key(key_to_fetch_item: Tuple[str, Dict[str, int]]) -> None: - server_name, key_ids = key_to_fetch_item + async def get_key(key_to_fetch_item: _FetchKeyRequest) -> None: + server_name = key_to_fetch_item.server_name + key_ids = key_to_fetch_item.key_ids + try: keys = await self.get_server_verify_key_v2_direct(server_name, key_ids) results[server_name] = keys @@ -883,7 +809,7 @@ async def get_key(key_to_fetch_item: Tuple[str, Dict[str, int]]) -> None: except Exception: logger.exception("Error getting keys %s from %s", key_ids, server_name) - await yieldable_gather_results(get_key, keys_to_fetch.items()) + await yieldable_gather_results(get_key, keys_to_fetch) return results async def get_server_verify_key_v2_direct( @@ -955,37 +881,3 @@ async def get_server_verify_key_v2_direct( keys.update(response_keys) return keys - - -async def _handle_key_deferred(verify_request: VerifyJsonRequest) -> None: - """Waits for the key to become available, and then performs a verification - - Args: - verify_request: - - Raises: - SynapseError if there was a problem performing the verification - """ - server_name = verify_request.server_name - with PreserveLoggingContext(): - _, key_id, verify_key = await verify_request.key_ready - - json_object = verify_request.get_json_object() - - try: - verify_signed_json(json_object, server_name, verify_key) - except SignatureVerifyException as e: - logger.debug( - "Error verifying signature for %s:%s:%s with key %s: %s", - server_name, - verify_key.alg, - verify_key.version, - encode_verify_key_base64(verify_key), - str(e), - ) - raise SynapseError( - 401, - "Invalid signature for server %s with key %s:%s: %s" - % (server_name, verify_key.alg, verify_key.version, str(e)), - Codes.UNAUTHORIZED, - ) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index fdeaa0f37c65..5756fcb551ab 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -152,7 +152,9 @@ async def authenticate_request(self, request, content): ) await self.keyring.verify_json_for_server( - 
origin, json_request, now, "Incoming request" + origin, + json_request, + now, ) logger.debug("Request from %s", origin) diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index d2fc8be5f571..ff8372c4e9ad 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -108,7 +108,9 @@ async def verify_attestation( assert server_name is not None await self.keyring.verify_json_for_server( - server_name, attestation, now, "Group attestation" + server_name, + attestation, + now, ) def create_attestation(self, group_id: str, user_id: str) -> JsonDict: diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index aba1734a55ac..d56a1ae48279 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -22,6 +22,7 @@ from synapse.http.server import DirectServeJsonResource, respond_with_json from synapse.http.servlet import parse_integer, parse_json_object_from_request from synapse.util import json_decoder +from synapse.util.async_helpers import yieldable_gather_results logger = logging.getLogger(__name__) @@ -210,7 +211,13 @@ async def query_keys(self, request, query, query_remote_on_cache_miss=False): # If there is a cache miss, request the missing keys, then recurse (and # ensure the result is sent). if cache_misses and query_remote_on_cache_miss: - await self.fetcher.get_keys(cache_misses) + await yieldable_gather_results( + lambda t: self.fetcher.get_keys(*t), + ( + (server_name, list(keys), 0) + for server_name, keys in cache_misses.items() + ), + ) await self.query_keys(request, query, query_remote_on_cache_miss=False) else: signed_keys = [] diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 2775dfd8807d..745c295d3ba1 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import time +from typing import Dict, List from unittest.mock import Mock import attr @@ -21,7 +22,6 @@ from nacl.signing import SigningKey from signedjson.key import encode_verify_key_base64, get_verify_key -from twisted.internet import defer from twisted.internet.defer import Deferred, ensureDeferred from synapse.api.errors import SynapseError @@ -92,23 +92,23 @@ def test_verify_json_objects_for_server_awaits_previous_requests(self): # deferred completes. 
first_lookup_deferred = Deferred() - async def first_lookup_fetch(keys_to_fetch): - self.assertEquals(current_context().request.id, "context_11") - self.assertEqual(keys_to_fetch, {"server10": {get_key_id(key1): 0}}) + async def first_lookup_fetch( + server_name: str, key_ids: List[str], minimum_valid_until_ts: int + ) -> Dict[str, FetchKeyResult]: + # self.assertEquals(current_context().request.id, "context_11") + self.assertEqual(server_name, "server10") + self.assertEqual(key_ids, [get_key_id(key1)]) + self.assertEqual(minimum_valid_until_ts, 0) await make_deferred_yieldable(first_lookup_deferred) - return { - "server10": { - get_key_id(key1): FetchKeyResult(get_verify_key(key1), 100) - } - } + return {get_key_id(key1): FetchKeyResult(get_verify_key(key1), 100)} mock_fetcher.get_keys.side_effect = first_lookup_fetch async def first_lookup(): with LoggingContext("context_11", request=FakeRequest("context_11")): res_deferreds = kr.verify_json_objects_for_server( - [("server10", json1, 0, "test10"), ("server11", {}, 0, "test11")] + [("server10", json1, 0), ("server11", {}, 0)] ) # the unsigned json should be rejected pretty quickly @@ -126,18 +126,18 @@ async def first_lookup(): d0 = ensureDeferred(first_lookup()) + self.pump() + mock_fetcher.get_keys.assert_called_once() # a second request for a server with outstanding requests # should block rather than start a second call - async def second_lookup_fetch(keys_to_fetch): - self.assertEquals(current_context().request.id, "context_12") - return { - "server10": { - get_key_id(key1): FetchKeyResult(get_verify_key(key1), 100) - } - } + async def second_lookup_fetch( + server_name: str, key_ids: List[str], minimum_valid_until_ts: int + ) -> Dict[str, FetchKeyResult]: + # self.assertEquals(current_context().request.id, "context_12") + return {get_key_id(key1): FetchKeyResult(get_verify_key(key1), 100)} mock_fetcher.get_keys.reset_mock() mock_fetcher.get_keys.side_effect = second_lookup_fetch @@ -146,7 +146,13 @@ async def second_lookup_fetch(keys_to_fetch): async def second_lookup(): with LoggingContext("context_12", request=FakeRequest("context_12")): res_deferreds_2 = kr.verify_json_objects_for_server( - [("server10", json1, 0, "test")] + [ + ( + "server10", + json1, + 0, + ) + ] ) res_deferreds_2[0].addBoth(self.check_context, None) second_lookup_state[0] = 1 @@ -183,11 +189,11 @@ def test_verify_json_for_server(self): signedjson.sign.sign_json(json1, "server9", key1) # should fail immediately on an unsigned object - d = _verify_json_for_server(kr, "server9", {}, 0, "test unsigned") + d = kr.verify_json_for_server("server9", {}, 0) self.get_failure(d, SynapseError) # should succeed on a signed object - d = _verify_json_for_server(kr, "server9", json1, 500, "test signed") + d = kr.verify_json_for_server("server9", json1, 500) # self.assertFalse(d.called) self.get_success(d) @@ -214,24 +220,24 @@ def test_verify_json_for_server_with_null_valid_until_ms(self): signedjson.sign.sign_json(json1, "server9", key1) # should fail immediately on an unsigned object - d = _verify_json_for_server(kr, "server9", {}, 0, "test unsigned") + d = kr.verify_json_for_server("server9", {}, 0) self.get_failure(d, SynapseError) # should fail on a signed object with a non-zero minimum_valid_until_ms, # as it tries to refetch the keys and fails. 
- d = _verify_json_for_server( - kr, "server9", json1, 500, "test signed non-zero min" - ) + d = kr.verify_json_for_server("server9", json1, 500) self.get_failure(d, SynapseError) # We expect the keyring tried to refetch the key once. mock_fetcher.get_keys.assert_called_once_with( - {"server9": {get_key_id(key1): 500}} + "server9", [get_key_id(key1)], 500 ) # should succeed on a signed object with a 0 minimum_valid_until_ms - d = _verify_json_for_server( - kr, "server9", json1, 0, "test signed with zero min" + d = kr.verify_json_for_server( + "server9", + json1, + 0, ) self.get_success(d) @@ -239,15 +245,15 @@ def test_verify_json_dedupes_key_requests(self): """Two requests for the same key should be deduped.""" key1 = signedjson.key.generate_signing_key(1) - async def get_keys(keys_to_fetch): + async def get_keys( + server_name: str, key_ids: List[str], minimum_valid_until_ts: int + ) -> Dict[str, FetchKeyResult]: # there should only be one request object (with the max validity) - self.assertEqual(keys_to_fetch, {"server1": {get_key_id(key1): 1500}}) + self.assertEqual(server_name, "server1") + self.assertEqual(key_ids, [get_key_id(key1)]) + self.assertEqual(minimum_valid_until_ts, 1500) - return { - "server1": { - get_key_id(key1): FetchKeyResult(get_verify_key(key1), 1200) - } - } + return {get_key_id(key1): FetchKeyResult(get_verify_key(key1), 1200)} mock_fetcher = Mock() mock_fetcher.get_keys = Mock(side_effect=get_keys) @@ -259,7 +265,14 @@ async def get_keys(keys_to_fetch): # the first request should succeed; the second should fail because the key # has expired results = kr.verify_json_objects_for_server( - [("server1", json1, 500, "test1"), ("server1", json1, 1500, "test2")] + [ + ( + "server1", + json1, + 500, + ), + ("server1", json1, 1500), + ] ) self.assertEqual(len(results), 2) self.get_success(results[0]) @@ -274,19 +287,21 @@ def test_verify_json_falls_back_to_other_fetchers(self): """If the first fetcher cannot provide a recent enough key, we fall back""" key1 = signedjson.key.generate_signing_key(1) - async def get_keys1(keys_to_fetch): - self.assertEqual(keys_to_fetch, {"server1": {get_key_id(key1): 1500}}) - return { - "server1": {get_key_id(key1): FetchKeyResult(get_verify_key(key1), 800)} - } - - async def get_keys2(keys_to_fetch): - self.assertEqual(keys_to_fetch, {"server1": {get_key_id(key1): 1500}}) - return { - "server1": { - get_key_id(key1): FetchKeyResult(get_verify_key(key1), 1200) - } - } + async def get_keys1( + server_name: str, key_ids: List[str], minimum_valid_until_ts: int + ) -> Dict[str, FetchKeyResult]: + self.assertEqual(server_name, "server1") + self.assertEqual(key_ids, [get_key_id(key1)]) + self.assertEqual(minimum_valid_until_ts, 1500) + return {get_key_id(key1): FetchKeyResult(get_verify_key(key1), 800)} + + async def get_keys2( + server_name: str, key_ids: List[str], minimum_valid_until_ts: int + ) -> Dict[str, FetchKeyResult]: + self.assertEqual(server_name, "server1") + self.assertEqual(key_ids, [get_key_id(key1)]) + self.assertEqual(minimum_valid_until_ts, 1500) + return {get_key_id(key1): FetchKeyResult(get_verify_key(key1), 1200)} mock_fetcher1 = Mock() mock_fetcher1.get_keys = Mock(side_effect=get_keys1) @@ -298,7 +313,18 @@ async def get_keys2(keys_to_fetch): signedjson.sign.sign_json(json1, "server1", key1) results = kr.verify_json_objects_for_server( - [("server1", json1, 1200, "test1"), ("server1", json1, 1500, "test2")] + [ + ( + "server1", + json1, + 1200, + ), + ( + "server1", + json1, + 1500, + ), + ] ) 
self.assertEqual(len(results), 2) self.get_success(results[0]) @@ -349,9 +375,8 @@ async def get_json(destination, path, **kwargs): self.http_client.get_json.side_effect = get_json - keys_to_fetch = {SERVER_NAME: {"key1": 0}} - keys = self.get_success(fetcher.get_keys(keys_to_fetch)) - k = keys[SERVER_NAME][testverifykey_id] + keys = self.get_success(fetcher.get_keys(SERVER_NAME, ["key1"], 0)) + k = keys[testverifykey_id] self.assertEqual(k.valid_until_ts, VALID_UNTIL_TS) self.assertEqual(k.verify_key, testverifykey) self.assertEqual(k.verify_key.alg, "ed25519") @@ -378,7 +403,7 @@ async def get_json(destination, path, **kwargs): # change the server name: the result should be ignored response["server_name"] = "OTHER_SERVER" - keys = self.get_success(fetcher.get_keys(keys_to_fetch)) + keys = self.get_success(fetcher.get_keys(SERVER_NAME, ["key1"], 0)) self.assertEqual(keys, {}) @@ -465,10 +490,9 @@ def test_get_keys_from_perspectives(self): self.expect_outgoing_key_query(SERVER_NAME, "key1", response) - keys_to_fetch = {SERVER_NAME: {"key1": 0}} - keys = self.get_success(fetcher.get_keys(keys_to_fetch)) - self.assertIn(SERVER_NAME, keys) - k = keys[SERVER_NAME][testverifykey_id] + keys = self.get_success(fetcher.get_keys(SERVER_NAME, ["key1"], 0)) + self.assertIn(testverifykey_id, keys) + k = keys[testverifykey_id] self.assertEqual(k.valid_until_ts, VALID_UNTIL_TS) self.assertEqual(k.verify_key, testverifykey) self.assertEqual(k.verify_key.alg, "ed25519") @@ -515,10 +539,9 @@ def test_get_perspectives_own_key(self): self.expect_outgoing_key_query(SERVER_NAME, "key1", response) - keys_to_fetch = {SERVER_NAME: {"key1": 0}} - keys = self.get_success(fetcher.get_keys(keys_to_fetch)) - self.assertIn(SERVER_NAME, keys) - k = keys[SERVER_NAME][testverifykey_id] + keys = self.get_success(fetcher.get_keys(SERVER_NAME, ["key1"], 0)) + self.assertIn(testverifykey_id, keys) + k = keys[testverifykey_id] self.assertEqual(k.valid_until_ts, VALID_UNTIL_TS) self.assertEqual(k.verify_key, testverifykey) self.assertEqual(k.verify_key.alg, "ed25519") @@ -559,14 +582,13 @@ def build_response(): def get_key_from_perspectives(response): fetcher = PerspectivesKeyFetcher(self.hs) - keys_to_fetch = {SERVER_NAME: {"key1": 0}} self.expect_outgoing_key_query(SERVER_NAME, "key1", response) - return self.get_success(fetcher.get_keys(keys_to_fetch)) + return self.get_success(fetcher.get_keys(SERVER_NAME, ["key1"], 0)) # start with a valid response so we can check we are testing the right thing response = build_response() keys = get_key_from_perspectives(response) - k = keys[SERVER_NAME][testverifykey_id] + k = keys[testverifykey_id] self.assertEqual(k.verify_key, testverifykey) # remove the perspectives server's signature @@ -585,23 +607,3 @@ def get_key_from_perspectives(response): def get_key_id(key): """Get the matrix ID tag for a given SigningKey or VerifyKey""" return "%s:%s" % (key.alg, key.version) - - -@defer.inlineCallbacks -def run_in_context(f, *args, **kwargs): - with LoggingContext("testctx"): - rv = yield f(*args, **kwargs) - return rv - - -def _verify_json_for_server(kr, *args): - """thin wrapper around verify_json_for_server which makes sure it is wrapped - with the patched defer.inlineCallbacks. 
- """ - - @defer.inlineCallbacks - def v(): - rv1 = yield kr.verify_json_for_server(*args) - return rv1 - - return run_in_context(v) diff --git a/tests/rest/key/v2/test_remote_key_resource.py b/tests/rest/key/v2/test_remote_key_resource.py index 3b275bc23b47..a75c0ea3f04a 100644 --- a/tests/rest/key/v2/test_remote_key_resource.py +++ b/tests/rest/key/v2/test_remote_key_resource.py @@ -208,10 +208,10 @@ def test_get_key(self): keyid = "ed25519:%s" % (testkey.version,) fetcher = PerspectivesKeyFetcher(self.hs2) - d = fetcher.get_keys({"targetserver": {keyid: 1000}}) + d = fetcher.get_keys("targetserver", [keyid], 1000) res = self.get_success(d) - self.assertIn("targetserver", res) - keyres = res["targetserver"][keyid] + self.assertIn(keyid, res) + keyres = res[keyid] assert isinstance(keyres, FetchKeyResult) self.assertEqual( signedjson.key.encode_verify_key_base64(keyres.verify_key), @@ -230,10 +230,10 @@ def test_get_notary_key(self): keyid = "ed25519:%s" % (testkey.version,) fetcher = PerspectivesKeyFetcher(self.hs2) - d = fetcher.get_keys({self.hs.hostname: {keyid: 1000}}) + d = fetcher.get_keys(self.hs.hostname, [keyid], 1000) res = self.get_success(d) - self.assertIn(self.hs.hostname, res) - keyres = res[self.hs.hostname][keyid] + self.assertIn(keyid, res) + keyres = res[keyid] assert isinstance(keyres, FetchKeyResult) self.assertEqual( signedjson.key.encode_verify_key_base64(keyres.verify_key), @@ -247,10 +247,10 @@ def test_get_notary_keyserver_key(self): keyid = "ed25519:%s" % (self.hs_signing_key.version,) fetcher = PerspectivesKeyFetcher(self.hs2) - d = fetcher.get_keys({self.hs.hostname: {keyid: 1000}}) + d = fetcher.get_keys(self.hs.hostname, [keyid], 1000) res = self.get_success(d) - self.assertIn(self.hs.hostname, res) - keyres = res[self.hs.hostname][keyid] + self.assertIn(keyid, res) + keyres = res[keyid] assert isinstance(keyres, FetchKeyResult) self.assertEqual( signedjson.key.encode_verify_key_base64(keyres.verify_key), diff --git a/tests/util/test_batching_queue.py b/tests/util/test_batching_queue.py index edf29e5b96ae..07be57d72c6d 100644 --- a/tests/util/test_batching_queue.py +++ b/tests/util/test_batching_queue.py @@ -45,37 +45,32 @@ async def _process_queue(self, values): self._pending_calls.append((values, d)) return await make_deferred_yieldable(d) + def _get_sample_with_name(self, metric, name) -> int: + """For a prometheus metric get the value of the sample that has a + matching "name" label. 
+ """ + for sample in metric.collect()[0].samples: + if sample.labels.get("name") == name: + return sample.value + + self.fail("Found no matching sample") + def _assert_metrics(self, queued, keys, in_flight): """Assert that the metrics are correct""" - self.assertEqual(len(number_queued.collect()), 1) - self.assertEqual(len(number_queued.collect()[0].samples), 1) + sample = self._get_sample_with_name(number_queued, self.queue._name) self.assertEqual( - number_queued.collect()[0].samples[0].labels, - {"name": self.queue._name}, - ) - self.assertEqual( - number_queued.collect()[0].samples[0].value, + sample, queued, "number_queued", ) - self.assertEqual(len(number_of_keys.collect()), 1) - self.assertEqual(len(number_of_keys.collect()[0].samples), 1) - self.assertEqual( - number_queued.collect()[0].samples[0].labels, {"name": self.queue._name} - ) - self.assertEqual( - number_of_keys.collect()[0].samples[0].value, keys, "number_of_keys" - ) + sample = self._get_sample_with_name(number_of_keys, self.queue._name) + self.assertEqual(sample, keys, "number_of_keys") - self.assertEqual(len(number_in_flight.collect()), 1) - self.assertEqual(len(number_in_flight.collect()[0].samples), 1) - self.assertEqual( - number_queued.collect()[0].samples[0].labels, {"name": self.queue._name} - ) + sample = self._get_sample_with_name(number_in_flight, self.queue._name) self.assertEqual( - number_in_flight.collect()[0].samples[0].value, + sample, in_flight, "number_in_flight", ) From bf6fd9f4fdf60aab29d5bfac2dfbf7ec3cd7e459 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 2 Jun 2021 17:10:37 +0100 Subject: [PATCH 213/222] github actions: summarize Sytest results in an easy-to-read format (#10094) ... using the script from matrix-org/sytest#1052 --- .github/workflows/tests.yml | 4 ++-- changelog.d/10094.misc | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/10094.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 2ae81b5fcf49..955beb4aa08e 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -232,9 +232,9 @@ jobs: - name: Run SyTest run: /bootstrap.sh synapse working-directory: /src - - name: Dump results.tap + - name: Summarise results.tap if: ${{ always() }} - run: cat /logs/results.tap + run: /sytest/scripts/tap_to_gha.pl /logs/results.tap - name: Upload SyTest logs uses: actions/upload-artifact@v2 if: ${{ always() }} diff --git a/changelog.d/10094.misc b/changelog.d/10094.misc new file mode 100644 index 000000000000..01efe14f7488 --- /dev/null +++ b/changelog.d/10094.misc @@ -0,0 +1 @@ +In Github Actions workflows, summarize the Sytest results in an easy-to-read format. From 0284d2a2976e3d58e9970fdb7590f98a2556326d Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Wed, 2 Jun 2021 19:50:35 +0200 Subject: [PATCH 214/222] Add new admin APIs to remove media by media ID from quarantine. 
(#10044)

Related to: #6681, #5956, #10040

Signed-off-by: Dirk Klimpel <dirk@klimpel.org>
---
 changelog.d/10044.feature              |   1 +
 docs/admin_api/media_admin_api.md      |  22 +++++
 synapse/rest/admin/media.py            |  30 ++++++
 synapse/storage/databases/main/room.py |  30 ++++--
 tests/rest/admin/test_media.py         | 128 +++++++++++++++++++++++++
 5 files changed, 201 insertions(+), 10 deletions(-)
 create mode 100644 changelog.d/10044.feature

diff --git a/changelog.d/10044.feature b/changelog.d/10044.feature
new file mode 100644
index 000000000000..70c0a3851e84
--- /dev/null
+++ b/changelog.d/10044.feature
@@ -0,0 +1 @@
+Add new admin APIs to remove media by media ID from quarantine. Contributed by @dklimpel.
diff --git a/docs/admin_api/media_admin_api.md b/docs/admin_api/media_admin_api.md
index d1b7e390d5a9..7709f3d8c740 100644
--- a/docs/admin_api/media_admin_api.md
+++ b/docs/admin_api/media_admin_api.md
@@ -4,6 +4,7 @@
   * [List all media uploaded by a user](#list-all-media-uploaded-by-a-user)
 - [Quarantine media](#quarantine-media)
   * [Quarantining media by ID](#quarantining-media-by-id)
+  * [Remove media from quarantine by ID](#remove-media-from-quarantine-by-id)
   * [Quarantining media in a room](#quarantining-media-in-a-room)
   * [Quarantining all media of a user](#quarantining-all-media-of-a-user)
   * [Protecting media from being quarantined](#protecting-media-from-being-quarantined)
@@ -77,6 +78,27 @@ Response:
 {}
 ```

+## Remove media from quarantine by ID
+
+This API removes a single piece of local or remote media from quarantine.
+
+Request:
+
+```
+POST /_synapse/admin/v1/media/unquarantine/<server_name>/<media_id>
+
+{}
+```
+
+Where `server_name` is in the form of `example.org`, and `media_id` is in the
+form of `abcdefg12345...`.
+
+Response:
+
+```json
+{}
+```
+
 ## Quarantining media in a room

 This API quarantines all local and remote media in a room.
diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py
index 2c71af4279bf..b68db2c57c16 100644
--- a/synapse/rest/admin/media.py
+++ b/synapse/rest/admin/media.py
@@ -120,6 +120,35 @@ async def on_POST(
         return 200, {}


+class UnquarantineMediaByID(RestServlet):
+    """Removes local or remote media from quarantine by a given ID, so that it
+    can once again be downloaded via this server.
+    """
+
+    PATTERNS = admin_patterns(
+        "/media/unquarantine/(?P<server_name>[^/]+)/(?P<media_id>[^/]+)"
+    )
+
+    def __init__(self, hs: "HomeServer"):
+        self.store = hs.get_datastore()
+        self.auth = hs.get_auth()
+
+    async def on_POST(
+        self, request: SynapseRequest, server_name: str, media_id: str
+    ) -> Tuple[int, JsonDict]:
+        requester = await self.auth.get_user_by_req(request)
+        await assert_user_is_admin(self.auth, requester.user)
+
+        logging.info(
+            "Remove media from quarantine by ID: %s/%s", server_name, media_id
+        )
+
+        # Remove this media ID from quarantine
+        await self.store.quarantine_media_by_id(server_name, media_id, None)
+
+        return 200, {}
+
+
 class ProtectMediaByID(RestServlet):
     """Protect local media from being quarantined."""

@@ -290,6 +319,7 @@ def register_servlets_for_media_repo(hs: "HomeServer", http_server):
     PurgeMediaCacheRestServlet(hs).register(http_server)
     QuarantineMediaInRoom(hs).register(http_server)
     QuarantineMediaByID(hs).register(http_server)
+    UnquarantineMediaByID(hs).register(http_server)
     QuarantineMediaByUser(hs).register(http_server)
     ProtectMediaByID(hs).register(http_server)
     UnprotectMediaByID(hs).register(http_server)
diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
index 0cf450f81d73..2a96bcd3141b 100644
--- a/synapse/storage/databases/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -764,14 +764,15 @@ async def quarantine_media_by_id(
         self,
         server_name: str,
         media_id: str,
-        quarantined_by: str,
+        quarantined_by: Optional[str],
     ) -> int:
-        """quarantines a single local or remote media id
+        """quarantines or unquarantines a single local or remote media id

         Args:
             server_name: The name of the server that holds this media
             media_id: The ID of the media to be quarantined
             quarantined_by: The user ID that initiated the quarantine request
+                If it is `None`, media will be removed from quarantine
         """
         logger.info("Quarantining media: %s/%s", server_name, media_id)
         is_local = server_name == self.config.server_name
@@ -838,9 +839,9 @@ def _quarantine_media_txn(
         txn,
         local_mxcs: List[str],
         remote_mxcs: List[Tuple[str, str]],
-        quarantined_by: str,
+        quarantined_by: Optional[str],
     ) -> int:
-        """Quarantine local and remote media items
+        """Quarantine and unquarantine local and remote media items

         Args:
             txn (cursor)
             local_mxcs: A list of local mxc URLs
             remote_mxcs: A list of (remote server, media id) tuples representing
                 remote mxc URLs
             quarantined_by: The ID of the user who initiated the quarantine request
+                If it is `None`, media will be removed from quarantine
         Returns:
             The total number of media items quarantined
         """
+
         # Update all the tables to set the quarantined_by flag
-        txn.executemany(
-            """
+        sql = """
             UPDATE local_media_repository
             SET quarantined_by = ?
-            WHERE media_id = ? AND safe_from_quarantine = ?
-            """,
-            ((quarantined_by, media_id, False) for media_id in local_mxcs),
-        )
+            WHERE media_id = ?
+        """
+
+        # set quarantine
+        if quarantined_by is not None:
+            sql += "AND safe_from_quarantine = ?"
+            rows = [(quarantined_by, media_id, False) for media_id in local_mxcs]
+        # remove from quarantine
+        else:
+            rows = [(quarantined_by, media_id) for media_id in local_mxcs]
+
+        txn.executemany(sql, rows)

         # Note that a rowcount of -1 can be used to indicate no rows were affected.
        total_media_quarantined = txn.rowcount if txn.rowcount > 0 else 0

diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py
index f741121ea236..6fee0f95b6cb 100644
--- a/tests/rest/admin/test_media.py
+++ b/tests/rest/admin/test_media.py
@@ -566,6 +566,134 @@ def _access_media(self, server_and_media_id, expect_success=True):
         self.assertFalse(os.path.exists(local_path))


+class QuarantineMediaByIDTestCase(unittest.HomeserverTestCase):
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        synapse.rest.admin.register_servlets_for_media_repo,
+        login.register_servlets,
+    ]
+
+    def prepare(self, reactor, clock, hs):
+        media_repo = hs.get_media_repository_resource()
+        self.store = hs.get_datastore()
+        self.server_name = hs.hostname
+
+        self.admin_user = self.register_user("admin", "pass", admin=True)
+        self.admin_user_tok = self.login("admin", "pass")
+
+        # Create media
+        upload_resource = media_repo.children[b"upload"]
+        # file size is 67 bytes
+        image_data = unhexlify(
+            b"89504e470d0a1a0a0000000d4948445200000001000000010806"
+            b"0000001f15c4890000000a49444154789c63000100000500010d"
+            b"0a2db40000000049454e44ae426082"
+        )
+
+        # Upload some media into the room
+        response = self.helper.upload_media(
+            upload_resource, image_data, tok=self.admin_user_tok, expect_code=200
+        )
+        # Extract media ID from the response
+        server_and_media_id = response["content_uri"][6:]  # Cut off 'mxc://'
+        self.media_id = server_and_media_id.split("/")[1]
+
+        self.url = "/_synapse/admin/v1/media/%s/%s/%s"
+
+    @parameterized.expand(["quarantine", "unquarantine"])
+    def test_no_auth(self, action: str):
+        """
+        Try to quarantine or unquarantine media without authentication.
+        """
+
+        channel = self.make_request(
+            "POST",
+            self.url % (action, self.server_name, self.media_id),
+            b"{}",
+        )
+
+        self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"])
+        self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"])
+
+    @parameterized.expand(["quarantine", "unquarantine"])
+    def test_requester_is_no_admin(self, action: str):
+        """
+        If the user is not a server admin, an error is returned.
+ """ + self.other_user = self.register_user("user", "pass") + self.other_user_token = self.login("user", "pass") + + channel = self.make_request( + "POST", + self.url % (action, self.server_name, self.media_id), + access_token=self.other_user_token, + ) + + self.assertEqual(403, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) + + def test_quarantine_media(self): + """ + Tests that quarantining and remove from quarantine a media is successfully + """ + + media_info = self.get_success(self.store.get_local_media(self.media_id)) + self.assertFalse(media_info["quarantined_by"]) + + # quarantining + channel = self.make_request( + "POST", + self.url % ("quarantine", self.server_name, self.media_id), + access_token=self.admin_user_tok, + ) + + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertFalse(channel.json_body) + + media_info = self.get_success(self.store.get_local_media(self.media_id)) + self.assertTrue(media_info["quarantined_by"]) + + # remove from quarantine + channel = self.make_request( + "POST", + self.url % ("unquarantine", self.server_name, self.media_id), + access_token=self.admin_user_tok, + ) + + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertFalse(channel.json_body) + + media_info = self.get_success(self.store.get_local_media(self.media_id)) + self.assertFalse(media_info["quarantined_by"]) + + def test_quarantine_protected_media(self): + """ + Tests that quarantining from protected media fails + """ + + # protect + self.get_success(self.store.mark_local_media_as_safe(self.media_id, safe=True)) + + # verify protection + media_info = self.get_success(self.store.get_local_media(self.media_id)) + self.assertTrue(media_info["safe_from_quarantine"]) + + # quarantining + channel = self.make_request( + "POST", + self.url % ("quarantine", self.server_name, self.media_id), + access_token=self.admin_user_tok, + ) + + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertFalse(channel.json_body) + + # verify that is not in quarantine + media_info = self.get_success(self.store.get_local_media(self.media_id)) + self.assertFalse(media_info["quarantined_by"]) + + class ProtectMediaByIDTestCase(unittest.HomeserverTestCase): servlets = [ From 36a7ff0c867e6df969517c58d3eb2520b2ab39d9 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 2 Jun 2021 11:31:41 -0400 Subject: [PATCH 215/222] Do not show invite-only rooms in spaces summary (unless joined/invited). (#10109) --- changelog.d/10109.bugfix | 1 + synapse/handlers/space_summary.py | 19 +++++++++---------- 2 files changed, 10 insertions(+), 10 deletions(-) create mode 100644 changelog.d/10109.bugfix diff --git a/changelog.d/10109.bugfix b/changelog.d/10109.bugfix new file mode 100644 index 000000000000..bc41bf9e5e3c --- /dev/null +++ b/changelog.d/10109.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in v1.35.0 where invite-only rooms would be shown to users in a space who were not invited. 
diff --git a/synapse/handlers/space_summary.py b/synapse/handlers/space_summary.py index abd9ddecca20..046dba6fd87a 100644 --- a/synapse/handlers/space_summary.py +++ b/synapse/handlers/space_summary.py @@ -26,7 +26,6 @@ HistoryVisibility, Membership, ) -from synapse.api.errors import AuthError from synapse.events import EventBase from synapse.events.utils import format_event_for_client_v2 from synapse.types import JsonDict @@ -456,16 +455,16 @@ async def _is_room_accessible( return True # Otherwise, check if they should be allowed access via membership in a space. - try: - await self._event_auth_handler.check_restricted_join_rules( - state_ids, room_version, requester, member_event + if self._event_auth_handler.has_restricted_join_rules( + state_ids, room_version + ): + allowed_spaces = ( + await self._event_auth_handler.get_spaces_that_allow_join(state_ids) ) - except AuthError: - # The user doesn't have access due to spaces, but might have access - # another way. Keep trying. - pass - else: - return True + if await self._event_auth_handler.is_user_in_rooms( + allowed_spaces, requester + ): + return True # If this is a request over federation, check if the host is in the room or # is in one of the spaces specified via the join rules. From 57c01dca297b7e14eb7be2b40d80f7577002754f Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 3 Jun 2021 08:18:22 -0400 Subject: [PATCH 216/222] 1.35.1 --- CHANGES.md | 9 +++++++++ changelog.d/10109.bugfix | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/10109.bugfix diff --git a/CHANGES.md b/CHANGES.md index f03a53affc1a..5794f2bffd30 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.35.1 (2021-06-03) +=========================== + +Bugfixes +-------- + +- Fix a bug introduced in v1.35.0 where invite-only rooms would be shown to users in a space who were not invited. ([\#10109](https://github.com/matrix-org/synapse/issues/10109)) + + Synapse 1.35.0 (2021-06-01) =========================== diff --git a/changelog.d/10109.bugfix b/changelog.d/10109.bugfix deleted file mode 100644 index bc41bf9e5e3c..000000000000 --- a/changelog.d/10109.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in v1.35.0 where invite-only rooms would be shown to users in a space who were not invited. diff --git a/debian/changelog b/debian/changelog index d5efb8ccba63..084e878def4f 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.35.1) stable; urgency=medium + + * New synapse release 1.35.1. + + -- Synapse Packaging team Thu, 03 Jun 2021 08:11:29 -0400 + matrix-synapse-py3 (1.35.0) stable; urgency=medium * New synapse release 1.35.0. diff --git a/synapse/__init__.py b/synapse/__init__.py index d9843a170860..445e8a5cad37 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -47,7 +47,7 @@ except ImportError: pass -__version__ = "1.35.0" +__version__ = "1.35.1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 56667733419ebf070f1a7f7c9a04070f1b944572 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 3 Jun 2021 08:19:38 -0400 Subject: [PATCH 217/222] Clarify changelog. 
---
 CHANGES.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGES.md b/CHANGES.md
index 5794f2bffd30..04d260f8e5f7 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -4,7 +4,7 @@ Synapse 1.35.1 (2021-06-03)
 Bugfixes
 --------

-- Fix a bug introduced in v1.35.0 where invite-only rooms would be shown to users in a space who were not invited. ([\#10109](https://github.com/matrix-org/synapse/issues/10109))
+- Fix a bug introduced in v1.35.0 where invite-only rooms would be shown to all users in a space, regardless of if the user had access to it. ([\#10109](https://github.com/matrix-org/synapse/issues/10109))


 Synapse 1.35.0 (2021-06-01)

From 5325f0308c5937d2e4447d2c64c8819b3c148d9c Mon Sep 17 00:00:00 2001
From: Travis Ralston
Date: Thu, 3 Jun 2021 06:50:49 -0600
Subject: [PATCH 218/222] r0.6.1 support: /rooms/:roomId/aliases endpoint
 (#9224)

[MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432) added this
endpoint originally but it has since been included in the spec for nearly
a year.

This is progress towards https://github.com/matrix-org/synapse/issues/8334
---
 changelog.d/9224.feature           | 1 +
 synapse/rest/client/v1/room.py     | 2 +-
 tests/rest/client/v1/test_rooms.py | 3 +--
 3 files changed, 3 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/9224.feature

diff --git a/changelog.d/9224.feature b/changelog.d/9224.feature
new file mode 100644
index 000000000000..76519c23e2dc
--- /dev/null
+++ b/changelog.d/9224.feature
@@ -0,0 +1 @@
+Add new endpoint `/_matrix/client/r0/rooms/{roomId}/aliases` from Client-Server API r0.6.1 (previously [MSC2432](https://github.com/matrix-org/matrix-doc/pull/2432)).
diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py
index 70286b0ff7f2..5a9c27f75f36 100644
--- a/synapse/rest/client/v1/room.py
+++ b/synapse/rest/client/v1/room.py
@@ -910,7 +910,7 @@ class RoomAliasListServlet(RestServlet):
             r"^/_matrix/client/unstable/org\.matrix\.msc2432"
             r"/rooms/(?P<room_id>[^/]*)/aliases"
         ),
-    ]
+    ] + list(client_patterns("/rooms/(?P<room_id>[^/]*)/aliases$", unstable=False))

     def __init__(self, hs: "HomeServer"):
         super().__init__()
diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py
index 7c4bdcdfdd45..5b1096d09112 100644
--- a/tests/rest/client/v1/test_rooms.py
+++ b/tests/rest/client/v1/test_rooms.py
@@ -1880,8 +1880,7 @@ def _get_aliases(self, access_token: str, expected_code: int = 200) -> JsonDict:
         """Calls the endpoint under test. returns the json response object."""
         channel = self.make_request(
             "GET",
-            "/_matrix/client/unstable/org.matrix.msc2432/rooms/%s/aliases"
-            % (self.room_id,),
+            "/_matrix/client/r0/rooms/%s/aliases" % (self.room_id,),
             access_token=access_token,
         )
         self.assertEqual(channel.code, expected_code, channel.result)

From 73636cab69c32746ef6b7708deeeb0c718b7b3b9 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Thu, 3 Jun 2021 14:06:03 +0100
Subject: [PATCH 219/222] Convert admin api docs to markdown (#10089)

So that they render nicely in mdbook (see #10086), and so that we no
longer have a mix of structured text languages in our documentation
(excluding files outside of `docs/`).
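The conversion itself is mechanical enough to script. As a minimal sketch of
how such an rst-to-Markdown pass could be automated, assuming `pypandoc` (and
a `pandoc` binary) were available; nothing in the patch says the conversion
was actually done this way, and the glob path is illustrative:

```python
# Illustrative only: bulk-convert reStructuredText docs to GitHub-flavoured
# Markdown with pypandoc (requires a pandoc binary on the PATH).
from pathlib import Path

import pypandoc

for rst in Path("docs/admin_api").glob("*.rst"):
    md = pypandoc.convert_file(str(rst), to="gfm", format="rst")
    rst.with_suffix(".md").write_text(md)
    rst.unlink()  # the patch deletes the .rst originals
```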
--- changelog.d/10089.doc | 1 + docs/admin_api/account_validity.md | 42 + docs/admin_api/account_validity.rst | 42 - ...e_history_api.rst => purge_history_api.md} | 63 +- docs/admin_api/register_api.md | 73 ++ docs/admin_api/register_api.rst | 68 -- docs/admin_api/user_admin_api.md | 1001 +++++++++++++++++ docs/admin_api/user_admin_api.rst | 981 ---------------- .../{version_api.rst => version_api.md} | 21 +- 9 files changed, 1160 insertions(+), 1132 deletions(-) create mode 100644 changelog.d/10089.doc create mode 100644 docs/admin_api/account_validity.md delete mode 100644 docs/admin_api/account_validity.rst rename docs/admin_api/{purge_history_api.rst => purge_history_api.md} (56%) create mode 100644 docs/admin_api/register_api.md delete mode 100644 docs/admin_api/register_api.rst create mode 100644 docs/admin_api/user_admin_api.md delete mode 100644 docs/admin_api/user_admin_api.rst rename docs/admin_api/{version_api.rst => version_api.md} (59%) diff --git a/changelog.d/10089.doc b/changelog.d/10089.doc new file mode 100644 index 000000000000..d9e93773ab6a --- /dev/null +++ b/changelog.d/10089.doc @@ -0,0 +1 @@ +Convert the remaining Admin API documentation files to markdown. diff --git a/docs/admin_api/account_validity.md b/docs/admin_api/account_validity.md new file mode 100644 index 000000000000..b74b5d0c1a96 --- /dev/null +++ b/docs/admin_api/account_validity.md @@ -0,0 +1,42 @@ +# Account validity API + +This API allows a server administrator to manage the validity of an account. To +use it, you must enable the account validity feature (under +`account_validity`) in Synapse's configuration. + +## Renew account + +This API extends the validity of an account by as much time as configured in the +`period` parameter from the `account_validity` configuration. + +The API is: + +``` +POST /_synapse/admin/v1/account_validity/validity +``` + +with the following body: + +```json +{ + "user_id": "", + "expiration_ts": 0, + "enable_renewal_emails": true +} +``` + + +`expiration_ts` is an optional parameter and overrides the expiration date, +which otherwise defaults to now + validity period. + +`enable_renewal_emails` is also an optional parameter and enables/disables +sending renewal emails to the user. Defaults to true. + +The API returns with the new expiration date for this account, as a timestamp in +milliseconds since epoch: + +```json +{ + "expiration_ts": 0 +} +``` diff --git a/docs/admin_api/account_validity.rst b/docs/admin_api/account_validity.rst deleted file mode 100644 index 7559de4c5716..000000000000 --- a/docs/admin_api/account_validity.rst +++ /dev/null @@ -1,42 +0,0 @@ -Account validity API -==================== - -This API allows a server administrator to manage the validity of an account. To -use it, you must enable the account validity feature (under -``account_validity``) in Synapse's configuration. - -Renew account -------------- - -This API extends the validity of an account by as much time as configured in the -``period`` parameter from the ``account_validity`` configuration. - -The API is:: - - POST /_synapse/admin/v1/account_validity/validity - -with the following body: - -.. code:: json - - { - "user_id": "", - "expiration_ts": 0, - "enable_renewal_emails": true - } - - -``expiration_ts`` is an optional parameter and overrides the expiration date, -which otherwise defaults to now + validity period. - -``enable_renewal_emails`` is also an optional parameter and enables/disables -sending renewal emails to the user. Defaults to true. 
-
-The API returns with the new expiration date for this account, as a timestamp in
-milliseconds since epoch:
-
-.. code:: json
-
-    {
-        "expiration_ts": 0
-    }
diff --git a/docs/admin_api/purge_history_api.rst b/docs/admin_api/purge_history_api.md
similarity index 56%
rename from docs/admin_api/purge_history_api.rst
rename to docs/admin_api/purge_history_api.md
index 92cd05f2a0b9..44971acd917c 100644
--- a/docs/admin_api/purge_history_api.rst
+++ b/docs/admin_api/purge_history_api.md
@@ -1,5 +1,4 @@
-Purge History API
-=================
+# Purge History API

 The purge history API allows server admins to purge historic events from their
 database, reclaiming disk space.
@@ -13,10 +12,12 @@ delete the last message in a room.

 The API is:

-``POST /_synapse/admin/v1/purge_history/<room_id>[/<event_id>]``
+```
+POST /_synapse/admin/v1/purge_history/<room_id>[/<event_id>]
+```

-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
+To use it, you will need to authenticate by providing an `access_token` for a
+server admin: [Admin API](README.rst)

 By default, events sent by local users are not deleted, as they may represent
 the only copies of this content in existence. (Events sent by remote users are
@@ -24,54 +25,54 @@ deleted.)

 Room state data (such as joins, leaves, topic) is always preserved.

-To delete local message events as well, set ``delete_local_events`` in the body:
+To delete local message events as well, set `delete_local_events` in the body:

-.. code:: json
-
-   {
-      "delete_local_events": true
-   }
+```
+{
+    "delete_local_events": true
+}
+```

 The caller must specify the point in the room to purge up to. This can be
 specified by including an event_id in the URI, or by setting a
-``purge_up_to_event_id`` or ``purge_up_to_ts`` in the request body. If an event
+`purge_up_to_event_id` or `purge_up_to_ts` in the request body. If an event
 id is given, that event (and others at the same graph depth) will be retained.
-If ``purge_up_to_ts`` is given, it should be a timestamp since the unix epoch,
+If `purge_up_to_ts` is given, it should be a timestamp since the unix epoch,
 in milliseconds.

 The API starts the purge running, and returns immediately with a JSON body with
 a purge id:

-.. code:: json
-
-    {
-        "purge_id": "<purge_id>"
-    }
+```json
+{
+    "purge_id": "<purge_id>"
+}
+```

-Purge status query
-------------------
+## Purge status query

 It is possible to poll for updates on recent purges with a second API;

-``GET /_synapse/admin/v1/purge_history_status/<purge_id>``
+```
+GET /_synapse/admin/v1/purge_history_status/<purge_id>
+```

-Again, you will need to authenticate by providing an ``access_token`` for a
+Again, you will need to authenticate by providing an `access_token` for a
 server admin.

 This API returns a JSON body like the following:

-.. code:: json
-
-    {
-        "status": "active"
-    }
+```json
+{
+    "status": "active"
+}
+```

-The status will be one of ``active``, ``complete``, or ``failed``.
+The status will be one of `active`, `complete`, or `failed`.

-Reclaim disk space (Postgres)
------------------------------
+## Reclaim disk space (Postgres)

 To reclaim the disk space and return it to the operating system, you need to
 run `VACUUM FULL;` on the database.
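+
+A minimal sketch of running this from Python with `psycopg2` (connection
+details are placeholders; note that `VACUUM FULL` cannot run inside a
+transaction, so autocommit must be enabled, and it takes an exclusive lock
+on each table while it runs):
+
+```python
+import psycopg2
+
+conn = psycopg2.connect("dbname=synapse user=synapse_user")
+conn.autocommit = True  # VACUUM cannot run inside a transaction block
+with conn.cursor() as cur:
+    cur.execute("VACUUM FULL;")
+conn.close()
+```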
-https://www.postgresql.org/docs/current/sql-vacuum.html + diff --git a/docs/admin_api/register_api.md b/docs/admin_api/register_api.md new file mode 100644 index 000000000000..c346090bb175 --- /dev/null +++ b/docs/admin_api/register_api.md @@ -0,0 +1,73 @@ +# Shared-Secret Registration + +This API allows for the creation of users in an administrative and +non-interactive way. This is generally used for bootstrapping a Synapse +instance with administrator accounts. + +To authenticate yourself to the server, you will need both the shared secret +(`registration_shared_secret` in the homeserver configuration), and a +one-time nonce. If the registration shared secret is not configured, this API +is not enabled. + +To fetch the nonce, you need to request one from the API: + +``` +> GET /_synapse/admin/v1/register + +< {"nonce": "thisisanonce"} +``` + +Once you have the nonce, you can make a `POST` to the same URL with a JSON +body containing the nonce, username, password, whether they are an admin +(optional, False by default), and a HMAC digest of the content. Also you can +set the displayname (optional, `username` by default). + +As an example: + +``` +> POST /_synapse/admin/v1/register +> { + "nonce": "thisisanonce", + "username": "pepper_roni", + "displayname": "Pepper Roni", + "password": "pizza", + "admin": true, + "mac": "mac_digest_here" + } + +< { + "access_token": "token_here", + "user_id": "@pepper_roni:localhost", + "home_server": "test", + "device_id": "device_id_here" + } +``` + +The MAC is the hex digest output of the HMAC-SHA1 algorithm, with the key being +the shared secret and the content being the nonce, user, password, either the +string "admin" or "notadmin", and optionally the user_type +each separated by NULs. For an example of generation in Python: + +```python +import hmac, hashlib + +def generate_mac(nonce, user, password, admin=False, user_type=None): + + mac = hmac.new( + key=shared_secret, + digestmod=hashlib.sha1, + ) + + mac.update(nonce.encode('utf8')) + mac.update(b"\x00") + mac.update(user.encode('utf8')) + mac.update(b"\x00") + mac.update(password.encode('utf8')) + mac.update(b"\x00") + mac.update(b"admin" if admin else b"notadmin") + if user_type: + mac.update(b"\x00") + mac.update(user_type.encode('utf8')) + + return mac.hexdigest() +``` \ No newline at end of file diff --git a/docs/admin_api/register_api.rst b/docs/admin_api/register_api.rst deleted file mode 100644 index c3057b204b13..000000000000 --- a/docs/admin_api/register_api.rst +++ /dev/null @@ -1,68 +0,0 @@ -Shared-Secret Registration -========================== - -This API allows for the creation of users in an administrative and -non-interactive way. This is generally used for bootstrapping a Synapse -instance with administrator accounts. - -To authenticate yourself to the server, you will need both the shared secret -(``registration_shared_secret`` in the homeserver configuration), and a -one-time nonce. If the registration shared secret is not configured, this API -is not enabled. - -To fetch the nonce, you need to request one from the API:: - - > GET /_synapse/admin/v1/register - - < {"nonce": "thisisanonce"} - -Once you have the nonce, you can make a ``POST`` to the same URL with a JSON -body containing the nonce, username, password, whether they are an admin -(optional, False by default), and a HMAC digest of the content. Also you can -set the displayname (optional, ``username`` by default). 
- -As an example:: - - > POST /_synapse/admin/v1/register - > { - "nonce": "thisisanonce", - "username": "pepper_roni", - "displayname": "Pepper Roni", - "password": "pizza", - "admin": true, - "mac": "mac_digest_here" - } - - < { - "access_token": "token_here", - "user_id": "@pepper_roni:localhost", - "home_server": "test", - "device_id": "device_id_here" - } - -The MAC is the hex digest output of the HMAC-SHA1 algorithm, with the key being -the shared secret and the content being the nonce, user, password, either the -string "admin" or "notadmin", and optionally the user_type -each separated by NULs. For an example of generation in Python:: - - import hmac, hashlib - - def generate_mac(nonce, user, password, admin=False, user_type=None): - - mac = hmac.new( - key=shared_secret, - digestmod=hashlib.sha1, - ) - - mac.update(nonce.encode('utf8')) - mac.update(b"\x00") - mac.update(user.encode('utf8')) - mac.update(b"\x00") - mac.update(password.encode('utf8')) - mac.update(b"\x00") - mac.update(b"admin" if admin else b"notadmin") - if user_type: - mac.update(b"\x00") - mac.update(user_type.encode('utf8')) - - return mac.hexdigest() diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md new file mode 100644 index 000000000000..0c843316c93a --- /dev/null +++ b/docs/admin_api/user_admin_api.md @@ -0,0 +1,1001 @@ +# User Admin API + +## Query User Account + +This API returns information about a specific user account. + +The api is: + +``` +GET /_synapse/admin/v2/users/ +``` + +To use it, you will need to authenticate by providing an `access_token` for a +server admin: [Admin API](README.rst) + +It returns a JSON body like the following: + +```json +{ + "displayname": "User", + "threepids": [ + { + "medium": "email", + "address": "" + }, + { + "medium": "email", + "address": "" + } + ], + "avatar_url": "", + "admin": 0, + "deactivated": 0, + "shadow_banned": 0, + "password_hash": "$2b$12$p9B4GkqYdRTPGD", + "creation_ts": 1560432506, + "appservice_id": null, + "consent_server_notice_sent": null, + "consent_version": null +} +``` + +URL parameters: + +- `user_id`: fully-qualified user id: for example, `@user:server.com`. + +## Create or modify Account + +This API allows an administrator to create or modify a user account with a +specific `user_id`. + +This api is: + +``` +PUT /_synapse/admin/v2/users/ +``` + +with a body of: + +```json +{ + "password": "user_password", + "displayname": "User", + "threepids": [ + { + "medium": "email", + "address": "" + }, + { + "medium": "email", + "address": "" + } + ], + "avatar_url": "", + "admin": false, + "deactivated": false +} +``` + +To use it, you will need to authenticate by providing an `access_token` for a +server admin: [Admin API](README.rst) + +URL parameters: + +- `user_id`: fully-qualified user id: for example, `@user:server.com`. + +Body parameters: + +- `password`, optional. If provided, the user's password is updated and all + devices are logged out. + +- `displayname`, optional, defaults to the value of `user_id`. + +- `threepids`, optional, allows setting the third-party IDs (email, msisdn) + belonging to a user. + +- `avatar_url`, optional, must be a + [MXC URI](https://matrix.org/docs/spec/client_server/r0.6.0#matrix-content-mxc-uris). + +- `admin`, optional, defaults to `false`. + +- `deactivated`, optional. If unspecified, deactivation state will be left + unchanged on existing accounts and set to `false` for new accounts. + A user cannot be erased by deactivating with this API. 
For details on + deactivating users see [Deactivate Account](#deactivate-account). + +If the user already exists then optional parameters default to the current value. + +In order to re-activate an account `deactivated` must be set to `false`. If +users do not login via single-sign-on, a new `password` must be provided. + +## List Accounts + +This API returns all local user accounts. +By default, the response is ordered by ascending user ID. + +``` +GET /_synapse/admin/v2/users?from=0&limit=10&guests=false +``` + +To use it, you will need to authenticate by providing an `access_token` for a +server admin: [Admin API](README.rst) + +A response body like the following is returned: + +```json +{ + "users": [ + { + "name": "", + "is_guest": 0, + "admin": 0, + "user_type": null, + "deactivated": 0, + "shadow_banned": 0, + "displayname": "", + "avatar_url": null + }, { + "name": "", + "is_guest": 0, + "admin": 1, + "user_type": null, + "deactivated": 0, + "shadow_banned": 0, + "displayname": "", + "avatar_url": "" + } + ], + "next_token": "100", + "total": 200 +} +``` + +To paginate, check for `next_token` and if present, call the endpoint again +with `from` set to the value of `next_token`. This will return a new page. + +If the endpoint does not return a `next_token` then there are no more users +to paginate through. + +**Parameters** + +The following parameters should be set in the URL: + +- `user_id` - Is optional and filters to only return users with user IDs + that contain this value. This parameter is ignored when using the `name` parameter. +- `name` - Is optional and filters to only return users with user ID localparts + **or** displaynames that contain this value. +- `guests` - string representing a bool - Is optional and if `false` will **exclude** guest users. + Defaults to `true` to include guest users. +- `deactivated` - string representing a bool - Is optional and if `true` will **include** deactivated users. + Defaults to `false` to exclude deactivated users. +- `limit` - string representing a positive integer - Is optional but is used for pagination, + denoting the maximum number of items to return in this call. Defaults to `100`. +- `from` - string representing a positive integer - Is optional but used for pagination, + denoting the offset in the returned results. This should be treated as an opaque value and + not explicitly set to anything other than the return value of `next_token` from a previous call. + Defaults to `0`. +- `order_by` - The method by which to sort the returned list of users. + If the ordered field has duplicates, the second order is always by ascending `name`, + which guarantees a stable ordering. Valid values are: + + - `name` - Users are ordered alphabetically by `name`. This is the default. + - `is_guest` - Users are ordered by `is_guest` status. + - `admin` - Users are ordered by `admin` status. + - `user_type` - Users are ordered alphabetically by `user_type`. + - `deactivated` - Users are ordered by `deactivated` status. + - `shadow_banned` - Users are ordered by `shadow_banned` status. + - `displayname` - Users are ordered alphabetically by `displayname`. + - `avatar_url` - Users are ordered alphabetically by avatar URL. + +- `dir` - Direction of media order. Either `f` for forwards or `b` for backwards. + Setting this value to `b` will reverse the above sort order. Defaults to `f`. + +Caution. The database only has indexes on the columns `name` and `created_ts`. 
+This means that if a different sort order is used (`is_guest`, `admin`, +`user_type`, `deactivated`, `shadow_banned`, `avatar_url` or `displayname`), +this can cause a large load on the database, especially for large environments. + +**Response** + +The following fields are returned in the JSON response body: + +- `users` - An array of objects, each containing information about an user. + User objects contain the following fields: + + - `name` - string - Fully-qualified user ID (ex. `@user:server.com`). + - `is_guest` - bool - Status if that user is a guest account. + - `admin` - bool - Status if that user is a server administrator. + - `user_type` - string - Type of the user. Normal users are type `None`. + This allows user type specific behaviour. There are also types `support` and `bot`. + - `deactivated` - bool - Status if that user has been marked as deactivated. + - `shadow_banned` - bool - Status if that user has been marked as shadow banned. + - `displayname` - string - The user's display name if they have set one. + - `avatar_url` - string - The user's avatar URL if they have set one. + +- `next_token`: string representing a positive integer - Indication for pagination. See above. +- `total` - integer - Total number of media. + + +## Query current sessions for a user + +This API returns information about the active sessions for a specific user. + +The endpoints are: + +``` +GET /_synapse/admin/v1/whois/ +``` + +and: + +``` +GET /_matrix/client/r0/admin/whois/ +``` + +See also: [Client Server +API Whois](https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid). + +To use it, you will need to authenticate by providing an `access_token` for a +server admin: [Admin API](README.rst) + +It returns a JSON body like the following: + +```json +{ + "user_id": "", + "devices": { + "": { + "sessions": [ + { + "connections": [ + { + "ip": "1.2.3.4", + "last_seen": 1417222374433, + "user_agent": "Mozilla/5.0 ..." + }, + { + "ip": "1.2.3.10", + "last_seen": 1417222374500, + "user_agent": "Dalvik/2.1.0 ..." + } + ] + } + ] + } + } +} +``` + +`last_seen` is measured in milliseconds since the Unix epoch. + +## Deactivate Account + +This API deactivates an account. It removes active access tokens, resets the +password, and deletes third-party IDs (to prevent the user requesting a +password reset). + +It can also mark the user as GDPR-erased. This means messages sent by the +user will still be visible by anyone that was in the room when these messages +were sent, but hidden from users joining the room afterwards. + +The api is: + +``` +POST /_synapse/admin/v1/deactivate/ +``` + +with a body of: + +```json +{ + "erase": true +} +``` + +To use it, you will need to authenticate by providing an `access_token` for a +server admin: [Admin API](README.rst) + +The erase parameter is optional and defaults to `false`. +An empty body may be passed for backwards compatibility. 
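+
+As a rough sketch, a deactivation request could be issued from Python like
+this (the homeserver URL and admin token are placeholders):
+
+```python
+import requests
+
+resp = requests.post(
+    "https://homeserver.example.com/_synapse/admin/v1/deactivate/@user:server.com",
+    headers={"Authorization": "Bearer <admin_access_token>"},
+    json={"erase": True},  # GDPR-erase as well as deactivate
+)
+resp.raise_for_status()
+```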
+
+The following actions are performed when deactivating a user:
+
+- Try to unbind 3PIDs from the identity server
+- Remove all 3PIDs from the homeserver
+- Delete all devices and E2EE keys
+- Delete all access tokens
+- Delete the password hash
+- Removal from all rooms the user is a member of
+- Remove the user from the user directory
+- Reject all pending invites
+- Remove all account validity information related to the user
+
+The following additional actions are performed during deactivation if `erase`
+is set to `true`:
+
+- Remove the user's display name
+- Remove the user's avatar URL
+- Mark the user as erased
+
+
+## Reset password
+
+Changes the password of another user. This will automatically log the user out of all their devices.
+
+The api is:
+
+```
+POST /_synapse/admin/v1/reset_password/<user_id>
+```
+
+with a body of:
+
+```json
+{
+   "new_password": "<new_password>",
+   "logout_devices": true
+}
+```
+
+To use it, you will need to authenticate by providing an `access_token` for a
+server admin: [Admin API](README.rst)
+
+The parameter `new_password` is required.
+The parameter `logout_devices` is optional and defaults to `true`.
+
+
+## Get whether a user is a server administrator or not
+
+The api is:
+
+```
+GET /_synapse/admin/v1/users/<user_id>/admin
+```
+
+To use it, you will need to authenticate by providing an `access_token` for a
+server admin: [Admin API](README.rst)
+
+A response body like the following is returned:
+
+```json
+{
+    "admin": true
+}
+```
+
+
+## Change whether a user is a server administrator or not
+
+Note that you cannot demote yourself.
+
+The api is:
+
+```
+PUT /_synapse/admin/v1/users/<user_id>/admin
+```
+
+with a body of:
+
+```json
+{
+    "admin": true
+}
+```
+
+To use it, you will need to authenticate by providing an `access_token` for a
+server admin: [Admin API](README.rst)
+
+
+## List room memberships of a user
+
+Gets a list of all `room_id` values that a specific `user_id` is a member of.
+
+The API is:
+
+```
+GET /_synapse/admin/v1/users/<user_id>/joined_rooms
+```
+
+To use it, you will need to authenticate by providing an `access_token` for a
+server admin: [Admin API](README.rst)
+
+A response body like the following is returned:
+
+```json
+    {
+        "joined_rooms": [
+            "!DuGcnbhHGaSZQoNQR:matrix.org",
+            "!ZtSaPCawyWtxfWiIy:matrix.org"
+        ],
+        "total": 2
+    }
+```
+
+The server returns the list of rooms of which both the user and the server
+are members. If the user is local, all the rooms of which the user is
+a member are returned.
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `user_id` - fully qualified: for example, `@user:server.com`.
+
+**Response**
+
+The following fields are returned in the JSON response body:
+
+- `joined_rooms` - An array of `room_id`.
+- `total` - Number of rooms.
+
+
+## List media of a user
+Gets a list of all local media that a specific `user_id` has created.
+By default, the response is ordered by descending creation date and ascending media ID.
+The newest media is on top. You can change the order with parameters
+`order_by` and `dir`.
+ +The API is: + +``` +GET /_synapse/admin/v1/users//media +``` + +To use it, you will need to authenticate by providing an `access_token` for a +server admin: [Admin API](README.rst) + +A response body like the following is returned: + +```json +{ + "media": [ + { + "created_ts": 100400, + "last_access_ts": null, + "media_id": "qXhyRzulkwLsNHTbpHreuEgo", + "media_length": 67, + "media_type": "image/png", + "quarantined_by": null, + "safe_from_quarantine": false, + "upload_name": "test1.png" + }, + { + "created_ts": 200400, + "last_access_ts": null, + "media_id": "FHfiSnzoINDatrXHQIXBtahw", + "media_length": 67, + "media_type": "image/png", + "quarantined_by": null, + "safe_from_quarantine": false, + "upload_name": "test2.png" + } + ], + "next_token": 3, + "total": 2 +} +``` + +To paginate, check for `next_token` and if present, call the endpoint again +with `from` set to the value of `next_token`. This will return a new page. + +If the endpoint does not return a `next_token` then there are no more +reports to paginate through. + +**Parameters** + +The following parameters should be set in the URL: + +- `user_id` - string - fully qualified: for example, `@user:server.com`. +- `limit`: string representing a positive integer - Is optional but is used for pagination, + denoting the maximum number of items to return in this call. Defaults to `100`. +- `from`: string representing a positive integer - Is optional but used for pagination, + denoting the offset in the returned results. This should be treated as an opaque value and + not explicitly set to anything other than the return value of `next_token` from a previous call. + Defaults to `0`. +- `order_by` - The method by which to sort the returned list of media. + If the ordered field has duplicates, the second order is always by ascending `media_id`, + which guarantees a stable ordering. Valid values are: + + - `media_id` - Media are ordered alphabetically by `media_id`. + - `upload_name` - Media are ordered alphabetically by name the media was uploaded with. + - `created_ts` - Media are ordered by when the content was uploaded in ms. + Smallest to largest. This is the default. + - `last_access_ts` - Media are ordered by when the content was last accessed in ms. + Smallest to largest. + - `media_length` - Media are ordered by length of the media in bytes. + Smallest to largest. + - `media_type` - Media are ordered alphabetically by MIME-type. + - `quarantined_by` - Media are ordered alphabetically by the user ID that + initiated the quarantine request for this media. + - `safe_from_quarantine` - Media are ordered by the status if this media is safe + from quarantining. + +- `dir` - Direction of media order. Either `f` for forwards or `b` for backwards. + Setting this value to `b` will reverse the above sort order. Defaults to `f`. + +If neither `order_by` nor `dir` is set, the default order is newest media on top +(corresponds to `order_by` = `created_ts` and `dir` = `b`). + +Caution. The database only has indexes on the columns `media_id`, +`user_id` and `created_ts`. This means that if a different sort order is used +(`upload_name`, `last_access_ts`, `media_length`, `media_type`, +`quarantined_by` or `safe_from_quarantine`), this can cause a large load on the +database, especially for large environments. + +**Response** + +The following fields are returned in the JSON response body: + +- `media` - An array of objects, each containing information about a media. 
+ Media objects contain the following fields: + + - `created_ts` - integer - Timestamp when the content was uploaded in ms. + - `last_access_ts` - integer - Timestamp when the content was last accessed in ms. + - `media_id` - string - The id used to refer to the media. + - `media_length` - integer - Length of the media in bytes. + - `media_type` - string - The MIME-type of the media. + - `quarantined_by` - string - The user ID that initiated the quarantine request + for this media. + + - `safe_from_quarantine` - bool - Status if this media is safe from quarantining. + - `upload_name` - string - The name the media was uploaded with. + +- `next_token`: integer - Indication for pagination. See above. +- `total` - integer - Total number of media. + +## Login as a user + +Get an access token that can be used to authenticate as that user. Useful for +when admins wish to do actions on behalf of a user. + +The API is: + +``` +POST /_synapse/admin/v1/users//login +{} +``` + +An optional `valid_until_ms` field can be specified in the request body as an +integer timestamp that specifies when the token should expire. By default tokens +do not expire. + +A response body like the following is returned: + +```json +{ + "access_token": "" +} +``` + +This API does *not* generate a new device for the user, and so will not appear +their `/devices` list, and in general the target user should not be able to +tell they have been logged in as. + +To expire the token call the standard `/logout` API with the token. + +Note: The token will expire if the *admin* user calls `/logout/all` from any +of their devices, but the token will *not* expire if the target user does the +same. + + +## User devices + +### List all devices +Gets information about all devices for a specific `user_id`. + +The API is: + +``` +GET /_synapse/admin/v2/users//devices +``` + +To use it, you will need to authenticate by providing an `access_token` for a +server admin: [Admin API](README.rst) + +A response body like the following is returned: + +```json +{ + "devices": [ + { + "device_id": "QBUAZIFURK", + "display_name": "android", + "last_seen_ip": "1.2.3.4", + "last_seen_ts": 1474491775024, + "user_id": "" + }, + { + "device_id": "AUIECTSRND", + "display_name": "ios", + "last_seen_ip": "1.2.3.5", + "last_seen_ts": 1474491775025, + "user_id": "" + } + ], + "total": 2 +} +``` + +**Parameters** + +The following parameters should be set in the URL: + +- `user_id` - fully qualified: for example, `@user:server.com`. + +**Response** + +The following fields are returned in the JSON response body: + +- `devices` - An array of objects, each containing information about a device. + Device objects contain the following fields: + + - `device_id` - Identifier of device. + - `display_name` - Display name set by the user for this device. + Absent if no name has been set. + - `last_seen_ip` - The IP address where this device was last seen. + (May be a few minutes out of date, for efficiency reasons). + - `last_seen_ts` - The timestamp (in milliseconds since the unix epoch) when this + devices was last seen. (May be a few minutes out of date, for efficiency reasons). + - `user_id` - Owner of device. + +- `total` - Total number of user's devices. + +### Delete multiple devices +Deletes the given devices for a specific `user_id`, and invalidates +any access token associated with them. 
+ +The API is: + +``` +POST /_synapse/admin/v2/users//delete_devices + +{ + "devices": [ + "QBUAZIFURK", + "AUIECTSRND" + ], +} +``` + +To use it, you will need to authenticate by providing an `access_token` for a +server admin: [Admin API](README.rst) + +An empty JSON dict is returned. + +**Parameters** + +The following parameters should be set in the URL: + +- `user_id` - fully qualified: for example, `@user:server.com`. + +The following fields are required in the JSON request body: + +- `devices` - The list of device IDs to delete. + +### Show a device +Gets information on a single device, by `device_id` for a specific `user_id`. + +The API is: + +``` +GET /_synapse/admin/v2/users//devices/ +``` + +To use it, you will need to authenticate by providing an `access_token` for a +server admin: [Admin API](README.rst) + +A response body like the following is returned: + +```json +{ + "device_id": "", + "display_name": "android", + "last_seen_ip": "1.2.3.4", + "last_seen_ts": 1474491775024, + "user_id": "" +} +``` + +**Parameters** + +The following parameters should be set in the URL: + +- `user_id` - fully qualified: for example, `@user:server.com`. +- `device_id` - The device to retrieve. + +**Response** + +The following fields are returned in the JSON response body: + +- `device_id` - Identifier of device. +- `display_name` - Display name set by the user for this device. + Absent if no name has been set. +- `last_seen_ip` - The IP address where this device was last seen. + (May be a few minutes out of date, for efficiency reasons). +- `last_seen_ts` - The timestamp (in milliseconds since the unix epoch) when this + devices was last seen. (May be a few minutes out of date, for efficiency reasons). +- `user_id` - Owner of device. + +### Update a device +Updates the metadata on the given `device_id` for a specific `user_id`. + +The API is: + +``` +PUT /_synapse/admin/v2/users//devices/ + +{ + "display_name": "My other phone" +} +``` + +To use it, you will need to authenticate by providing an `access_token` for a +server admin: [Admin API](README.rst) + +An empty JSON dict is returned. + +**Parameters** + +The following parameters should be set in the URL: + +- `user_id` - fully qualified: for example, `@user:server.com`. +- `device_id` - The device to update. + +The following fields are required in the JSON request body: + +- `display_name` - The new display name for this device. If not given, + the display name is unchanged. + +### Delete a device +Deletes the given `device_id` for a specific `user_id`, +and invalidates any access token associated with it. + +The API is: + +``` +DELETE /_synapse/admin/v2/users//devices/ + +{} +``` + +To use it, you will need to authenticate by providing an `access_token` for a +server admin: [Admin API](README.rst) + +An empty JSON dict is returned. + +**Parameters** + +The following parameters should be set in the URL: + +- `user_id` - fully qualified: for example, `@user:server.com`. +- `device_id` - The device to delete. + +## List all pushers +Gets information about all pushers for a specific `user_id`. 
+
+The API is:
+
+```
+GET /_synapse/admin/v1/users/<user_id>/pushers
+```
+
+To use it, you will need to authenticate by providing an `access_token` for a
+server admin: [Admin API](README.rst)
+
+A response body like the following is returned:
+
+```json
+{
+    "pushers": [
+        {
+            "app_display_name":"HTTP Push Notifications",
+            "app_id":"m.http",
+            "data": {
+                "url":"example.com"
+            },
+            "device_display_name":"pushy push",
+            "kind":"http",
+            "lang":"None",
+            "profile_tag":"",
+            "pushkey":"a@example.com"
+        }
+    ],
+    "total": 1
+}
+```
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `user_id` - fully qualified: for example, `@user:server.com`.
+
+**Response**
+
+The following fields are returned in the JSON response body:
+
+- `pushers` - An array containing the current pushers for the user
+
+  - `app_display_name` - string - A string that will allow the user to identify
+    what application owns this pusher.
+
+  - `app_id` - string - This is a reverse-DNS style identifier for the application.
+    Max length, 64 chars.
+
+  - `data` - A dictionary of information for the pusher implementation itself.
+
+    - `url` - string - Required if `kind` is `http`. The URL to use to send
+      notifications to.
+
+    - `format` - string - The format to use when sending notifications to the
+      Push Gateway.
+
+  - `device_display_name` - string - A string that will allow the user to identify
+    what device owns this pusher.
+
+  - `kind` - string - The kind of pusher. "http" is a pusher that sends HTTP pokes.
+
+  - `lang` - string - The preferred language for receiving notifications
+    (e.g. 'en' or 'en-US')
+
+  - `profile_tag` - string - This string determines which set of device specific rules
+    this pusher executes.
+
+  - `pushkey` - string - This is a unique identifier for this pusher.
+    Max length, 512 bytes.
+
+- `total` - integer - Number of pushers.
+
+See also the
+[Client-Server API Spec on pushers](https://matrix.org/docs/spec/client_server/latest#get-matrix-client-r0-pushers).
+
+## Shadow-banning users
+
+Shadow-banning is a useful tool for moderating malicious or egregiously abusive users.
+A shadow-banned user receives successful responses to their client-server API requests,
+but the events are not propagated into rooms. This can be an effective tool as it
+(hopefully) takes longer for the user to realise they are being moderated before
+pivoting to another account.
+
+Shadow-banning a user should be used as a tool of last resort and may lead to confusing
+or broken behaviour for the client. A shadow-banned user will not receive any
+notification and it is generally more appropriate to ban or kick abusive users.
+A shadow-banned user will be unable to contact anyone on the server.
+
+The API is:
+
+```
+POST /_synapse/admin/v1/users/<user_id>/shadow_ban
+```
+
+To use it, you will need to authenticate by providing an `access_token` for a
+server admin: [Admin API](README.rst)
+
+An empty JSON dict is returned.
+
+**Parameters**
+
+The following parameters should be set in the URL:
+
+- `user_id` - The fully qualified MXID: for example, `@user:server.com`. The user must
+  be local.
+
+## Override ratelimiting for users
+
+This API allows overriding or disabling ratelimiting for a specific user.
+There are specific APIs to set, get and delete a ratelimit.
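+
+As a quick orientation before the individual endpoints below (sketch only;
+the URL, token and values are placeholders):
+
+```python
+import requests
+
+base = "https://homeserver.example.com/_synapse/admin/v1/users/@user:server.com/override_ratelimit"
+auth = {"Authorization": "Bearer <admin_access_token>"}
+
+# Set a custom ratelimit (0/0 disables ratelimiting for this user entirely)
+requests.post(base, headers=auth, json={"messages_per_second": 0, "burst_count": 0})
+
+# Read the override back
+print(requests.get(base, headers=auth).json())
+
+# Remove the override again
+requests.delete(base, headers=auth)
+```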
+ +### Get status of ratelimit + +The API is: + +``` +GET /_synapse/admin/v1/users//override_ratelimit +``` + +To use it, you will need to authenticate by providing an `access_token` for a +server admin: [Admin API](README.rst) + +A response body like the following is returned: + +```json +{ + "messages_per_second": 0, + "burst_count": 0 +} +``` + +**Parameters** + +The following parameters should be set in the URL: + +- `user_id` - The fully qualified MXID: for example, `@user:server.com`. The user must + be local. + +**Response** + +The following fields are returned in the JSON response body: + +- `messages_per_second` - integer - The number of actions that can + be performed in a second. `0` mean that ratelimiting is disabled for this user. +- `burst_count` - integer - How many actions that can be performed before + being limited. + +If **no** custom ratelimit is set, an empty JSON dict is returned. + +```json +{} +``` + +### Set ratelimit + +The API is: + +``` +POST /_synapse/admin/v1/users//override_ratelimit +``` + +To use it, you will need to authenticate by providing an `access_token` for a +server admin: [Admin API](README.rst) + +A response body like the following is returned: + +```json +{ + "messages_per_second": 0, + "burst_count": 0 +} +``` + +**Parameters** + +The following parameters should be set in the URL: + +- `user_id` - The fully qualified MXID: for example, `@user:server.com`. The user must + be local. + +Body parameters: + +- `messages_per_second` - positive integer, optional. The number of actions that can + be performed in a second. Defaults to `0`. +- `burst_count` - positive integer, optional. How many actions that can be performed + before being limited. Defaults to `0`. + +To disable users' ratelimit set both values to `0`. + +**Response** + +The following fields are returned in the JSON response body: + +- `messages_per_second` - integer - The number of actions that can + be performed in a second. +- `burst_count` - integer - How many actions that can be performed before + being limited. + +### Delete ratelimit + +The API is: + +``` +DELETE /_synapse/admin/v1/users//override_ratelimit +``` + +To use it, you will need to authenticate by providing an `access_token` for a +server admin: [Admin API](README.rst) + +An empty JSON dict is returned. + +```json +{} +``` + +**Parameters** + +The following parameters should be set in the URL: + +- `user_id` - The fully qualified MXID: for example, `@user:server.com`. The user must + be local. + diff --git a/docs/admin_api/user_admin_api.rst b/docs/admin_api/user_admin_api.rst deleted file mode 100644 index dbce9c90b6c7..000000000000 --- a/docs/admin_api/user_admin_api.rst +++ /dev/null @@ -1,981 +0,0 @@ -.. contents:: - -Query User Account -================== - -This API returns information about a specific user account. - -The api is:: - - GET /_synapse/admin/v2/users/ - -To use it, you will need to authenticate by providing an ``access_token`` for a -server admin: see `README.rst `_. - -It returns a JSON body like the following: - -.. 
code:: json - - { - "displayname": "User", - "threepids": [ - { - "medium": "email", - "address": "" - }, - { - "medium": "email", - "address": "" - } - ], - "avatar_url": "", - "admin": 0, - "deactivated": 0, - "shadow_banned": 0, - "password_hash": "$2b$12$p9B4GkqYdRTPGD", - "creation_ts": 1560432506, - "appservice_id": null, - "consent_server_notice_sent": null, - "consent_version": null - } - -URL parameters: - -- ``user_id``: fully-qualified user id: for example, ``@user:server.com``. - -Create or modify Account -======================== - -This API allows an administrator to create or modify a user account with a -specific ``user_id``. - -This api is:: - - PUT /_synapse/admin/v2/users/ - -with a body of: - -.. code:: json - - { - "password": "user_password", - "displayname": "User", - "threepids": [ - { - "medium": "email", - "address": "" - }, - { - "medium": "email", - "address": "" - } - ], - "avatar_url": "", - "admin": false, - "deactivated": false - } - -To use it, you will need to authenticate by providing an ``access_token`` for a -server admin: see `README.rst `_. - -URL parameters: - -- ``user_id``: fully-qualified user id: for example, ``@user:server.com``. - -Body parameters: - -- ``password``, optional. If provided, the user's password is updated and all - devices are logged out. - -- ``displayname``, optional, defaults to the value of ``user_id``. - -- ``threepids``, optional, allows setting the third-party IDs (email, msisdn) - belonging to a user. - -- ``avatar_url``, optional, must be a - `MXC URI `_. - -- ``admin``, optional, defaults to ``false``. - -- ``deactivated``, optional. If unspecified, deactivation state will be left - unchanged on existing accounts and set to ``false`` for new accounts. - A user cannot be erased by deactivating with this API. For details on deactivating users see - `Deactivate Account <#deactivate-account>`_. - -If the user already exists then optional parameters default to the current value. - -In order to re-activate an account ``deactivated`` must be set to ``false``. If -users do not login via single-sign-on, a new ``password`` must be provided. - -List Accounts -============= - -This API returns all local user accounts. -By default, the response is ordered by ascending user ID. - -The API is:: - - GET /_synapse/admin/v2/users?from=0&limit=10&guests=false - -To use it, you will need to authenticate by providing an ``access_token`` for a -server admin: see `README.rst `_. - -A response body like the following is returned: - -.. code:: json - - { - "users": [ - { - "name": "", - "is_guest": 0, - "admin": 0, - "user_type": null, - "deactivated": 0, - "shadow_banned": 0, - "displayname": "", - "avatar_url": null - }, { - "name": "", - "is_guest": 0, - "admin": 1, - "user_type": null, - "deactivated": 0, - "shadow_banned": 0, - "displayname": "", - "avatar_url": "" - } - ], - "next_token": "100", - "total": 200 - } - -To paginate, check for ``next_token`` and if present, call the endpoint again -with ``from`` set to the value of ``next_token``. This will return a new page. - -If the endpoint does not return a ``next_token`` then there are no more users -to paginate through. - -**Parameters** - -The following parameters should be set in the URL: - -- ``user_id`` - Is optional and filters to only return users with user IDs - that contain this value. This parameter is ignored when using the ``name`` parameter. -- ``name`` - Is optional and filters to only return users with user ID localparts - **or** displaynames that contain this value. 
-- ``guests`` - string representing a bool - Is optional and if ``false`` will **exclude** guest users. - Defaults to ``true`` to include guest users. -- ``deactivated`` - string representing a bool - Is optional and if ``true`` will **include** deactivated users. - Defaults to ``false`` to exclude deactivated users. -- ``limit`` - string representing a positive integer - Is optional but is used for pagination, - denoting the maximum number of items to return in this call. Defaults to ``100``. -- ``from`` - string representing a positive integer - Is optional but used for pagination, - denoting the offset in the returned results. This should be treated as an opaque value and - not explicitly set to anything other than the return value of ``next_token`` from a previous call. - Defaults to ``0``. -- ``order_by`` - The method by which to sort the returned list of users. - If the ordered field has duplicates, the second order is always by ascending ``name``, - which guarantees a stable ordering. Valid values are: - - - ``name`` - Users are ordered alphabetically by ``name``. This is the default. - - ``is_guest`` - Users are ordered by ``is_guest`` status. - - ``admin`` - Users are ordered by ``admin`` status. - - ``user_type`` - Users are ordered alphabetically by ``user_type``. - - ``deactivated`` - Users are ordered by ``deactivated`` status. - - ``shadow_banned`` - Users are ordered by ``shadow_banned`` status. - - ``displayname`` - Users are ordered alphabetically by ``displayname``. - - ``avatar_url`` - Users are ordered alphabetically by avatar URL. - -- ``dir`` - Direction of media order. Either ``f`` for forwards or ``b`` for backwards. - Setting this value to ``b`` will reverse the above sort order. Defaults to ``f``. - -Caution. The database only has indexes on the columns ``name`` and ``created_ts``. -This means that if a different sort order is used (``is_guest``, ``admin``, -``user_type``, ``deactivated``, ``shadow_banned``, ``avatar_url`` or ``displayname``), -this can cause a large load on the database, especially for large environments. - -**Response** - -The following fields are returned in the JSON response body: - -- ``users`` - An array of objects, each containing information about an user. - User objects contain the following fields: - - - ``name`` - string - Fully-qualified user ID (ex. ``@user:server.com``). - - ``is_guest`` - bool - Status if that user is a guest account. - - ``admin`` - bool - Status if that user is a server administrator. - - ``user_type`` - string - Type of the user. Normal users are type ``None``. - This allows user type specific behaviour. There are also types ``support`` and ``bot``. - - ``deactivated`` - bool - Status if that user has been marked as deactivated. - - ``shadow_banned`` - bool - Status if that user has been marked as shadow banned. - - ``displayname`` - string - The user's display name if they have set one. - - ``avatar_url`` - string - The user's avatar URL if they have set one. - -- ``next_token``: string representing a positive integer - Indication for pagination. See above. -- ``total`` - integer - Total number of media. - - -Query current sessions for a user -================================= - -This API returns information about the active sessions for a specific user. - -The api is:: - - GET /_synapse/admin/v1/whois/ - -and:: - - GET /_matrix/client/r0/admin/whois/ - -See also: `Client Server API Whois -`_ - -To use it, you will need to authenticate by providing an ``access_token`` for a -server admin: see `README.rst `_. 
-
-Query current sessions for a user
-=================================
-
-This API returns information about the active sessions for a specific user.
-
-The api is::
-
-    GET /_synapse/admin/v1/whois/<user_id>
-
-and::
-
-    GET /_matrix/client/r0/admin/whois/<user_id>
-
-See also: `Client Server API Whois
-<https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid>`_
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-It returns a JSON body like the following:
-
-.. code:: json
-
-    {
-        "user_id": "",
-        "devices": {
-            "": {
-                "sessions": [
-                    {
-                        "connections": [
-                            {
-                                "ip": "1.2.3.4",
-                                "last_seen": 1417222374433,
-                                "user_agent": "Mozilla/5.0 ..."
-                            },
-                            {
-                                "ip": "1.2.3.10",
-                                "last_seen": 1417222374500,
-                                "user_agent": "Dalvik/2.1.0 ..."
-                            }
-                        ]
-                    }
-                ]
-            }
-        }
-    }
-
-``last_seen`` is measured in milliseconds since the Unix epoch.
-
-Deactivate Account
-==================
-
-This API deactivates an account. It removes active access tokens, resets the
-password, and deletes third-party IDs (to prevent the user requesting a
-password reset).
-
-It can also mark the user as GDPR-erased. This means messages sent by the
-user will still be visible by anyone that was in the room when these messages
-were sent, but hidden from users joining the room afterwards.
-
-The api is::
-
-    POST /_synapse/admin/v1/deactivate/<user_id>
-
-with a body of:
-
-.. code:: json
-
-    {
-        "erase": true
-    }
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-The erase parameter is optional and defaults to ``false``.
-An empty body may be passed for backwards compatibility.
-
-The following actions are performed when deactivating a user:
-
-- Try to unbind 3PIDs from the identity server
-- Remove all 3PIDs from the homeserver
-- Delete all devices and E2EE keys
-- Delete all access tokens
-- Delete the password hash
-- Removal from all rooms the user is a member of
-- Remove the user from the user directory
-- Reject all pending invites
-- Remove all account validity information related to the user
-
-The following additional actions are performed during deactivation if ``erase``
-is set to ``true``:
-
-- Remove the user's display name
-- Remove the user's avatar URL
-- Mark the user as erased
-
-
-Reset password
-==============
-
-Changes the password of another user. This will automatically log the user out of all their devices.
-
-The api is::
-
-    POST /_synapse/admin/v1/reset_password/<user_id>
-
-with a body of:
-
-.. code:: json
-
-    {
-       "new_password": "",
-       "logout_devices": true
-    }
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-The parameter ``new_password`` is required.
-The parameter ``logout_devices`` is optional and defaults to ``true``.
-
-Get whether a user is a server administrator or not
-===================================================
-
-
-The api is::
-
-    GET /_synapse/admin/v1/users/<user_id>/admin
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-A response body like the following is returned:
-
-.. code:: json
-
-    {
-        "admin": true
-    }
-
-
-Change whether a user is a server administrator or not
-======================================================
-
-Note that you cannot demote yourself.
-
-The api is::
-
-    PUT /_synapse/admin/v1/users/<user_id>/admin
-
-with a body of:
-
-.. code:: json
-
-    {
-        "admin": true
-    }
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
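As an illustration of the password-reset endpoint above, here is a minimal
Python sketch. It assumes the third-party ``requests`` package, and the
homeserver URL and admin token are placeholders:

.. code:: python

    import requests

    HOMESERVER = "https://matrix.example.com"  # placeholder
    ADMIN_TOKEN = "syt_..."                    # placeholder

    def reset_password(user_id: str, new_password: str) -> None:
        """Set a new password and log the user out of all their devices."""
        resp = requests.post(
            f"{HOMESERVER}/_synapse/admin/v1/reset_password/{user_id}",
            headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
            json={"new_password": new_password, "logout_devices": True},
        )
        resp.raise_for_status()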
-
-List room memberships of a user
-================================
-Gets a list of all ``room_id`` that a specific ``user_id`` is a member of.
-
-The API is::
-
-    GET /_synapse/admin/v1/users/<user_id>/joined_rooms
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-A response body like the following is returned:
-
-.. code:: json
-
-    {
-        "joined_rooms": [
-            "!DuGcnbhHGaSZQoNQR:matrix.org",
-            "!ZtSaPCawyWtxfWiIy:matrix.org"
-        ],
-        "total": 2
-    }
-
-The server returns the list of rooms in which the user and the server are
-both members. If the user is local, all of the rooms in which the user is a
-member are returned.
-
-**Parameters**
-
-The following parameters should be set in the URL:
-
-- ``user_id`` - fully qualified: for example, ``@user:server.com``.
-
-**Response**
-
-The following fields are returned in the JSON response body:
-
-- ``joined_rooms`` - An array of ``room_id``.
-- ``total`` - Number of rooms.
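To illustrate, a minimal Python sketch of the membership query (using the
third-party ``requests`` package; the homeserver URL and admin token are
placeholders):

.. code:: python

    import requests

    HOMESERVER = "https://matrix.example.com"  # placeholder
    ADMIN_TOKEN = "syt_..."                    # placeholder

    def joined_rooms(user_id: str) -> list:
        """Return the room IDs the server knows the user to be a member of."""
        resp = requests.get(
            f"{HOMESERVER}/_synapse/admin/v1/users/{user_id}/joined_rooms",
            headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
        )
        resp.raise_for_status()
        return resp.json()["joined_rooms"]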
-
-
-List media of a user
-====================
-Gets a list of all local media that a specific ``user_id`` has created.
-By default, the response is ordered by descending creation date and ascending media ID.
-The newest media is on top. You can change the order with parameters
-``order_by`` and ``dir``.
-
-The API is::
-
-    GET /_synapse/admin/v1/users/<user_id>/media
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-A response body like the following is returned:
-
-.. code:: json
-
-    {
-        "media": [
-            {
-                "created_ts": 100400,
-                "last_access_ts": null,
-                "media_id": "qXhyRzulkwLsNHTbpHreuEgo",
-                "media_length": 67,
-                "media_type": "image/png",
-                "quarantined_by": null,
-                "safe_from_quarantine": false,
-                "upload_name": "test1.png"
-            },
-            {
-                "created_ts": 200400,
-                "last_access_ts": null,
-                "media_id": "FHfiSnzoINDatrXHQIXBtahw",
-                "media_length": 67,
-                "media_type": "image/png",
-                "quarantined_by": null,
-                "safe_from_quarantine": false,
-                "upload_name": "test2.png"
-            }
-        ],
-        "next_token": 3,
-        "total": 2
-    }
-
-To paginate, check for ``next_token`` and if present, call the endpoint again
-with ``from`` set to the value of ``next_token``. This will return a new page.
-
-If the endpoint does not return a ``next_token`` then there are no more
-media to paginate through.
-
-**Parameters**
-
-The following parameters should be set in the URL:
-
-- ``user_id`` - string - fully qualified: for example, ``@user:server.com``.
-- ``limit``: string representing a positive integer - Is optional but is used for pagination,
-  denoting the maximum number of items to return in this call. Defaults to ``100``.
-- ``from``: string representing a positive integer - Is optional but used for pagination,
-  denoting the offset in the returned results. This should be treated as an opaque value and
-  not explicitly set to anything other than the return value of ``next_token`` from a previous call.
-  Defaults to ``0``.
-- ``order_by`` - The method by which to sort the returned list of media.
-  If the ordered field has duplicates, the second order is always by ascending ``media_id``,
-  which guarantees a stable ordering. Valid values are:
-
-  - ``media_id`` - Media are ordered alphabetically by ``media_id``.
-  - ``upload_name`` - Media are ordered alphabetically by the name the media was uploaded with.
-  - ``created_ts`` - Media are ordered by when the content was uploaded in ms.
-    Smallest to largest. This is the default.
-  - ``last_access_ts`` - Media are ordered by when the content was last accessed in ms.
-    Smallest to largest.
-  - ``media_length`` - Media are ordered by length of the media in bytes.
-    Smallest to largest.
-  - ``media_type`` - Media are ordered alphabetically by MIME-type.
-  - ``quarantined_by`` - Media are ordered alphabetically by the user ID that
-    initiated the quarantine request for this media.
-  - ``safe_from_quarantine`` - Media are ordered by whether the media is safe
-    from quarantining.
-
-- ``dir`` - Direction of media order. Either ``f`` for forwards or ``b`` for backwards.
-  Setting this value to ``b`` will reverse the above sort order. Defaults to ``f``.
-
-If neither ``order_by`` nor ``dir`` is set, the default order is newest media on top
-(corresponds to ``order_by`` = ``created_ts`` and ``dir`` = ``b``).
-
-Caution. The database only has indexes on the columns ``media_id``,
-``user_id`` and ``created_ts``. This means that if a different sort order is used
-(``upload_name``, ``last_access_ts``, ``media_length``, ``media_type``,
-``quarantined_by`` or ``safe_from_quarantine``), this can cause a large load on the
-database, especially for large environments.
-
-**Response**
-
-The following fields are returned in the JSON response body:
-
-- ``media`` - An array of objects, each containing information about a media.
-  Media objects contain the following fields:
-
-  - ``created_ts`` - integer - Timestamp when the content was uploaded in ms.
-  - ``last_access_ts`` - integer - Timestamp when the content was last accessed in ms.
-  - ``media_id`` - string - The id used to refer to the media.
-  - ``media_length`` - integer - Length of the media in bytes.
-  - ``media_type`` - string - The MIME-type of the media.
-  - ``quarantined_by`` - string - The user ID that initiated the quarantine request
-    for this media.
-  - ``safe_from_quarantine`` - bool - Status if this media is safe from quarantining.
-  - ``upload_name`` - string - The name the media was uploaded with.
-
-- ``next_token``: integer - Indication for pagination. See above.
-- ``total`` - integer - Total number of media.
-
-Login as a user
-===============
-
-Get an access token that can be used to authenticate as that user. Useful for
-when admins wish to do actions on behalf of a user.
-
-The API is::
-
-    POST /_synapse/admin/v1/users/<user_id>/login
-    {}
-
-An optional ``valid_until_ms`` field can be specified in the request body as an
-integer timestamp that specifies when the token should expire. By default tokens
-do not expire.
-
-A response body like the following is returned:
-
-.. code:: json
-
-    {
-        "access_token": ""
-    }
-
-
-This API does *not* generate a new device for the user, and so will not appear
-in their ``/devices`` list. In general the target user should not be able to
-tell that an admin has logged in as them.
-
-To expire the token call the standard ``/logout`` API with the token.
-
-Note: The token will expire if the *admin* user calls ``/logout/all`` from any
-of their devices, but the token will *not* expire if the target user does the
-same.
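As an example, the login-as-user flow might be driven from Python as below.
This is a sketch using the third-party ``requests`` package; the homeserver
URL, admin token and expiry window are placeholder values:

.. code:: python

    import time

    import requests

    HOMESERVER = "https://matrix.example.com"  # placeholder
    ADMIN_TOKEN = "syt_..."                    # placeholder

    def login_as(user_id: str, valid_for_ms: int = 60 * 60 * 1000) -> str:
        """Return a short-lived access token for the given user."""
        resp = requests.post(
            f"{HOMESERVER}/_synapse/admin/v1/users/{user_id}/login",
            headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
            # Without valid_until_ms the token would never expire.
            json={"valid_until_ms": int(time.time() * 1000) + valid_for_ms},
        )
        resp.raise_for_status()
        return resp.json()["access_token"]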
-
-User devices
-============
-
-List all devices
-----------------
-Gets information about all devices for a specific ``user_id``.
-
-The API is::
-
-    GET /_synapse/admin/v2/users/<user_id>/devices
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-A response body like the following is returned:
-
-.. code:: json
-
-    {
-        "devices": [
-            {
-                "device_id": "QBUAZIFURK",
-                "display_name": "android",
-                "last_seen_ip": "1.2.3.4",
-                "last_seen_ts": 1474491775024,
-                "user_id": ""
-            },
-            {
-                "device_id": "AUIECTSRND",
-                "display_name": "ios",
-                "last_seen_ip": "1.2.3.5",
-                "last_seen_ts": 1474491775025,
-                "user_id": ""
-            }
-        ],
-        "total": 2
-    }
-
-**Parameters**
-
-The following parameters should be set in the URL:
-
-- ``user_id`` - fully qualified: for example, ``@user:server.com``.
-
-**Response**
-
-The following fields are returned in the JSON response body:
-
-- ``devices`` - An array of objects, each containing information about a device.
-  Device objects contain the following fields:
-
-  - ``device_id`` - Identifier of device.
-  - ``display_name`` - Display name set by the user for this device.
-    Absent if no name has been set.
-  - ``last_seen_ip`` - The IP address where this device was last seen.
-    (May be a few minutes out of date, for efficiency reasons).
-  - ``last_seen_ts`` - The timestamp (in milliseconds since the unix epoch) when this
-    device was last seen. (May be a few minutes out of date, for efficiency reasons).
-  - ``user_id`` - Owner of device.
-
-- ``total`` - Total number of user's devices.
-
-Delete multiple devices
------------------------
-Deletes the given devices for a specific ``user_id``, and invalidates
-any access token associated with them.
-
-The API is::
-
-    POST /_synapse/admin/v2/users/<user_id>/delete_devices
-
-    {
-        "devices": [
-            "QBUAZIFURK",
-            "AUIECTSRND"
-        ]
-    }
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-An empty JSON dict is returned.
-
-**Parameters**
-
-The following parameters should be set in the URL:
-
-- ``user_id`` - fully qualified: for example, ``@user:server.com``.
-
-The following fields are required in the JSON request body:
-
-- ``devices`` - The list of device IDs to delete.
-
-Show a device
----------------
-Gets information on a single device, by ``device_id`` for a specific ``user_id``.
-
-The API is::
-
-    GET /_synapse/admin/v2/users/<user_id>/devices/<device_id>
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-A response body like the following is returned:
-
-.. code:: json
-
-    {
-        "device_id": "",
-        "display_name": "android",
-        "last_seen_ip": "1.2.3.4",
-        "last_seen_ts": 1474491775024,
-        "user_id": ""
-    }
-
-**Parameters**
-
-The following parameters should be set in the URL:
-
-- ``user_id`` - fully qualified: for example, ``@user:server.com``.
-- ``device_id`` - The device to retrieve.
-
-**Response**
-
-The following fields are returned in the JSON response body:
-
-- ``device_id`` - Identifier of device.
-- ``display_name`` - Display name set by the user for this device.
-  Absent if no name has been set.
-- ``last_seen_ip`` - The IP address where this device was last seen.
-  (May be a few minutes out of date, for efficiency reasons).
-- ``last_seen_ts`` - The timestamp (in milliseconds since the unix epoch) when this
-  device was last seen. (May be a few minutes out of date, for efficiency reasons).
-- ``user_id`` - Owner of device.
-
-Update a device
----------------
-Updates the metadata on the given ``device_id`` for a specific ``user_id``.
-
-The API is::
-
-    PUT /_synapse/admin/v2/users/<user_id>/devices/<device_id>
-
-    {
-        "display_name": "My other phone"
-    }
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-An empty JSON dict is returned.
-
-**Parameters**
-
-The following parameters should be set in the URL:
-
-- ``user_id`` - fully qualified: for example, ``@user:server.com``.
-- ``device_id`` - The device to update.
-
-The following fields are required in the JSON request body:
-
-- ``display_name`` - The new display name for this device. If not given,
-  the display name is unchanged.
-
-Delete a device
----------------
-Deletes the given ``device_id`` for a specific ``user_id``,
-and invalidates any access token associated with it.
-
-The API is::
-
-    DELETE /_synapse/admin/v2/users/<user_id>/devices/<device_id>
-
-    {}
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-An empty JSON dict is returned.
-
-**Parameters**
-
-The following parameters should be set in the URL:
-
-- ``user_id`` - fully qualified: for example, ``@user:server.com``.
-- ``device_id`` - The device to delete.
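For illustration, the device update and bulk delete endpoints above can be
combined in a small Python helper. This is a sketch using the third-party
``requests`` package; the homeserver URL and admin token are placeholders:

.. code:: python

    import requests

    HOMESERVER = "https://matrix.example.com"  # placeholder
    ADMIN_TOKEN = "syt_..."                    # placeholder
    HEADERS = {"Authorization": f"Bearer {ADMIN_TOKEN}"}

    def rename_device(user_id: str, device_id: str, name: str) -> None:
        """Set a new display name on one of the user's devices."""
        resp = requests.put(
            f"{HOMESERVER}/_synapse/admin/v2/users/{user_id}/devices/{device_id}",
            headers=HEADERS,
            json={"display_name": name},
        )
        resp.raise_for_status()

    def delete_devices(user_id: str, device_ids: list) -> None:
        """Delete several devices and invalidate their access tokens."""
        resp = requests.post(
            f"{HOMESERVER}/_synapse/admin/v2/users/{user_id}/delete_devices",
            headers=HEADERS,
            json={"devices": device_ids},
        )
        resp.raise_for_status()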
-
-List all pushers
-================
-Gets information about all pushers for a specific ``user_id``.
-
-The API is::
-
-    GET /_synapse/admin/v1/users/<user_id>/pushers
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-A response body like the following is returned:
-
-.. code:: json
-
-    {
-        "pushers": [
-            {
-                "app_display_name": "HTTP Push Notifications",
-                "app_id": "m.http",
-                "data": {
-                    "url": "example.com"
-                },
-                "device_display_name": "pushy push",
-                "kind": "http",
-                "lang": "None",
-                "profile_tag": "",
-                "pushkey": "a@example.com"
-            }
-        ],
-        "total": 1
-    }
-
-**Parameters**
-
-The following parameters should be set in the URL:
-
-- ``user_id`` - fully qualified: for example, ``@user:server.com``.
-
-**Response**
-
-The following fields are returned in the JSON response body:
-
-- ``pushers`` - An array containing the current pushers for the user
-
-  - ``app_display_name`` - string - A string that will allow the user to identify
-    what application owns this pusher.
-
-  - ``app_id`` - string - This is a reverse-DNS style identifier for the application.
-    Max length, 64 chars.
-
-  - ``data`` - A dictionary of information for the pusher implementation itself.
-
-    - ``url`` - string - Required if ``kind`` is ``http``. The URL to use to send
-      notifications to.
-
-    - ``format`` - string - The format to use when sending notifications to the
-      Push Gateway.
-
-  - ``device_display_name`` - string - A string that will allow the user to identify
-    what device owns this pusher.
-
-  - ``kind`` - string - The kind of pusher. "http" is a pusher that sends HTTP pokes.
-
-  - ``lang`` - string - The preferred language for receiving notifications
-    (e.g. 'en' or 'en-US').
-
-  - ``profile_tag`` - string - This string determines which set of device specific rules
-    this pusher executes.
-
-  - ``pushkey`` - string - This is a unique identifier for this pusher.
-    Max length, 512 bytes.
-
-- ``total`` - integer - Number of pushers.
-
-See also the `Client-Server API Spec <https://matrix.org/docs/spec/client_server/latest#get-matrix-client-r0-pushers>`_.
-
-Shadow-banning users
-====================
-
-Shadow-banning is a useful tool for moderating malicious or egregiously abusive users.
-A shadow-banned user receives successful responses to their client-server API requests,
-but the events are not propagated into rooms. This can be an effective tool as it
-(hopefully) takes longer for the user to realise they are being moderated before
-pivoting to another account.
-
-Shadow-banning a user should be used as a tool of last resort and may lead to confusing
-or broken behaviour for the client. A shadow-banned user will not receive any
-notification and it is generally more appropriate to ban or kick abusive users.
-A shadow-banned user will be unable to contact anyone on the server.
-
-The API is::
-
-    POST /_synapse/admin/v1/users/<user_id>/shadow_ban
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-An empty JSON dict is returned.
-
-**Parameters**
-
-The following parameters should be set in the URL:
-
-- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must
-  be local.
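Since the shadow-ban endpoint takes no request body, the call itself is a
one-liner. A minimal Python sketch (third-party ``requests`` package;
homeserver URL and admin token are placeholders):

.. code:: python

    import requests

    HOMESERVER = "https://matrix.example.com"  # placeholder
    ADMIN_TOKEN = "syt_..."                    # placeholder

    def shadow_ban(user_id: str) -> None:
        """Shadow-ban a local user; the endpoint takes no request body."""
        resp = requests.post(
            f"{HOMESERVER}/_synapse/admin/v1/users/{user_id}/shadow_ban",
            headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
        )
        resp.raise_for_status()  # an empty JSON dict is returned on success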
-
-Override ratelimiting for users
-===============================
-
-This API allows you to override or disable ratelimiting for a specific user.
-There are specific APIs to set, get and delete a ratelimit.
-
-Get status of ratelimit
------------------------
-
-The API is::
-
-    GET /_synapse/admin/v1/users/<user_id>/override_ratelimit
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-A response body like the following is returned:
-
-.. code:: json
-
-    {
-        "messages_per_second": 0,
-        "burst_count": 0
-    }
-
-**Parameters**
-
-The following parameters should be set in the URL:
-
-- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must
-  be local.
-
-**Response**
-
-The following fields are returned in the JSON response body:
-
-- ``messages_per_second`` - integer - The number of actions that can
-  be performed in a second. ``0`` means that ratelimiting is disabled for this user.
-- ``burst_count`` - integer - How many actions can be performed before
-  being limited.
-
-If **no** custom ratelimit is set, an empty JSON dict is returned.
-
-.. code:: json
-
-    {}
-
-Set ratelimit
--------------
-
-The API is::
-
-    POST /_synapse/admin/v1/users/<user_id>/override_ratelimit
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-A response body like the following is returned:
-
-.. code:: json
-
-    {
-        "messages_per_second": 0,
-        "burst_count": 0
-    }
-
-**Parameters**
-
-The following parameters should be set in the URL:
-
-- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must
-  be local.
-
-Body parameters:
-
-- ``messages_per_second`` - positive integer, optional. The number of actions that can
-  be performed in a second. Defaults to ``0``.
-- ``burst_count`` - positive integer, optional. How many actions can be performed
-  before being limited. Defaults to ``0``.
-
-To disable ratelimiting for a user, set both values to ``0``.
-
-**Response**
-
-The following fields are returned in the JSON response body:
-
-- ``messages_per_second`` - integer - The number of actions that can
-  be performed in a second.
-- ``burst_count`` - integer - How many actions can be performed before
-  being limited.
-
-Delete ratelimit
-----------------
-
-The API is::
-
-    DELETE /_synapse/admin/v1/users/<user_id>/override_ratelimit
-
-To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see `README.rst <README.rst>`_.
-
-An empty JSON dict is returned.
-
-.. code:: json
-
-    {}
-
-**Parameters**
-
-The following parameters should be set in the URL:
-
-- ``user_id`` - The fully qualified MXID: for example, ``@user:server.com``. The user must
-  be local.
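Putting the set and delete calls together, a Python sketch of a ratelimit
override might look as follows (third-party ``requests`` package; homeserver
URL and admin token are placeholders):

.. code:: python

    import requests

    HOMESERVER = "https://matrix.example.com"  # placeholder
    ADMIN_TOKEN = "syt_..."                    # placeholder
    HEADERS = {"Authorization": f"Bearer {ADMIN_TOKEN}"}

    def disable_ratelimiting(user_id: str) -> dict:
        """Set both limits to 0, which turns ratelimiting off for the user."""
        resp = requests.post(
            f"{HOMESERVER}/_synapse/admin/v1/users/{user_id}/override_ratelimit",
            headers=HEADERS,
            json={"messages_per_second": 0, "burst_count": 0},
        )
        resp.raise_for_status()
        return resp.json()

    def clear_ratelimit_override(user_id: str) -> None:
        """Remove any override so the server-wide defaults apply again."""
        resp = requests.delete(
            f"{HOMESERVER}/_synapse/admin/v1/users/{user_id}/override_ratelimit",
            headers=HEADERS,
        )
        resp.raise_for_status()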
- diff --git a/docs/admin_api/version_api.rst b/docs/admin_api/version_api.md similarity index 59% rename from docs/admin_api/version_api.rst rename to docs/admin_api/version_api.md index 833d9028bea1..efb4a0c0f7aa 100644 --- a/docs/admin_api/version_api.rst +++ b/docs/admin_api/version_api.md @@ -1,20 +1,21 @@ -Version API -=========== +# Version API This API returns the running Synapse version and the Python version on which Synapse is being run. This is useful when a Synapse instance is behind a proxy that does not forward the 'Server' header (which also contains Synapse version information). -The api is:: +The api is: - GET /_synapse/admin/v1/server_version +``` +GET /_synapse/admin/v1/server_version +``` It returns a JSON body like the following: -.. code:: json - - { - "server_version": "0.99.2rc1 (b=develop, abcdef123)", - "python_version": "3.6.8" - } +```json +{ + "server_version": "0.99.2rc1 (b=develop, abcdef123)", + "python_version": "3.6.8" +} +``` From 1d143074c5534912cf40d28a4c31deabab2b1710 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 3 Jun 2021 16:01:30 +0100 Subject: [PATCH 220/222] Improve opentracing annotations for Notifier (#10111) The existing tracing reports an error each time there is a timeout, which isn't really representative. Additionally, we log things about the way `wait_for_events` works (eg, the result of the callback) to the *parent* span, which is confusing. --- changelog.d/10111.misc | 1 + synapse/notifier.py | 66 +++++++++++++++++++++--------------------- 2 files changed, 34 insertions(+), 33 deletions(-) create mode 100644 changelog.d/10111.misc diff --git a/changelog.d/10111.misc b/changelog.d/10111.misc new file mode 100644 index 000000000000..42e42b69ab16 --- /dev/null +++ b/changelog.d/10111.misc @@ -0,0 +1 @@ +Improve opentracing annotations for `Notifier`. diff --git a/synapse/notifier.py b/synapse/notifier.py index 24b4e6649f18..3c3cc476318b 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -485,21 +485,21 @@ async def wait_for_events( end_time = self.clock.time_msec() + timeout while not result: - try: - now = self.clock.time_msec() - if end_time <= now: - break - - # Now we wait for the _NotifierUserStream to be told there - # is a new token. - listener = user_stream.new_listener(prev_token) - listener.deferred = timeout_deferred( - listener.deferred, - (end_time - now) / 1000.0, - self.hs.get_reactor(), - ) + with start_active_span("wait_for_events"): + try: + now = self.clock.time_msec() + if end_time <= now: + break + + # Now we wait for the _NotifierUserStream to be told there + # is a new token. 
+ listener = user_stream.new_listener(prev_token) + listener.deferred = timeout_deferred( + listener.deferred, + (end_time - now) / 1000.0, + self.hs.get_reactor(), + ) - with start_active_span("wait_for_events.deferred"): log_kv( { "wait_for_events": "sleep", @@ -517,27 +517,27 @@ async def wait_for_events( } ) - current_token = user_stream.current_token + current_token = user_stream.current_token - result = await callback(prev_token, current_token) - log_kv( - { - "wait_for_events": "result", - "result": bool(result), - } - ) - if result: + result = await callback(prev_token, current_token) + log_kv( + { + "wait_for_events": "result", + "result": bool(result), + } + ) + if result: + break + + # Update the prev_token to the current_token since nothing + # has happened between the old prev_token and the current_token + prev_token = current_token + except defer.TimeoutError: + log_kv({"wait_for_events": "timeout"}) + break + except defer.CancelledError: + log_kv({"wait_for_events": "cancelled"}) break - - # Update the prev_token to the current_token since nothing - # has happened between the old prev_token and the current_token - prev_token = current_token - except defer.TimeoutError: - log_kv({"wait_for_events": "timeout"}) - break - except defer.CancelledError: - log_kv({"wait_for_events": "cancelled"}) - break if result is None: # This happened if there was no timeout or if the timeout had From 9eea4646be5eef1e2b24e3b0bb0fc94999c2250c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 3 Jun 2021 16:31:56 +0100 Subject: [PATCH 221/222] Add OpenTracing for database activity. (#10113) This adds quite a lot of OpenTracing decoration for database activity. Specifically it adds tracing at four different levels: * emit a span for each "interaction" - ie, the top level database function that we tend to call "transaction", but isn't really, because it can end up as multiple transactions. * emit a span while we hold a database connection open * emit a span for each database transaction - actual actual transaction. * emit a span for each database query. I'm aware this might be quite a lot of overhead, but even just running it on a local Synapse it looks really interesting, and I hope the overhead can be offset just by turning down the sampling frequency and finding other ways of tracing requests of interest (eg, the `force_tracing_for_users` setting). --- changelog.d/10113.feature | 1 + synapse/logging/opentracing.py | 6 +++ synapse/storage/database.py | 86 +++++++++++++++++++++------------- 3 files changed, 60 insertions(+), 33 deletions(-) create mode 100644 changelog.d/10113.feature diff --git a/changelog.d/10113.feature b/changelog.d/10113.feature new file mode 100644 index 000000000000..2658ab891882 --- /dev/null +++ b/changelog.d/10113.feature @@ -0,0 +1 @@ +Report OpenTracing spans for database activity. diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index f64845b80cc0..68f0c00151d5 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -271,6 +271,12 @@ class SynapseTags: # HTTP request tag (used to distinguish full vs incremental syncs, etc) REQUEST_TAG = "request_tag" + # Text description of a database transaction + DB_TXN_DESC = "db.txn_desc" + + # Uniqueish ID of a database transaction + DB_TXN_ID = "db.txn_id" + # Block everything by default # A regex which matches the server_names to expose traces for. 
diff --git a/synapse/storage/database.py b/synapse/storage/database.py index a761ad603bbb..974703d13ae2 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -40,6 +40,7 @@ from synapse.api.errors import StoreError from synapse.config.database import DatabaseConnectionConfig +from synapse.logging import opentracing from synapse.logging.context import ( LoggingContext, current_context, @@ -313,7 +314,14 @@ def _do_execute(self, func: Callable[..., R], sql: str, *args: Any) -> R: start = time.time() try: - return func(sql, *args) + with opentracing.start_active_span( + "db.query", + tags={ + opentracing.tags.DATABASE_TYPE: "sql", + opentracing.tags.DATABASE_STATEMENT: sql, + }, + ): + return func(sql, *args) except Exception as e: sql_logger.debug("[SQL FAIL] {%s} %s", self.name, e) raise @@ -525,9 +533,16 @@ def new_transaction( exception_callbacks=exception_callbacks, ) try: - r = func(cursor, *args, **kwargs) - conn.commit() - return r + with opentracing.start_active_span( + "db.txn", + tags={ + opentracing.SynapseTags.DB_TXN_DESC: desc, + opentracing.SynapseTags.DB_TXN_ID: name, + }, + ): + r = func(cursor, *args, **kwargs) + conn.commit() + return r except self.engine.module.OperationalError as e: # This can happen if the database disappears mid # transaction. @@ -653,16 +668,17 @@ async def runInteraction( logger.warning("Starting db txn '%s' from sentinel context", desc) try: - result = await self.runWithConnection( - self.new_transaction, - desc, - after_callbacks, - exception_callbacks, - func, - *args, - db_autocommit=db_autocommit, - **kwargs, - ) + with opentracing.start_active_span(f"db.{desc}"): + result = await self.runWithConnection( + self.new_transaction, + desc, + after_callbacks, + exception_callbacks, + func, + *args, + db_autocommit=db_autocommit, + **kwargs, + ) for after_callback, after_args, after_kwargs in after_callbacks: after_callback(*after_args, **after_kwargs) @@ -718,25 +734,29 @@ def inner_func(conn, *args, **kwargs): with LoggingContext( str(curr_context), parent_context=parent_context ) as context: - sched_duration_sec = monotonic_time() - start_time - sql_scheduling_timer.observe(sched_duration_sec) - context.add_database_scheduled(sched_duration_sec) - - if self.engine.is_connection_closed(conn): - logger.debug("Reconnecting closed database connection") - conn.reconnect() - - try: - if db_autocommit: - self.engine.attempt_to_set_autocommit(conn, True) - - db_conn = LoggingDatabaseConnection( - conn, self.engine, "runWithConnection" - ) - return func(db_conn, *args, **kwargs) - finally: - if db_autocommit: - self.engine.attempt_to_set_autocommit(conn, False) + with opentracing.start_active_span( + operation_name="db.connection", + ): + sched_duration_sec = monotonic_time() - start_time + sql_scheduling_timer.observe(sched_duration_sec) + context.add_database_scheduled(sched_duration_sec) + + if self.engine.is_connection_closed(conn): + logger.debug("Reconnecting closed database connection") + conn.reconnect() + opentracing.log_kv({"message": "reconnected"}) + + try: + if db_autocommit: + self.engine.attempt_to_set_autocommit(conn, True) + + db_conn = LoggingDatabaseConnection( + conn, self.engine, "runWithConnection" + ) + return func(db_conn, *args, **kwargs) + finally: + if db_autocommit: + self.engine.attempt_to_set_autocommit(conn, False) return await make_deferred_yieldable( self._db_pool.runWithConnection(inner_func, *args, **kwargs) From fd9856e4a98fb3fa9c139317b0a3b79f22aff1c7 Mon Sep 17 00:00:00 2001 From: Andrew Morgan 
<1342360+anoadragon453@users.noreply.github.com> Date: Thu, 3 Jun 2021 17:20:40 +0100 Subject: [PATCH 222/222] Compile and render Synapse's docs into a browsable, mobile-friendly and searchable website (#10086) --- .github/workflows/docs.yaml | 31 ++ .gitignore | 3 + MANIFEST.in | 1 + book.toml | 39 +++ changelog.d/10086.doc | 1 + docs/README.md | 71 +++- docs/SUMMARY.md | 87 +++++ docs/admin_api/README.rst | 30 +- docs/admin_api/delete_group.md | 2 +- docs/admin_api/event_reports.md | 4 +- docs/admin_api/media_admin_api.md | 4 +- docs/admin_api/purge_history_api.md | 2 +- docs/admin_api/room_membership.md | 2 +- docs/admin_api/rooms.md | 2 +- docs/admin_api/statistics.md | 2 +- docs/admin_api/user_admin_api.md | 40 +-- docs/development/contributing_guide.md | 7 + .../internal_documentation/README.md | 12 + docs/favicon.png | Bin 0 -> 7908 bytes docs/favicon.svg | 58 ++++ docs/setup/installation.md | 7 + docs/upgrading/README.md | 7 + docs/usage/administration/README.md | 7 + docs/usage/administration/admin_api/README.md | 29 ++ docs/usage/configuration/README.md | 4 + .../configuration/homeserver_sample_config.md | 14 + .../configuration/logging_sample_config.md | 14 + .../user_authentication/README.md | 15 + docs/website_files/README.md | 30 ++ docs/website_files/indent-section-headers.css | 7 + docs/website_files/remove-nav-buttons.css | 8 + docs/website_files/table-of-contents.css | 42 +++ docs/website_files/table-of-contents.js | 134 ++++++++ docs/website_files/theme/index.hbs | 312 ++++++++++++++++++ docs/welcome_and_overview.md | 4 + 35 files changed, 978 insertions(+), 54 deletions(-) create mode 100644 .github/workflows/docs.yaml create mode 100644 book.toml create mode 100644 changelog.d/10086.doc create mode 100644 docs/SUMMARY.md create mode 100644 docs/development/contributing_guide.md create mode 100644 docs/development/internal_documentation/README.md create mode 100644 docs/favicon.png create mode 100644 docs/favicon.svg create mode 100644 docs/setup/installation.md create mode 100644 docs/upgrading/README.md create mode 100644 docs/usage/administration/README.md create mode 100644 docs/usage/administration/admin_api/README.md create mode 100644 docs/usage/configuration/README.md create mode 100644 docs/usage/configuration/homeserver_sample_config.md create mode 100644 docs/usage/configuration/logging_sample_config.md create mode 100644 docs/usage/configuration/user_authentication/README.md create mode 100644 docs/website_files/README.md create mode 100644 docs/website_files/indent-section-headers.css create mode 100644 docs/website_files/remove-nav-buttons.css create mode 100644 docs/website_files/table-of-contents.css create mode 100644 docs/website_files/table-of-contents.js create mode 100644 docs/website_files/theme/index.hbs create mode 100644 docs/welcome_and_overview.md diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml new file mode 100644 index 000000000000..a746ae6de3a8 --- /dev/null +++ b/.github/workflows/docs.yaml @@ -0,0 +1,31 @@ +name: Deploy the documentation + +on: + push: + branches: + - develop + + workflow_dispatch: + +jobs: + pages: + name: GitHub Pages + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: Setup mdbook + uses: peaceiris/actions-mdbook@4b5ef36b314c2599664ca107bb8c02412548d79d # v1.1.14 + with: + mdbook-version: '0.4.9' + + - name: Build the documentation + run: mdbook build + + - name: Deploy latest documentation + uses: peaceiris/actions-gh-pages@068dc23d9710f1ba62e86896f84735d869951305 # 
v3.8.0 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + keep_files: true + publish_dir: ./book + destination_dir: ./develop diff --git a/.gitignore b/.gitignore index 295a18b5399a..6b9257b5c95b 100644 --- a/.gitignore +++ b/.gitignore @@ -46,3 +46,6 @@ __pycache__/ /docs/build/ /htmlcov /pip-wheel-metadata/ + +# docs +book/ diff --git a/MANIFEST.in b/MANIFEST.in index 25d1cb758e52..0522319c4000 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -40,6 +40,7 @@ exclude mypy.ini exclude sytest-blacklist exclude test_postgresql.sh +include book.toml include pyproject.toml recursive-include changelog.d * diff --git a/book.toml b/book.toml new file mode 100644 index 000000000000..fa83d86ffc1c --- /dev/null +++ b/book.toml @@ -0,0 +1,39 @@ +# Documentation for possible options in this file is at +# https://rust-lang.github.io/mdBook/format/config.html +[book] +title = "Synapse" +authors = ["The Matrix.org Foundation C.I.C."] +language = "en" +multilingual = false + +# The directory that documentation files are stored in +src = "docs" + +[build] +# Prevent markdown pages from being automatically generated when they're +# linked to in SUMMARY.md +create-missing = false + +[output.html] +# The URL visitors will be directed to when they try to edit a page +edit-url-template = "https://github.com/matrix-org/synapse/edit/develop/{path}" + +# Remove the numbers that appear before each item in the sidebar, as they can +# get quite messy as we nest deeper +no-section-label = true + +# The source code URL of the repository +git-repository-url = "https://github.com/matrix-org/synapse" + +# The path that the docs are hosted on +site-url = "/synapse/" + +# Additional HTML, JS, CSS that's injected into each page of the book. +# More information available in docs/website_files/README.md +additional-css = [ + "docs/website_files/table-of-contents.css", + "docs/website_files/remove-nav-buttons.css", + "docs/website_files/indent-section-headers.css", +] +additional-js = ["docs/website_files/table-of-contents.js"] +theme = "docs/website_files/theme" \ No newline at end of file diff --git a/changelog.d/10086.doc b/changelog.d/10086.doc new file mode 100644 index 000000000000..2200579012db --- /dev/null +++ b/changelog.d/10086.doc @@ -0,0 +1 @@ +Add initial infrastructure for rendering Synapse documentation with mdbook. diff --git a/docs/README.md b/docs/README.md index 3c6ea48c66bb..e113f55d2a70 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,7 +1,72 @@ # Synapse Documentation -This directory contains documentation specific to the `synapse` homeserver. +**The documentation is currently hosted [here](https://matrix-org.github.io/synapse).** +Please update any links to point to the new website instead. -All matrix-generic documentation now lives in its own project, located at [matrix-org/matrix-doc](https://github.com/matrix-org/matrix-doc) +## About -(Note: some items here may be moved to [matrix-org/matrix-doc](https://github.com/matrix-org/matrix-doc) at some point in the future.) +This directory currently holds a series of markdown files documenting how to install, use +and develop Synapse, the reference Matrix homeserver. The documentation is readable directly +from this repository, but it is recommended to instead browse through the +[website](https://matrix-org.github.io/synapse) for easier discoverability. 
+ +## Adding to the documentation + +Most of the documentation currently exists as top-level files, as when organising them into +a structured website, these files were kept in place so that existing links would not break. +The rest of the documentation is stored in folders, such as `setup`, `usage`, and `development` +etc. **All new documentation files should be placed in structured folders.** For example: + +To create a new user-facing documentation page about a new Single Sign-On protocol named +"MyCoolProtocol", one should create a new file with a relevant name, such as "my_cool_protocol.md". +This file might fit into the documentation structure at: + +- Usage + - Configuration + - User Authentication + - Single Sign-On + - **My Cool Protocol** + +Given that, one would place the new file under +`usage/configuration/user_authentication/single_sign_on/my_cool_protocol.md`. + +Note that the structure of the documentation (and thus the left sidebar on the website) is determined +by the list in [SUMMARY.md](SUMMARY.md). The final thing to do when adding a new page is to add a new +line linking to the new documentation file: + +```markdown +- [My Cool Protocol](usage/configuration/user_authentication/single_sign_on/my_cool_protocol.md) +``` + +## Building the documentation + +The documentation is built with [mdbook](https://rust-lang.github.io/mdBook/), and the outline of the +documentation is determined by the structure of [SUMMARY.md](SUMMARY.md). + +First, [get mdbook](https://github.com/rust-lang/mdBook#installation). Then, **from the root of the repository**, +build the documentation with: + +```sh +mdbook build +``` + +The rendered contents will be outputted to a new `book/` directory at the root of the repository. You can +browse the book by opening `book/index.html` in a web browser. + +You can also have mdbook host the docs on a local webserver with hot-reload functionality via: + +```sh +mdbook serve +``` + +The URL at which the docs can be viewed at will be logged. + +## Configuration and theming + +The look and behaviour of the website is configured by the [book.toml](../book.toml) file +at the root of the repository. See +[mdbook's documentation on configuration](https://rust-lang.github.io/mdBook/format/config.html) +for available options. + +The site can be themed and additionally extended with extra UI and features. See +[website_files/README.md](website_files/README.md) for details. 
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md new file mode 100644 index 000000000000..8f39ae027001 --- /dev/null +++ b/docs/SUMMARY.md @@ -0,0 +1,87 @@ +# Summary + +# Introduction +- [Welcome and Overview](welcome_and_overview.md) + +# Setup + - [Installation](setup/installation.md) + - [Using Postgres](postgres.md) + - [Configuring a Reverse Proxy](reverse_proxy.md) + - [Configuring a Turn Server](turn-howto.md) + - [Delegation](delegate.md) + +# Upgrading + - [Upgrading between Synapse Versions](upgrading/README.md) + - [Upgrading from pre-Synapse 1.0](MSC1711_certificates_FAQ.md) + +# Usage + - [Federation](federate.md) + - [Configuration](usage/configuration/README.md) + - [Homeserver Sample Config File](usage/configuration/homeserver_sample_config.md) + - [Logging Sample Config File](usage/configuration/logging_sample_config.md) + - [Structured Logging](structured_logging.md) + - [User Authentication](usage/configuration/user_authentication/README.md) + - [Single-Sign On]() + - [OpenID Connect](openid.md) + - [SAML]() + - [CAS]() + - [SSO Mapping Providers](sso_mapping_providers.md) + - [Password Auth Providers](password_auth_providers.md) + - [JSON Web Tokens](jwt.md) + - [Registration Captcha](CAPTCHA_SETUP.md) + - [Application Services](application_services.md) + - [Server Notices](server_notices.md) + - [Consent Tracking](consent_tracking.md) + - [URL Previews](url_previews.md) + - [User Directory](user_directory.md) + - [Message Retention Policies](message_retention_policies.md) + - [Pluggable Modules]() + - [Third Party Rules]() + - [Spam Checker](spam_checker.md) + - [Presence Router](presence_router_module.md) + - [Media Storage Providers]() + - [Workers](workers.md) + - [Using `synctl` with Workers](synctl_workers.md) + - [Systemd](systemd-with-workers/README.md) + - [Administration](usage/administration/README.md) + - [Admin API](usage/administration/admin_api/README.md) + - [Account Validity](admin_api/account_validity.md) + - [Delete Group](admin_api/delete_group.md) + - [Event Reports](admin_api/event_reports.md) + - [Media](admin_api/media_admin_api.md) + - [Purge History](admin_api/purge_history_api.md) + - [Purge Rooms](admin_api/purge_room.md) + - [Register Users](admin_api/register_api.md) + - [Manipulate Room Membership](admin_api/room_membership.md) + - [Rooms](admin_api/rooms.md) + - [Server Notices](admin_api/server_notices.md) + - [Shutdown Room](admin_api/shutdown_room.md) + - [Statistics](admin_api/statistics.md) + - [Users](admin_api/user_admin_api.md) + - [Server Version](admin_api/version_api.md) + - [Manhole](manhole.md) + - [Monitoring](metrics-howto.md) + - [Scripts]() + +# Development + - [Contributing Guide](development/contributing_guide.md) + - [Code Style](code_style.md) + - [Git Usage](dev/git.md) + - [Testing]() + - [OpenTracing](opentracing.md) + - [Synapse Architecture]() + - [Log Contexts](log_contexts.md) + - [Replication](replication.md) + - [TCP Replication](tcp_replication.md) + - [Internal Documentation](development/internal_documentation/README.md) + - [Single Sign-On]() + - [SAML](dev/saml.md) + - [CAS](dev/cas.md) + - [State Resolution]() + - [The Auth Chain Difference Algorithm](auth_chain_difference_algorithm.md) + - [Media Repository](media_repository.md) + - [Room and User Statistics](room_and_user_statistics.md) + - [Scripts]() + +# Other + - [Dependency Deprecation Policy](deprecation_policy.md) \ No newline at end of file diff --git a/docs/admin_api/README.rst b/docs/admin_api/README.rst index 9587bee0ce5f..37cee87d32e0 100644 
--- a/docs/admin_api/README.rst
+++ b/docs/admin_api/README.rst
@@ -1,28 +1,14 @@
 Admin APIs
 ==========
 
-This directory includes documentation for the various synapse specific admin
-APIs available.
-
-Authenticating as a server admin
---------------------------------
-
-Many of the API calls in the admin api will require an `access_token` for a
-server admin. (Note that a server admin is distinct from a room admin.)
-
-A user can be marked as a server admin by updating the database directly, e.g.:
-
-.. code-block:: sql
+**Note**: The latest documentation can be viewed `here <https://matrix-org.github.io/synapse>`_.
+See `docs/README.md <../docs/README.md>`_ for more information.
 
-    UPDATE users SET admin = 1 WHERE name = '@foo:bar.com';
+**Please update links to point to the website instead.** Existing files in this directory
+are preserved to maintain historical links, but may be moved in the future.
 
-A new server admin user can also be created using the
-``register_new_matrix_user`` script.
-
-Finding your user's `access_token` is client-dependent, but will usually be shown in the client's settings.
-
-Once you have your `access_token`, to include it in a request, the best option is to add the token to a request header:
-
-``curl --header "Authorization: Bearer <access_token>" <endpoint>``
+This directory includes documentation for the various synapse specific admin
+APIs available. Updates to the existing Admin API documentation should still
+be made to these files, but any new documentation files should instead be placed under
+`docs/usage/administration/admin_api <../docs/usage/administration/admin_api>`_.
 
-For more details, please refer to the complete `matrix spec documentation <https://matrix.org/docs/spec/client_server/r0.6.1#using-access-tokens>`_.
diff --git a/docs/admin_api/delete_group.md b/docs/admin_api/delete_group.md
index c061678e7518..9c335ff759a5 100644
--- a/docs/admin_api/delete_group.md
+++ b/docs/admin_api/delete_group.md
@@ -11,4 +11,4 @@ POST /_synapse/admin/v1/delete_group/<group_id>
 ```
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: see [README.rst](README.rst).
+server admin: see [Admin API](../../usage/administration/admin_api).
diff --git a/docs/admin_api/event_reports.md b/docs/admin_api/event_reports.md
index bfec06f75504..186139185e5d 100644
--- a/docs/admin_api/event_reports.md
+++ b/docs/admin_api/event_reports.md
@@ -7,7 +7,7 @@ The api is:
 GET /_synapse/admin/v1/event_reports?from=0&limit=10
 ```
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: see [README.rst](README.rst).
+server admin: see [Admin API](../../usage/administration/admin_api).
 
 It returns a JSON body like the following:
 
@@ -95,7 +95,7 @@ The api is:
 GET /_synapse/admin/v1/event_reports/<report_id>
 ```
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: see [README.rst](README.rst).
+server admin: see [Admin API](../../usage/administration/admin_api).
 
 It returns a JSON body like the following:
 
diff --git a/docs/admin_api/media_admin_api.md b/docs/admin_api/media_admin_api.md
index 7709f3d8c740..9ab526988154 100644
--- a/docs/admin_api/media_admin_api.md
+++ b/docs/admin_api/media_admin_api.md
@@ -28,7 +28,7 @@ The API is:
 GET /_synapse/admin/v1/room/<room_id>/media
 ```
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: see [README.rst](README.rst).
+server admin: see [Admin API](../../usage/administration/admin_api).
 The API returns a JSON body like the following:
 
 ```json
@@ -311,7 +311,7 @@ The following fields are returned in the JSON response body:
 * `deleted`: integer - The number of media items successfully deleted
 
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: see [README.rst](README.rst).
+server admin: see [Admin API](../../usage/administration/admin_api).
 
 If the user re-requests purged remote media, synapse will re-request the media
 from the originating server.
diff --git a/docs/admin_api/purge_history_api.md b/docs/admin_api/purge_history_api.md
index 44971acd917c..25decc3e6107 100644
--- a/docs/admin_api/purge_history_api.md
+++ b/docs/admin_api/purge_history_api.md
@@ -17,7 +17,7 @@ POST /_synapse/admin/v1/purge_history/<room_id>[/<event_id>]
 ```
 
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](README.rst)
+server admin: [Admin API](../../usage/administration/admin_api)
 
 By default, events sent by local users are not deleted, as they may represent
 the only copies of this content in existence. (Events sent by remote users are
diff --git a/docs/admin_api/room_membership.md b/docs/admin_api/room_membership.md
index b6746ff5e413..ed40366099af 100644
--- a/docs/admin_api/room_membership.md
+++ b/docs/admin_api/room_membership.md
@@ -24,7 +24,7 @@ POST /_synapse/admin/v1/join/<room_id_or_alias>
 ```
 
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: see [README.rst](README.rst).
+server admin: see [Admin API](../../usage/administration/admin_api).
 
 Response:
 
diff --git a/docs/admin_api/rooms.md b/docs/admin_api/rooms.md
index 5721210fee11..dc007fa00e82 100644
--- a/docs/admin_api/rooms.md
+++ b/docs/admin_api/rooms.md
@@ -443,7 +443,7 @@ with a body of:
 ```
 
 To use it, you will need to authenticate by providing an ``access_token`` for a
-server admin: see [README.rst](README.rst).
+server admin: see [Admin API](../../usage/administration/admin_api).
 
 A response body like the following is returned:
 
diff --git a/docs/admin_api/statistics.md b/docs/admin_api/statistics.md
index d398a120fb7e..d93d52a3ac15 100644
--- a/docs/admin_api/statistics.md
+++ b/docs/admin_api/statistics.md
@@ -10,7 +10,7 @@ GET /_synapse/admin/v1/statistics/users/media
 ```
 
 To use it, you will need to authenticate by providing an `access_token`
-for a server admin: see [README.rst](README.rst).
+for a server admin: see [Admin API](../../usage/administration/admin_api).
 A response body like the following is returned:
 
diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md
index 0c843316c93a..c835e4a0cdbc 100644
--- a/docs/admin_api/user_admin_api.md
+++ b/docs/admin_api/user_admin_api.md
@@ -11,7 +11,7 @@ GET /_synapse/admin/v2/users/<user_id>
 ```
 
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](README.rst)
+server admin: [Admin API](../../usage/administration/admin_api)
 
 It returns a JSON body like the following:
 
@@ -78,7 +78,7 @@ with a body of:
 ```
 
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](README.rst)
+server admin: [Admin API](../../usage/administration/admin_api)
 
 URL parameters:
 
@@ -119,7 +119,7 @@ GET /_synapse/admin/v2/users?from=0&limit=10&guests=false
 ```
 
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](README.rst)
+server admin: [Admin API](../../usage/administration/admin_api)
 
 A response body like the following is returned:
 
@@ -237,7 +237,7 @@ See also: [Client Server API Whois](https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-admin-whois-userid).
 
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](README.rst)
+server admin: [Admin API](../../usage/administration/admin_api)
 
 It returns a JSON body like the following:
 
@@ -294,7 +294,7 @@ with a body of:
 ```
 
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](README.rst)
+server admin: [Admin API](../../usage/administration/admin_api)
 
 The erase parameter is optional and defaults to `false`.
 An empty body may be passed for backwards compatibility.
 
@@ -339,7 +339,7 @@ with a body of:
 ```
 
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](README.rst)
+server admin: [Admin API](../../usage/administration/admin_api)
 
 The parameter `new_password` is required.
 The parameter `logout_devices` is optional and defaults to `true`.
@@ -354,7 +354,7 @@ GET /_synapse/admin/v1/users/<user_id>/admin
 ```
 
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](README.rst)
+server admin: [Admin API](../../usage/administration/admin_api)
 
 A response body like the following is returned:
 
@@ -384,7 +384,7 @@ with a body of:
 ```
 
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](README.rst)
+server admin: [Admin API](../../usage/administration/admin_api)
 
 
 ## List room memberships of a user
@@ -398,7 +398,7 @@ GET /_synapse/admin/v1/users/<user_id>/joined_rooms
 ```
 
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](README.rst)
+server admin: [Admin API](../../usage/administration/admin_api)
 
 A response body like the following is returned:
 
@@ -443,7 +443,7 @@ GET /_synapse/admin/v1/users/<user_id>/media
 ```
 
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](README.rst)
+server admin: [Admin API](../../usage/administration/admin_api)
 
 A response body like the following is returned:
 
@@ -591,7 +591,7 @@ GET /_synapse/admin/v2/users/<user_id>/devices
 ```
 
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](README.rst)
+server admin: [Admin API](../../usage/administration/admin_api)
 
 A response body like the following is returned:
 
@@ -659,7 +659,7 @@ POST /_synapse/admin/v2/users/<user_id>/delete_devices
 ```
 
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](README.rst)
+server admin: [Admin API](../../usage/administration/admin_api)
 
 An empty JSON dict is returned.
 
@@ -683,7 +683,7 @@ GET /_synapse/admin/v2/users/<user_id>/devices/<device_id>
 ```
 
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](README.rst)
+server admin: [Admin API](../../usage/administration/admin_api)
 
 A response body like the following is returned:
 
@@ -731,7 +731,7 @@ PUT /_synapse/admin/v2/users/<user_id>/devices/<device_id>
 ```
 
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](README.rst)
+server admin: [Admin API](../../usage/administration/admin_api)
 
 An empty JSON dict is returned.
 
@@ -760,7 +760,7 @@ DELETE /_synapse/admin/v2/users/<user_id>/devices/<device_id>
 ```
 
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](README.rst)
+server admin: [Admin API](../../usage/administration/admin_api)
 
 An empty JSON dict is returned.
 
@@ -781,7 +781,7 @@ GET /_synapse/admin/v1/users/<user_id>/pushers
 ```
 
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](README.rst)
+server admin: [Admin API](../../usage/administration/admin_api)
 
 A response body like the following is returned:
 
@@ -872,7 +872,7 @@ POST /_synapse/admin/v1/users/<user_id>/shadow_ban
 ```
 
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](README.rst)
+server admin: [Admin API](../../usage/administration/admin_api)
 
 An empty JSON dict is returned.
@@ -897,7 +897,7 @@ GET /_synapse/admin/v1/users/<user_id>/override_ratelimit
 ```
 
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](README.rst)
+server admin: [Admin API](../../usage/administration/admin_api)
 
 A response body like the following is returned:
 
@@ -939,7 +939,7 @@ POST /_synapse/admin/v1/users/<user_id>/override_ratelimit
 ```
 
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](README.rst)
+server admin: [Admin API](../../usage/administration/admin_api)
 
 A response body like the following is returned:
 
@@ -984,7 +984,7 @@ DELETE /_synapse/admin/v1/users/<user_id>/override_ratelimit
 ```
 
 To use it, you will need to authenticate by providing an `access_token` for a
-server admin: [Admin API](README.rst)
+server admin: [Admin API](../../usage/administration/admin_api)
 
 An empty JSON dict is returned.
diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md
new file mode 100644
index 000000000000..ddf088712393
--- /dev/null
+++ b/docs/development/contributing_guide.md
@@ -0,0 +1,7 @@
+
+# Contributing
+
+{{#include ../../CONTRIBUTING.md}}
diff --git a/docs/development/internal_documentation/README.md b/docs/development/internal_documentation/README.md
new file mode 100644
index 000000000000..51c5fb94d537
--- /dev/null
+++ b/docs/development/internal_documentation/README.md
@@ -0,0 +1,12 @@
+# Internal Documentation
+
+This section covers implementation documentation for various parts of Synapse.
+
+If a developer is planning to make a change to a feature of Synapse, it can be useful for
+general documentation of how that feature is implemented to be available. This saves the
+developer time in place of needing to understand how the feature works by reading the
+code.
+
+Documentation that would be more useful from the perspective of a system administrator,
+rather than a developer who's intending to change the code, should instead be placed
+under the Usage section of the documentation.
\ No newline at end of file
diff --git a/docs/favicon.png b/docs/favicon.png
new file mode 100644
index 0000000000000000000000000000000000000000..5f18bf641fae401bcfd808e1faa2ecd6dba1d7b5
GIT binary patch
literal 7908
[base85-encoded PNG data (7908 bytes) omitted]

diff --git a/docs/favicon.svg b/docs/favicon.svg
new file mode 100644
[SVG image markup (media type image/svg+xml) omitted]
diff --git a/docs/setup/installation.md b/docs/setup/installation.md
new file mode 100644
index 000000000000..8bb1cffd3d67
--- /dev/null
+++ b/docs/setup/installation.md
@@ -0,0 +1,7 @@
+
+{{#include ../../INSTALL.md}}
\ No newline at end of file
diff --git a/docs/upgrading/README.md b/docs/upgrading/README.md
new file mode 100644 index 000000000000..258e58cf1532 --- /dev/null +++ b/docs/upgrading/README.md @@ -0,0 +1,7 @@ + +{{#include ../../UPGRADE.rst}} \ No newline at end of file diff --git a/docs/usage/administration/README.md b/docs/usage/administration/README.md new file mode 100644 index 000000000000..e1e57546ab6b --- /dev/null +++ b/docs/usage/administration/README.md @@ -0,0 +1,7 @@ +# Administration + +This section contains information on managing your Synapse homeserver. This includes: + +* Managing users, rooms and media via the Admin API. +* Setting up metrics and monitoring to give you insight into your homeserver's health. +* Configuring structured logging. \ No newline at end of file diff --git a/docs/usage/administration/admin_api/README.md b/docs/usage/administration/admin_api/README.md new file mode 100644 index 000000000000..2fca96f8be4e --- /dev/null +++ b/docs/usage/administration/admin_api/README.md @@ -0,0 +1,29 @@ +# The Admin API + +## Authenticate as a server admin + +Many of the API calls in the Admin API will require an `access_token` for a +server admin. (Note that a server admin is distinct from a room admin.) + +A user can be marked as a server admin by updating the database directly, e.g.: + +```sql +UPDATE users SET admin = 1 WHERE name = '@foo:bar.com'; +``` + +A new server admin user can also be created using the `register_new_matrix_user` +command. This is a script that is located in the `scripts/` directory, or possibly +already on your `$PATH` depending on how Synapse was installed. + +Finding your user's `access_token` is client-dependent, but it will usually be shown in the client's settings. + +## Making an Admin API request +Once you have your `access_token`, you will need to authenticate each request to an Admin API endpoint by +providing the token as either a query parameter or a request header. To add it as a request header in cURL: + +```sh +curl --header "Authorization: Bearer <access_token>" <the_rest_of_your_API_request> +``` + +For more details on access tokens in Matrix, please refer to the complete +[matrix spec documentation](https://matrix.org/docs/spec/client_server/r0.6.1#using-access-tokens). diff --git a/docs/usage/configuration/README.md b/docs/usage/configuration/README.md new file mode 100644 index 000000000000..41d41167c680 --- /dev/null +++ b/docs/usage/configuration/README.md @@ -0,0 +1,4 @@ +# Configuration + +This section contains information on tweaking Synapse via the various options in the configuration file. A configuration +file should have been generated when you [installed Synapse](../../setup/installation.html). diff --git a/docs/usage/configuration/homeserver_sample_config.md b/docs/usage/configuration/homeserver_sample_config.md new file mode 100644 index 000000000000..11e806998d88 --- /dev/null +++ b/docs/usage/configuration/homeserver_sample_config.md @@ -0,0 +1,14 @@ +# Homeserver Sample Configuration File + +Below is a sample homeserver configuration file. The homeserver configuration file +can be tweaked to change the behaviour of your homeserver. A restart of the server is +generally required to apply any changes made to this file. + +Note that the contents below are *not* intended to be copied and used as the basis for +a real homeserver.yaml. Instead, if you are starting from scratch, please generate +a fresh config using Synapse by following the instructions in +[Installation](../../setup/installation.md).
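The note above points to the Installation instructions for generating a fresh config rather than copying the sample file below. As a hedged sketch of what that looks like for a source install — the server name and paths are example values, not part of this patch:

```sh
# Generate a fresh homeserver.yaml (plus a matching log config).
# matrix.example.com and the file paths are illustrative values.
python -m synapse.app.homeserver \
    --server-name matrix.example.com \
    --config-path homeserver.yaml \
    --generate-config \
    --report-stats=no
```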
+ +```yaml +{{#include ../../sample_config.yaml}} +``` diff --git a/docs/usage/configuration/logging_sample_config.md b/docs/usage/configuration/logging_sample_config.md new file mode 100644 index 000000000000..4c4bc6fc1639 --- /dev/null +++ b/docs/usage/configuration/logging_sample_config.md @@ -0,0 +1,14 @@ +# Logging Sample Configuration File + +Below is a sample logging configuration file. This file can be tweaked to control how your +homeserver will output logs. A restart of the server is generally required to apply any +changes made to this file. + +Note that the contents below are *not* intended to be copied and used as the basis for +a real logging configuration file. Instead, if you are starting from scratch, please generate +a fresh config using Synapse by following the instructions in +[Installation](../../setup/installation.md). + +```yaml +{{#include ../../sample_log_config.yaml}} +``` \ No newline at end of file diff --git a/docs/usage/configuration/user_authentication/README.md b/docs/usage/configuration/user_authentication/README.md new file mode 100644 index 000000000000..087ae053cf61 --- /dev/null +++ b/docs/usage/configuration/user_authentication/README.md @@ -0,0 +1,15 @@ +# User Authentication + +Synapse supports multiple methods of authenticating users, either out-of-the-box or through custom pluggable +authentication modules. + +Included in Synapse is support for authenticating users via: + +* A username and password. +* An email address and password. +* Single Sign-On through the SAML, OpenID Connect or CAS protocols. +* JSON Web Tokens. +* An administrator's shared secret. + +Synapse can additionally be extended to support custom authentication schemes through optional "password auth provider" +modules. \ No newline at end of file diff --git a/docs/website_files/README.md b/docs/website_files/README.md new file mode 100644 index 000000000000..04d191479bfe --- /dev/null +++ b/docs/website_files/README.md @@ -0,0 +1,30 @@ +# Documentation Website Files and Assets + +This directory contains extra files for modifying the look and functionality of +[mdbook](https://github.com/rust-lang/mdBook), the documentation software that's +used to generate Synapse's documentation website. + +The configuration options in the `output.html` section of [book.toml](../../book.toml) +point to additional JS/CSS in this directory that are added on each page load. In +addition, the `theme` directory contains files that overwrite their counterparts in +each of the default themes included with mdbook. + +Currently we use these files to generate a floating Table of Contents panel, the code for +which was partially taken from +[JorelAli/mdBook-pagetoc](https://github.com/JorelAli/mdBook-pagetoc/) +and then modified so that it scrolls with the content of the page. This is handled +by the `table-of-contents.js/css` files. The table of contents panel only appears on pages +with more than one header, and only on desktop-sized monitors. + +We remove the navigation arrows which typically appear on the left and right side of the +screen on desktop as they interfere with the table of contents. This is handled by +the `remove-nav-buttons.css` file. + +Finally, we also stylise the chapter titles in the left sidebar by indenting them +slightly so that they are more visually distinguishable from the section headers +(the bold titles). This is done through the `indent-section-headers.css` file.
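Since the JS/CSS described above is wired in via `book.toml`, the quickest way to verify changes to these files is to rebuild the site locally. A sketch, assuming `mdbook` is installed and the command is run from the repository root where `book.toml` lives:

```sh
# Build the documentation site into book/ and open it in a browser with
# live reload, so edits to table-of-contents.js/css are picked up on save.
mdbook serve --open
```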
+ +More information can be found in mdbook's official documentation for +[injecting page JS/CSS](https://rust-lang.github.io/mdBook/format/config.html) +and +[customising the default themes](https://rust-lang.github.io/mdBook/format/theme/index.html). \ No newline at end of file diff --git a/docs/website_files/indent-section-headers.css b/docs/website_files/indent-section-headers.css new file mode 100644 index 000000000000..f9b3c82ca68c --- /dev/null +++ b/docs/website_files/indent-section-headers.css @@ -0,0 +1,7 @@ +/* + * Indents each chapter title in the left sidebar so that they aren't + * at the same level as the section headers. + */ +.chapter-item { + margin-left: 1em; +} \ No newline at end of file diff --git a/docs/website_files/remove-nav-buttons.css b/docs/website_files/remove-nav-buttons.css new file mode 100644 index 000000000000..4b280794ea15 --- /dev/null +++ b/docs/website_files/remove-nav-buttons.css @@ -0,0 +1,8 @@ +/* Remove the prev, next chapter buttons as they interfere with the + * table of contents. + * Note that the table of contents only appears on desktop, thus we + * only remove the desktop (wide) chapter buttons. + */ +.nav-wide-wrapper { + display: none; +} \ No newline at end of file diff --git a/docs/website_files/table-of-contents.css b/docs/website_files/table-of-contents.css new file mode 100644 index 000000000000..d16bb3b9886b --- /dev/null +++ b/docs/website_files/table-of-contents.css @@ -0,0 +1,42 @@ +@media only screen and (max-width:1439px) { + .sidetoc { + display: none; + } +} + +@media only screen and (min-width:1440px) { + main { + position: relative; + margin-left: 100px !important; + } + .sidetoc { + margin-left: auto; + margin-right: auto; + left: calc(100% + (var(--content-max-width))/4 - 140px); + position: absolute; + text-align: right; + } + .pagetoc { + position: fixed; + width: 250px; + overflow: auto; + right: 20px; + height: calc(100% - var(--menu-bar-height)); + } + .pagetoc a { + color: var(--fg) !important; + display: block; + padding: 5px 15px 5px 10px; + text-align: left; + text-decoration: none; + } + .pagetoc a:hover, + .pagetoc a.active { + background: var(--sidebar-bg) !important; + color: var(--sidebar-fg) !important; + } + .pagetoc .active { + background: var(--sidebar-bg); + color: var(--sidebar-fg); + } +} diff --git a/docs/website_files/table-of-contents.js b/docs/website_files/table-of-contents.js new file mode 100644 index 000000000000..0de5960b22b1 --- /dev/null +++ b/docs/website_files/table-of-contents.js @@ -0,0 +1,139 @@ +const getPageToc = () => document.getElementsByClassName('pagetoc')[0]; + +const pageToc = getPageToc(); +const pageTocChildren = [...pageToc.children]; +const headers = [...document.getElementsByClassName('header')]; + + +// Highlight the clicked item in the ToC, clearing any previous selection +pageTocChildren.forEach(child => { + child.addEventListener('click', () => { + pageTocChildren.forEach(otherChild => { + otherChild.classList.remove('active'); + }); + child.classList.add('active'); + }); +}); + + +/** + * Test whether a node is in the viewport + */ +function isInViewport(node) { + const rect = node.getBoundingClientRect(); + return rect.top >= 0 && rect.left >= 0 && rect.bottom <= (window.innerHeight || document.documentElement.clientHeight) && rect.right <= (window.innerWidth || document.documentElement.clientWidth); +} + + +/** + * Set a new ToC entry. + * Clear any previously highlighted ToC items, set the new one, + * and adjust the ToC scroll position.
+ */ +function setTocEntry() { + let activeEntry; + const pageTocChildren = [...getPageToc().children]; + + // Calculate which header is the current one at the top of the screen + headers.forEach(header => { + if (window.pageYOffset >= header.offsetTop) { + activeEntry = header; + } + }); + + // Bail out if we haven't scrolled past any header yet + if (!activeEntry) { + return; + } + + // Update selected item in ToC when scrolling + pageTocChildren.forEach(child => { + if (activeEntry.href.localeCompare(child.href) === 0) { + child.classList.add('active'); + } else { + child.classList.remove('active'); + } + }); + + let tocEntryForLocation = document.querySelector(`nav a[href="${activeEntry.href}"]`); + if (tocEntryForLocation) { + const headingForLocation = document.querySelector(activeEntry.hash); + if (headingForLocation && isInViewport(headingForLocation)) { + // Update ToC scroll + const nav = getPageToc(); + const content = document.querySelector('html'); + if (content.scrollTop !== 0) { + nav.scrollTo({ + top: tocEntryForLocation.offsetTop - 100, + left: 0, + behavior: 'smooth', + }); + } else { + nav.scrollTop = 0; + } + } + } +} + + +/** + * Populate sidebar on load + */ +window.addEventListener('load', () => { + // Only create table of contents if there is more than one header on the page + if (headers.length <= 1) { + return; + } + + // Create an entry in the page table of contents for each header in the document + headers.forEach((header, index) => { + const link = document.createElement('a'); + + // Indent shows hierarchy + let indent = '0px'; + switch (header.parentElement.tagName) { + case 'H1': + indent = '5px'; + break; + case 'H2': + indent = '20px'; + break; + case 'H3': + indent = '30px'; + break; + case 'H4': + indent = '40px'; + break; + case 'H5': + indent = '50px'; + break; + case 'H6': + indent = '60px'; + break; + default: + break; + } + + let tocEntry; + if (index === 0) { + // Create a bolded title for the first element + tocEntry = document.createElement("strong"); + tocEntry.textContent = header.text; + } else { + // All other elements are non-bold + tocEntry = document.createTextNode(header.text); + } + link.appendChild(tocEntry); + + link.style.paddingLeft = indent; + link.href = header.href; + pageToc.appendChild(link); + }); + setTocEntry(); +}); + + +// Handle active headers on scroll, if there is more than one header on the page +if (headers.length > 1) { + window.addEventListener('scroll', setTocEntry); +} diff --git a/docs/website_files/theme/index.hbs b/docs/website_files/theme/index.hbs new file mode 100644 index 000000000000..3b7a5b616353 --- /dev/null +++ b/docs/website_files/theme/index.hbs @@ -0,0 +1,312 @@ [The 312 added lines are mdbook's default HTML page template with the assets above hooked in; the raw markup was lost in extraction, leaving only stray handlebars directives such as {{ title }}, {{> header}}, {{{ content }}} and {{#if search_enabled}}/{{#if playground_js}} blocks, so it is not reproduced here.]
\ No newline at end of file diff --git a/docs/welcome_and_overview.md b/docs/welcome_and_overview.md new file mode 100644 index 000000000000..30e75984d1ef --- /dev/null +++ b/docs/welcome_and_overview.md @@ -0,0 +1,4 @@ +# Introduction + +Welcome to the documentation repository for Synapse, the reference +[Matrix](https://matrix.org) homeserver implementation. \ No newline at end of file