This repository has been archived by the owner on Apr 26, 2024. It is now read-only.

Fix message duplication if something goes wrong after persisting the event #8476

Merged
merged 20 commits on Oct 13, 2020
1 change: 1 addition & 0 deletions changelog.d/8476.bugfix
@@ -0,0 +1 @@
Fix message duplication if something goes wrong after persisting the event.
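
For context on the bug: Matrix clients retry event sends with the same transaction ID, and before this change a retry after a post-persist failure could create a duplicate event. A minimal client-side sketch (not part of the PR) of the retry pattern this fix makes idempotent; the homeserver URL, access token, and room ID are placeholders:

import requests

HOMESERVER = "https://example.org"  # placeholder homeserver
ACCESS_TOKEN = "syt_placeholder"  # placeholder access token

def send_with_retry(room_id: str, txn_id: str, body: dict) -> str:
    """PUT with the same transaction ID on every retry; the server deduplicates."""
    url = (
        f"{HOMESERVER}/_matrix/client/r0/rooms/{room_id}"
        f"/send/m.room.message/{txn_id}"
    )
    for attempt in range(3):
        try:
            resp = requests.put(
                url,
                json=body,
                headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
                timeout=10,
            )
            resp.raise_for_status()
            # With this fix, a retried transaction ID maps back to the
            # originally persisted event instead of minting a duplicate.
            return resp.json()["event_id"]
        except requests.RequestException:
            if attempt == 2:
                raise
    raise AssertionError("unreachable")
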
9 changes: 6 additions & 3 deletions synapse/handlers/federation.py
@@ -2966,17 +2966,20 @@ async def persist_events_and_notify(
return result["max_stream_id"]
else:
assert self.storage.persistence
max_stream_token = await self.storage.persistence.persist_events(

# Note that this returns the events that were persisted, which may not be
# the same as those passed in if some were deduplicated due to transaction IDs.
events, max_stream_token = await self.storage.persistence.persist_events(
event_and_contexts, backfilled=backfilled
)

if self._ephemeral_messages_enabled:
for (event, context) in event_and_contexts:
for event in events:
# If there's an expiry timestamp on the event, schedule its expiry.
self._message_handler.maybe_schedule_expiry(event)

if not backfilled: # Never notify for backfilled events
for event, _ in event_and_contexts:
for event in events:
await self._notify_persisted_event(event, max_stream_token)

return max_stream_token.stream
34 changes: 26 additions & 8 deletions synapse/handlers/message.py
@@ -686,7 +686,7 @@ async def create_and_send_nonmember_event(
send this event.

Returns:
The event, and its stream ordering (if state event deduplication happened,
The event, and its stream ordering (if deduplication happened,
the previous, duplicate event).

Raises:
@@ -709,6 +709,16 @@ async def create_and_send_nonmember_event(
# extremities to pile up, which in turn leads to state resolution
# taking longer.
with (await self.limiter.queue(event_dict["room_id"])):
if txn_id and requester.access_token_id:
existing_event_id = await self.store.get_event_id_from_transaction_id(
requester.user.to_string(), requester.access_token_id, txn_id,
)
if existing_event_id:
event = await self.store.get_event(existing_event_id)
# we know it was persisted, so must have a stream ordering
assert event.internal_metadata.stream_ordering
return event, event.internal_metadata.stream_ordering

event, context = await self.create_event(
requester, event_dict, token_id=requester.access_token_id, txn_id=txn_id
)
@@ -910,10 +920,14 @@ async def handle_new_client_event(
extra_users=extra_users,
)
stream_id = result["stream_id"]
event.internal_metadata.stream_ordering = stream_id
event_id = result["event_id"]
if event_id != event.event_id:
event = await self.store.get_event(event_id)
else:
event.internal_metadata.stream_ordering = stream_id
return event

stream_id = await self.persist_and_notify_client_event(
event, stream_id = await self.persist_and_notify_client_event(
requester, event, context, ratelimit=ratelimit, extra_users=extra_users
)

@@ -962,7 +976,7 @@ async def persist_and_notify_client_event(
context: EventContext,
ratelimit: bool = True,
extra_users: List[UserID] = [],
) -> int:
) -> Tuple[EventBase, int]:
"""Called when we have fully built the event, have already
calculated the push actions for the event, and checked auth.

@@ -1134,9 +1148,13 @@ def is_inviter_member_event(e):
if prev_state_ids:
raise AuthError(403, "Changing the room create event is forbidden")

event_pos, max_stream_token = await self.storage.persistence.persist_event(
event, context=context
)
# Note that this returns the event that was persisted, which may not be
# the same as we passed in if it was deduplicated due to transaction IDs.
(
event,
event_pos,
max_stream_token,
) = await self.storage.persistence.persist_event(event, context=context)

if self._ephemeral_events_enabled:
# If there's an expiry timestamp on the event, schedule its expiry.
@@ -1157,7 +1175,7 @@ def _notify():
# matters as sometimes presence code can take a while.
run_in_background(self._bump_active_time, requester.user)

return event_pos.stream
return event, event_pos.stream

async def _bump_active_time(self, user: UserID) -> None:
try:
10 changes: 9 additions & 1 deletion synapse/handlers/room_member.py
@@ -171,6 +171,14 @@ async def _local_membership_update(
if requester.is_guest:
content["kind"] = "guest"

if txn_id and requester.access_token_id:
existing_event_id = await self.store.get_event_id_from_transaction_id(
requester.user.to_string(), requester.access_token_id, txn_id,
)
if existing_event_id:
event_pos = await self.store.get_position_for_event(existing_event_id)
return existing_event_id, event_pos.stream

event, context = await self.event_creation_handler.create_event(
requester,
{
@@ -679,7 +687,7 @@ async def send_membership_event(
if is_blocked:
raise SynapseError(403, "This room has been blocked on this server")

await self.event_creation_handler.handle_new_client_event(
event = await self.event_creation_handler.handle_new_client_event(
requester, event, context, extra_users=[target_user], ratelimit=ratelimit
)

7 changes: 5 additions & 2 deletions synapse/replication/http/send_event.py
@@ -116,11 +116,14 @@ async def _handle_request(self, request, event_id):
"Got event to send with ID: %s into room: %s", event.event_id, event.room_id
)

stream_id = await self.event_creation_handler.persist_and_notify_client_event(
(
event,
stream_id,
) = await self.event_creation_handler.persist_and_notify_client_event(
requester, event, context, ratelimit=ratelimit, extra_users=extra_users
)

return 200, {"stream_id": stream_id}
return 200, {"stream_id": stream_id, "event_id": event.event_id}


def register_servlets(hs, http_server):
30 changes: 30 additions & 0 deletions synapse/storage/databases/main/events.py
@@ -361,6 +361,8 @@ def _persist_events_txn(

self._store_event_txn(txn, events_and_contexts=events_and_contexts)

self._persist_transaction_ids_txn(txn, events_and_contexts)

# Insert into event_to_state_groups.
self._store_event_state_mappings_txn(txn, events_and_contexts)

@@ -405,6 +407,34 @@ def _persist_events_txn(
# room_memberships, where applicable.
self._update_current_state_txn(txn, state_delta_for_room, min_stream_order)

def _persist_transaction_ids_txn(
self,
txn: LoggingTransaction,
events_and_contexts: List[Tuple[EventBase, EventContext]],
):
"""Persist the mapping from transaction IDs to event IDs (if defined).
"""

to_insert = []
for event, _ in events_and_contexts:
token_id = getattr(event.internal_metadata, "token_id", None)
txn_id = getattr(event.internal_metadata, "txn_id", None)
if token_id and txn_id:
to_insert.append(
{
"event_id": event.event_id,
"user_id": event.sender,
"token_id": token_id,
"txn_id": txn_id,
"inserted_ts": self._clock.time_msec(),
}
)

if to_insert:
self.db_pool.simple_insert_many_txn(
txn, table="event_txn_id", values=to_insert,
)

def _update_current_state_txn(
self,
txn: LoggingTransaction,
62 changes: 61 additions & 1 deletion synapse/storage/databases/main/events_worker.py
@@ -12,7 +12,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import itertools
import logging
import threading
@@ -137,6 +136,15 @@ def __init__(self, database: DatabasePool, db_conn, hs):
db_conn, "events", "stream_ordering", step=-1
)

if not hs.config.worker.worker_app:
Member: Can probably be put on the background worker?
Member Author: Ugh, yes but that work isn't in 1.21 :(
Member: Oh, right. 🤦 I'll update it when this gets merged forward.
Member: So if we're retargeting this to develop, seems like we can make this change!

# We periodically clean out old transaction ID mappings
self._clock.looping_call(
run_as_background_process,
5 * 60 * 1000,
"_cleanup_old_transaction_ids",
self._cleanup_old_transaction_ids,
)

self._get_event_cache = Cache(
"*getEvent*",
keylen=3,
@@ -1304,3 +1312,55 @@ def get_next_event_to_expire_txn(txn):
return await self.db_pool.runInteraction(
desc="get_next_event_to_expire", func=get_next_event_to_expire_txn
)

async def get_event_id_from_transaction_id(
self, user_id: str, token_id: int, txn_id: str
) -> Optional[str]:
"""Look up if we have already persisted an event for the transaction ID,
returning the event ID if so.
"""
return await self.db_pool.simple_select_one_onecol(
table="event_txn_id",
keyvalues={"user_id": user_id, "token_id": token_id, "txn_id": txn_id},
retcol="event_id",
allow_none=True,
desc="get_event_id_from_transaction_id",
)

async def get_already_persisted_events(
self, events: Iterable[EventBase]
) -> Dict[str, str]:
"""Look up if we have already persisted an event for the transaction ID,
returning a mapping from event ID in the given list to the event ID of
an existing event.
"""

mapping = {}

for event in events:
token_id = getattr(event.internal_metadata, "token_id", None)
txn_id = getattr(event.internal_metadata, "txn_id", None)
if token_id and txn_id:
Member: slightly feeling that it shouldn't be the storage layer's responsibility to do this digging, but ymmv
Member Author: Yeeeah, though this only gets called from persist_events so we'd have to thread this through persist_events if we wanted to move the logic out of storage.
Member: ok

existing = await self.get_event_id_from_transaction_id(
event.sender, token_id, txn_id
)
Member Author: I don't know if we want to try and batch these up a bit? The standard simple_select_many doesn't work due to it only supporting one column. There are psycopg2 helper functions that might help us though.
Member: It looks like something similar could be made which takes an iterable of tuples (or dicts)?
Member Author: Yup, I'm just not sure how much I want to try and do that for something that is going in an RC.
Member: park it for now, optimise later?

if existing:
mapping[event.event_id] = existing

return mapping
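
As a follow-up to the batching thread above, a rough sketch of what a batched lookup could look like using psycopg2's execute_values helper (a real psycopg2 function); no such helper exists in Synapse's DatabasePool at this point, so the function name and wiring here are illustrative only:

from typing import Dict, Iterable, Tuple

import psycopg2
import psycopg2.extras

def get_event_ids_for_transactions(
    conn: "psycopg2.extensions.connection",
    keys: Iterable[Tuple[str, int, str]],  # (user_id, token_id, txn_id)
) -> Dict[Tuple[str, int, str], str]:
    """Resolve many (user_id, token_id, txn_id) triples in one round trip."""
    with conn.cursor() as cur:
        # execute_values expands the single %s into a VALUES list, which we
        # join against event_txn_id to fetch all matches at once.
        rows = psycopg2.extras.execute_values(
            cur,
            """
            SELECT e.user_id, e.token_id, e.txn_id, e.event_id
            FROM event_txn_id AS e
            JOIN (VALUES %s) AS t(user_id, token_id, txn_id)
              ON e.user_id = t.user_id
             AND e.token_id = t.token_id
             AND e.txn_id = t.txn_id
            """,
            list(keys),
            fetch=True,
        )
    return {(u, tok, txn): event_id for (u, tok, txn, event_id) in rows}
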

async def _cleanup_old_transaction_ids(self):
"""Cleans out transaction id mappings older than 24hrs.
"""

def _cleanup_old_transaction_ids_txn(txn):
sql = """
DELETE FROM event_txn_id
WHERE inserted_ts < ?
"""
one_day_ago = self._clock.time_msec() - 24 * 60 * 60 * 1000
Member: One day ago is essentially the largest you could get behind on federation and still not have duplicates?
Member Author: We only get transaction IDs from local clients, who I believe shouldn't retry if significant time has passed.

txn.execute(sql, (one_day_ago,))

return await self.db_pool.runInteraction(
"_cleanup_old_transaction_ids", _cleanup_old_transaction_ids_txn,
)
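
For readers unfamiliar with the scheduling used earlier in this file: self._clock.looping_call is a thin wrapper around Twisted's LoopingCall. A standalone sketch of the same wiring, with a stubbed-out cleanup standing in for the real database interaction:

from twisted.internet import reactor, task

CLEANUP_INTERVAL_MS = 5 * 60 * 1000  # matches the 5-minute interval above

def cleanup_old_transaction_ids() -> None:
    # Stand-in for the real DELETE FROM event_txn_id WHERE inserted_ts < ?
    print("pruning transaction ID mappings older than 24h")

loop = task.LoopingCall(cleanup_old_transaction_ids)
loop.start(CLEANUP_INTERVAL_MS / 1000.0, now=False)
reactor.callLater(20 * 60, reactor.stop)  # let a few iterations run, then exit
reactor.run()
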
32 changes: 32 additions & 0 deletions synapse/storage/databases/main/schema/delta/58/19txn_id.sql
@@ -0,0 +1,32 @@
/* Copyright 2020 The Matrix.org Foundation C.I.C
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/


-- A map of recent events persisted with transaction IDs. Used to deduplicate
-- send event requests with the same transaction ID.
Comment on lines +17 to +18
Member: It might be nice to mention what these are expected to be deduplicated across (user, device, transaction). I think most of the other tables we have use the device_id, not the access token ID? Are these mostly synonymous?

--
-- Note, transaction IDs are scoped to the user ID/access token that was used to
-- make the request.
CREATE TABLE event_txn_id (
event_id TEXT NOT NULL,
user_id TEXT NOT NULL,
token_id BIGINT NOT NULL,
txn_id TEXT NOT NULL,
inserted_ts BIGINT NOT NULL
);

CREATE UNIQUE INDEX event_txn_id_event_id ON event_txn_id(event_id);
CREATE UNIQUE INDEX event_txn_id_txn_id ON event_txn_id(user_id, token_id, txn_id);
CREATE INDEX event_txn_id_ts ON event_txn_id(inserted_ts);
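
A self-contained sketch (using Python's sqlite3 rather than Synapse's database layer) of how the unique index on (user_id, token_id, txn_id) turns a retried transaction into a constraint violation instead of a second row; the event IDs and user are made-up sample data:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript(
    """
    CREATE TABLE event_txn_id (
        event_id TEXT NOT NULL,
        user_id TEXT NOT NULL,
        token_id BIGINT NOT NULL,
        txn_id TEXT NOT NULL,
        inserted_ts BIGINT NOT NULL
    );
    CREATE UNIQUE INDEX event_txn_id_txn_id
        ON event_txn_id(user_id, token_id, txn_id);
    """
)

row = ("$event1:example.org", "@alice:example.org", 1, "txn-abc", 1602000000000)
conn.execute("INSERT INTO event_txn_id VALUES (?, ?, ?, ?, ?)", row)
try:
    # Same user/token/txn_id with a different event ID: rejected by the index,
    # so a retry can be mapped back to $event1 instead of persisting anew.
    conn.execute(
        "INSERT INTO event_txn_id VALUES (?, ?, ?, ?, ?)",
        ("$event2:example.org",) + row[1:],
    )
except sqlite3.IntegrityError as e:
    print("duplicate transaction rejected:", e)
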