From e1ed959a68f8039130be821c27e82e75b5d59e5f Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 9 Sep 2024 10:41:25 -0500 Subject: [PATCH 01/18] Sliding Sync: Get `bump_stamp` from new sliding sync tables because it's faster (#17658) Get `bump_stamp` from [new sliding sync tables](https://github.com/element-hq/synapse/pull/17512) which should be faster (performance) than flipping through the latest events in the room. --- changelog.d/17658.misc | 1 + synapse/handlers/sliding_sync/__init__.py | 74 ++++-- synapse/storage/databases/main/events.py | 61 ++--- .../storage/databases/main/sliding_sync.py | 40 ++++ tests/storage/test_sliding_sync_tables.py | 213 +++++++++++++++++- 5 files changed, 333 insertions(+), 56 deletions(-) create mode 100644 changelog.d/17658.misc diff --git a/changelog.d/17658.misc b/changelog.d/17658.misc new file mode 100644 index 0000000000..0bdbc1140d --- /dev/null +++ b/changelog.d/17658.misc @@ -0,0 +1 @@ +Get `bump_stamp` from [new sliding sync tables](https://github.com/element-hq/synapse/pull/17512) which should be faster. diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index 444cc32f36..7340c6ec05 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -1040,29 +1040,67 @@ class SlidingSyncHandler: ) ) - # By default, just choose the membership event position + # Figure out the last bump event in the room + # + # By default, just choose the membership event position for any non-join membership bump_stamp = room_membership_for_user_at_to_token.event_pos.stream - - # Figure out the last bump event in the room if we're in the room. + # If we're joined to the room, we need to find the last bump event before the + # `to_token` if room_membership_for_user_at_to_token.membership == Membership.JOIN: - last_bump_event_result = ( - await self.store.get_last_event_pos_in_room_before_stream_ordering( - room_id, - to_token.room_key, - event_types=SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES, - ) + # We can quickly query for the latest bump event in the room using the + # sliding sync tables. + latest_room_bump_stamp = await self.store.get_latest_bump_stamp_for_room( + room_id ) - # But if we found a bump event, use that instead - if last_bump_event_result is not None: - _, new_bump_event_pos = last_bump_event_result + min_to_token_position = to_token.room_key.stream - # If we've just joined a remote room, then the last bump event may - # have been backfilled (and so have a negative stream ordering). - # These negative stream orderings can't sensibly be compared, so - # instead we use the membership event position. - if new_bump_event_pos.stream > 0: - bump_stamp = new_bump_event_pos.stream + # If we can rely on the new sliding sync tables and the `bump_stamp` is + # `None`, just fallback to the membership event position. This can happen + # when we've just joined a remote room and all the events are backfilled. + if ( + # FIXME: The background job check can be removed once we bump + # `SCHEMA_COMPAT_VERSION` and run the foreground update for + # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` + # (tracked by https://github.com/element-hq/synapse/issues/17623) + await self.store.have_finished_sliding_sync_background_jobs() + and latest_room_bump_stamp is None + ): + pass + + # The `bump_stamp` stored in the database might be ahead of our token. 
Since + # `bump_stamp` is only a `stream_ordering` position, we can't be 100% sure + # that's before the `to_token` in all scenarios. The only scenario we can be + # sure of is if the `bump_stamp` is totally before the minimum position from + # the token. + # + # We don't need to check if the background update has finished, as if the + # returned bump stamp is not None then it must be up to date. + elif ( + latest_room_bump_stamp is not None + and latest_room_bump_stamp < min_to_token_position + ): + bump_stamp = latest_room_bump_stamp + + # Otherwise, if it's within or after the `to_token`, we need to find the + # last bump event before the `to_token`. + else: + last_bump_event_result = ( + await self.store.get_last_event_pos_in_room_before_stream_ordering( + room_id, + to_token.room_key, + event_types=SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES, + ) + ) + if last_bump_event_result is not None: + _, new_bump_event_pos = last_bump_event_result + + # If we've just joined a remote room, then the last bump event may + # have been backfilled (and so have a negative stream ordering). + # These negative stream orderings can't sensibly be compared, so + # instead we use the membership event position. + if new_bump_event_pos.stream > 0: + bump_stamp = new_bump_event_pos.stream unstable_expanded_timeline = False prev_room_sync_config = previous_connection_state.room_configs.get(room_id) diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index d423d80efa..e5f63019fd 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -327,6 +327,13 @@ class PersistEventsStore: async with stream_ordering_manager as stream_orderings: for (event, _), stream in zip(events_and_contexts, stream_orderings): + # XXX: We can't rely on `stream_ordering`/`instance_name` being correct + # at this point. We could be working with events that were previously + # persisted as an `outlier` with one `stream_ordering` but are now being + # persisted again and de-outliered and are being assigned a different + # `stream_ordering` here that won't end up being used. + # `_update_outliers_txn()` will fix this discrepancy (always use the + # `stream_ordering` from the first time it was persisted). event.internal_metadata.stream_ordering = stream event.internal_metadata.instance_name = self._instance_name @@ -470,11 +477,11 @@ class PersistEventsStore: membership_infos_to_insert_membership_snapshots.append( # XXX: We don't use `SlidingSyncMembershipInfoWithEventPos` here # because we're sourcing the event from `events_and_contexts`, we - # can't rely on `stream_ordering`/`instance_name` being correct. We - # could be working with events that were previously persisted as an - # `outlier` with one `stream_ordering` but are now being persisted - # again and de-outliered and assigned a different `stream_ordering` - # that won't end up being used. Since we call + # can't rely on `stream_ordering`/`instance_name` being correct at + # this point. We could be working with events that were previously + # persisted as an `outlier` with one `stream_ordering` but are now + # being persisted again and de-outliered and assigned a different + # `stream_ordering` that won't end up being used. 
Since we call # `_calculate_sliding_sync_table_changes()` before # `_update_outliers_txn()` which fixes this discrepancy (always use # the `stream_ordering` from the first time it was persisted), we're @@ -591,11 +598,17 @@ class PersistEventsStore: event_types=SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES, ) ) - bump_stamp_to_fully_insert = ( - most_recent_bump_event_pos_results[1].stream - if most_recent_bump_event_pos_results is not None - else None - ) + if most_recent_bump_event_pos_results is not None: + _, new_bump_event_pos = most_recent_bump_event_pos_results + + # If we've just joined a remote room, then the last bump event may + # have been backfilled (and so have a negative stream ordering). + # These negative stream orderings can't sensibly be compared, so + # instead just leave it as `None` in the table and we will use their + # membership event position as the bump event position in the + # Sliding Sync API. + if new_bump_event_pos.stream > 0: + bump_stamp_to_fully_insert = new_bump_event_pos.stream current_state_ids_map = dict( await self.store.get_partial_filtered_current_state_ids( @@ -2123,31 +2136,26 @@ class PersistEventsStore: if len(events_and_contexts) == 0: return - # We only update the sliding sync tables for non-backfilled events. - # - # Check if the first event is a backfilled event (with a negative - # `stream_ordering`). If one event is backfilled, we assume this whole batch was - # backfilled. - first_event_stream_ordering = events_and_contexts[0][ - 0 - ].internal_metadata.stream_ordering - # This should exist for persisted events - assert first_event_stream_ordering is not None - if first_event_stream_ordering < 0: - return - # Since the list is sorted ascending by `stream_ordering`, the last event should # have the highest `stream_ordering`. max_stream_ordering = events_and_contexts[-1][ 0 ].internal_metadata.stream_ordering + # `stream_ordering` should be assigned for persisted events + assert max_stream_ordering is not None + # Check if the event is a backfilled event (with a negative `stream_ordering`). + # If one event is backfilled, we assume this whole batch was backfilled. + if max_stream_ordering < 0: + # We only update the sliding sync tables for non-backfilled events. + return + max_bump_stamp = None for event, _ in reversed(events_and_contexts): # Sanity check that all events belong to the same room assert event.room_id == room_id if event.type in SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES: - # This should exist for persisted events + # `stream_ordering` should be assigned for persisted events assert event.internal_metadata.stream_ordering is not None max_bump_stamp = event.internal_metadata.stream_ordering @@ -2156,11 +2164,6 @@ class PersistEventsStore: # matching bump event which should have the highest `stream_ordering`. break - # We should have exited earlier if there were no events - assert ( - max_stream_ordering is not None - ), "Expected to have a stream_ordering if we have events" - # Handle updating the `sliding_sync_joined_rooms` table. # txn.execute( diff --git a/synapse/storage/databases/main/sliding_sync.py b/synapse/storage/databases/main/sliding_sync.py index dc747d7ac0..83939d10b0 100644 --- a/synapse/storage/databases/main/sliding_sync.py +++ b/synapse/storage/databases/main/sliding_sync.py @@ -41,6 +41,46 @@ logger = logging.getLogger(__name__) class SlidingSyncStore(SQLBaseStore): + async def get_latest_bump_stamp_for_room( + self, + room_id: str, + ) -> Optional[int]: + """ + Get the `bump_stamp` for the room. 
+ + The `bump_stamp` is the `stream_ordering` of the last event according to the + `bump_event_types`. This helps clients sort more readily without them needing to + pull in a bunch of the timeline to determine the last activity. + `bump_event_types` is a thing because for example, we don't want display name + changes to mark the room as unread and bump it to the top. For encrypted rooms, + we just have to consider any activity as a bump because we can't see the content + and the client has to figure it out for themselves. + + This should only be called where the server is participating + in the room (someone local is joined). + + Returns: + The `bump_stamp` for the room (which can be `None`). + """ + + return cast( + Optional[int], + await self.db_pool.simple_select_one_onecol( + table="sliding_sync_joined_rooms", + keyvalues={"room_id": room_id}, + retcol="bump_stamp", + # FIXME: This should be `False` once we bump `SCHEMA_COMPAT_VERSION` and run the + # foreground update for + # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked + # by https://github.com/element-hq/synapse/issues/17623) + # + # The should be `allow_none=False` in the future because event though + # `bump_stamp` itself can be `None`, we should have a row in the + # `sliding_sync_joined_rooms` table for any joined room. + allow_none=True, + ), + ) + async def persist_per_connection_state( self, user_id: str, diff --git a/tests/storage/test_sliding_sync_tables.py b/tests/storage/test_sliding_sync_tables.py index 621f46fff8..de80ad53cd 100644 --- a/tests/storage/test_sliding_sync_tables.py +++ b/tests/storage/test_sliding_sync_tables.py @@ -106,6 +106,12 @@ class SlidingSyncTablesTestCaseBase(HomeserverTestCase): assert persist_events_store is not None self.persist_events_store = persist_events_store + persist_controller = self.hs.get_storage_controllers().persistence + assert persist_controller is not None + self.persist_controller = persist_controller + + self.state_handler = self.hs.get_state_handler() + def _get_sliding_sync_joined_rooms(self) -> Dict[str, _SlidingSyncJoinedRoomResult]: """ Return the rows from the `sliding_sync_joined_rooms` table. @@ -260,10 +266,8 @@ class SlidingSyncTablesTestCaseBase(HomeserverTestCase): ) ) context = EventContext.for_outlier(self.hs.get_storage_controllers()) - persist_controller = self.hs.get_storage_controllers().persistence - assert persist_controller is not None persisted_event, _, _ = self.get_success( - persist_controller.persist_event(invite_event, context) + self.persist_controller.persist_event(invite_event, context) ) self._remote_invite_count += 1 @@ -316,10 +320,8 @@ class SlidingSyncTablesTestCaseBase(HomeserverTestCase): ) ) context = EventContext.for_outlier(self.hs.get_storage_controllers()) - persist_controller = self.hs.get_storage_controllers().persistence - assert persist_controller is not None persisted_event, _, _ = self.get_success( - persist_controller.persist_event(kick_event, context) + self.persist_controller.persist_event(kick_event, context) ) return persisted_event @@ -926,6 +928,201 @@ class SlidingSyncTablesTestCase(SlidingSyncTablesTestCaseBase): user2_snapshot, ) + def test_joined_room_bump_stamp_backfill(self) -> None: + """ + Test that `bump_stamp` ignores backfilled events, i.e. events with a + negative stream ordering. 
+ """ + user1_id = self.register_user("user1", "pass") + _user1_tok = self.login(user1_id, "pass") + + # Create a remote room + creator = "@user:other" + room_id = "!foo:other" + room_version = RoomVersions.V10 + shared_kwargs = { + "room_id": room_id, + "room_version": room_version.identifier, + } + + create_tuple = self.get_success( + create_event( + self.hs, + prev_event_ids=[], + type=EventTypes.Create, + state_key="", + content={ + # The `ROOM_CREATOR` field could be removed if we used a room + # version > 10 (in favor of relying on `sender`) + EventContentFields.ROOM_CREATOR: creator, + EventContentFields.ROOM_VERSION: room_version.identifier, + }, + sender=creator, + **shared_kwargs, + ) + ) + creator_tuple = self.get_success( + create_event( + self.hs, + prev_event_ids=[create_tuple[0].event_id], + auth_event_ids=[create_tuple[0].event_id], + type=EventTypes.Member, + state_key=creator, + content={"membership": Membership.JOIN}, + sender=creator, + **shared_kwargs, + ) + ) + room_name_tuple = self.get_success( + create_event( + self.hs, + prev_event_ids=[creator_tuple[0].event_id], + auth_event_ids=[create_tuple[0].event_id, creator_tuple[0].event_id], + type=EventTypes.Name, + state_key="", + content={ + EventContentFields.ROOM_NAME: "my super duper room", + }, + sender=creator, + **shared_kwargs, + ) + ) + # We add a message event as a valid "bump type" + msg_tuple = self.get_success( + create_event( + self.hs, + prev_event_ids=[room_name_tuple[0].event_id], + auth_event_ids=[create_tuple[0].event_id, creator_tuple[0].event_id], + type=EventTypes.Message, + content={"body": "foo", "msgtype": "m.text"}, + sender=creator, + **shared_kwargs, + ) + ) + invite_tuple = self.get_success( + create_event( + self.hs, + prev_event_ids=[msg_tuple[0].event_id], + auth_event_ids=[create_tuple[0].event_id, creator_tuple[0].event_id], + type=EventTypes.Member, + state_key=user1_id, + content={"membership": Membership.INVITE}, + sender=creator, + **shared_kwargs, + ) + ) + + remote_events_and_contexts = [ + create_tuple, + creator_tuple, + room_name_tuple, + msg_tuple, + invite_tuple, + ] + + # Ensure the local HS knows the room version + self.get_success(self.store.store_room(room_id, creator, False, room_version)) + + # Persist these events as backfilled events. + for event, context in remote_events_and_contexts: + self.get_success( + self.persist_controller.persist_event(event, context, backfilled=True) + ) + + # Now we join the local user to the room. We want to make this feel as close to + # the real `process_remote_join()` as possible but we'd like to avoid some of + # the auth checks that would be done in the real code. + # + # FIXME: The test was originally written using this less-real + # `persist_event(...)` shortcut but it would be nice to use the real remote join + # process in a `FederatingHomeserverTestCase`. + flawed_join_tuple = self.get_success( + create_event( + self.hs, + prev_event_ids=[invite_tuple[0].event_id], + # This doesn't work correctly to create an `EventContext` that includes + # both of these state events. I assume it's because we're working on our + # local homeserver which has the remote state set as `outlier`. We have + # to create our own EventContext below to get this right. + auth_event_ids=[create_tuple[0].event_id, invite_tuple[0].event_id], + type=EventTypes.Member, + state_key=user1_id, + content={"membership": Membership.JOIN}, + sender=user1_id, + **shared_kwargs, + ) + ) + # We have to create our own context to get the state set correctly. 
If we use + # the `EventContext` from the `flawed_join_tuple`, the `current_state_events` + # table will only have the join event in it which should never happen in our + # real server. + join_event = flawed_join_tuple[0] + join_context = self.get_success( + self.state_handler.compute_event_context( + join_event, + state_ids_before_event={ + (e.type, e.state_key): e.event_id + for e in [create_tuple[0], invite_tuple[0], room_name_tuple[0]] + }, + partial_state=False, + ) + ) + join_event, _join_event_pos, _room_token = self.get_success( + self.persist_controller.persist_event(join_event, join_context) + ) + + # Make sure the tables are populated correctly + sliding_sync_joined_rooms_results = self._get_sliding_sync_joined_rooms() + self.assertIncludes( + set(sliding_sync_joined_rooms_results.keys()), + {room_id}, + exact=True, + ) + self.assertEqual( + sliding_sync_joined_rooms_results[room_id], + _SlidingSyncJoinedRoomResult( + room_id=room_id, + # This should be the last event in the room (the join membership) + event_stream_ordering=join_event.internal_metadata.stream_ordering, + # Since all of the bump events are backfilled, the `bump_stamp` should + # still be `None`. (and we will fallback to the users membership event + # position in the Sliding Sync API) + bump_stamp=None, + room_type=None, + # We still pick up state of the room even if it's backfilled + room_name="my super duper room", + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + + sliding_sync_membership_snapshots_results = ( + self._get_sliding_sync_membership_snapshots() + ) + self.assertIncludes( + set(sliding_sync_membership_snapshots_results.keys()), + { + (room_id, user1_id), + }, + exact=True, + ) + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id, + user_id=user1_id, + sender=user1_id, + membership_event_id=join_event.event_id, + membership=Membership.JOIN, + event_stream_ordering=join_event.internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name="my super duper room", + is_encrypted=False, + tombstone_successor_room_id=None, + ), + ) + @parameterized.expand( # Test both an insert an upsert into the # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` to exercise @@ -1036,11 +1233,9 @@ class SlidingSyncTablesTestCase(SlidingSyncTablesTestCaseBase): context = self.get_success(unpersisted_context.persist(event)) events_to_persist.append((event, context)) - persist_controller = self.hs.get_storage_controllers().persistence - assert persist_controller is not None for event, context in events_to_persist: self.get_success( - persist_controller.persist_event( + self.persist_controller.persist_event( event, context, ) From 515c1cc0a1ad7957d5aa2caa8b6423fa93d4193c Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 9 Sep 2024 17:55:59 -0500 Subject: [PATCH 02/18] Sliding Sync: Add comment to explain extra case where you can be invited -> banned -> unbanned (#17654) Add comment to explain extra case where you can be invited -> banned -> unbanned and we want to be able to find the invite event. 
Follow-up to https://github.com/element-hq/synapse/pull/17636#discussion_r1738993330 --- changelog.d/17654.misc | 1 + synapse/storage/databases/main/events_bg_updates.py | 10 +++++++--- 2 files changed, 8 insertions(+), 3 deletions(-) create mode 100644 changelog.d/17654.misc diff --git a/changelog.d/17654.misc b/changelog.d/17654.misc new file mode 100644 index 0000000000..756918e2b2 --- /dev/null +++ b/changelog.d/17654.misc @@ -0,0 +1 @@ +Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index e20fc4471e..12670e87d2 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -1966,7 +1966,7 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS ) return 0 - def _find_previous_membership_txn( + def _find_previous_invite_or_knock_membership_txn( txn: LoggingTransaction, room_id: str, user_id: str, event_id: str ) -> Tuple[str, str]: # Find the previous invite/knock event before the leave event @@ -2007,6 +2007,10 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS ( room_id, user_id, + # We look explicitly for `invite` and `knock` events instead of + # just their previous membership as someone could have been `invite` + # -> `ban` -> unbanned (`leave`) and we want to find the `invite` + # event where the stripped state is. Membership.INVITE, Membership.KNOCK, event_id, @@ -2155,8 +2159,8 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS invite_or_knock_event_id, invite_or_knock_membership, ) = await self.db_pool.runInteraction( - "sliding_sync_membership_snapshots_bg_update._find_previous_membership", - _find_previous_membership_txn, + "sliding_sync_membership_snapshots_bg_update._find_previous_invite_or_knock_membership_txn", + _find_previous_invite_or_knock_membership_txn, room_id, user_id, membership_event_id, From 588e5b521d56f8605e2a029626a5e59bb2b1a40f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 10 Sep 2024 09:52:42 +0100 Subject: [PATCH 03/18] Sliding Sync: Retrieve fewer events from DB in sync (#17688) When using timeline limit of 1 we end up fetching 2 events from the DB purely to tell if the response was "limited" or not. Lets not do that. --- changelog.d/17688.misc | 1 + synapse/handlers/admin.py | 1 + synapse/handlers/pagination.py | 2 + synapse/handlers/room.py | 2 +- synapse/handlers/sliding_sync/__init__.py | 20 +--- synapse/handlers/sync.py | 11 +-- synapse/storage/databases/main/stream.py | 93 ++++++++++++------- .../sliding_sync/test_rooms_timeline.py | 48 ++++------ tests/storage/test_stream.py | 2 +- 9 files changed, 89 insertions(+), 91 deletions(-) create mode 100644 changelog.d/17688.misc diff --git a/changelog.d/17688.misc b/changelog.d/17688.misc new file mode 100644 index 0000000000..7ba8d48fbe --- /dev/null +++ b/changelog.d/17688.misc @@ -0,0 +1 @@ +Speed up sync by pulling out fewer events from the database. 
diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index c874d22eac..65b3f153da 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -200,6 +200,7 @@ class AdminHandler: ( events, _, + _, ) = await self._store.paginate_room_events_by_topological_ordering( room_id=room_id, from_key=from_key, diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 3c44458fa3..4070b74b7a 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -510,6 +510,7 @@ class PaginationHandler: ( events, next_key, + _, ) = await self.store.paginate_room_events_by_topological_ordering( room_id=room_id, from_key=from_token.room_key, @@ -588,6 +589,7 @@ class PaginationHandler: ( events, next_key, + _, ) = await self.store.paginate_room_events_by_topological_ordering( room_id=room_id, from_key=from_token.room_key, diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 35c88f1b91..386375d64b 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1753,7 +1753,7 @@ class RoomEventSource(EventSource[RoomStreamToken, EventBase]): ) events = list(room_events) - events.extend(e for evs, _ in room_to_events.values() for e in evs) + events.extend(e for evs, _, _ in room_to_events.values() for e in evs) # We know stream_ordering must be not None here, as its been # persisted, but mypy doesn't know that diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index 7340c6ec05..cf368be9d9 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -47,7 +47,6 @@ from synapse.types import ( MutableStateMap, PersistedEventPosition, Requester, - RoomStreamToken, SlidingSyncStreamToken, StateMap, StreamKeyType, @@ -632,7 +631,7 @@ class SlidingSyncHandler: # Use `stream_ordering` for updates else paginate_room_events_by_stream_ordering ) - timeline_events, new_room_key = await pagination_method( + timeline_events, new_room_key, limited = await pagination_method( room_id=room_id, # The bounds are reversed so we can paginate backwards # (from newer to older events) starting at to_bound. @@ -640,28 +639,13 @@ class SlidingSyncHandler: from_key=to_bound, to_key=timeline_from_bound, direction=Direction.BACKWARDS, - # We add one so we can determine if there are enough events to saturate - # the limit or not (see `limited`) - limit=room_sync_config.timeline_limit + 1, + limit=room_sync_config.timeline_limit, ) # We want to return the events in ascending order (the last event is the # most recent). timeline_events.reverse() - # Determine our `limited` status based on the timeline. We do this before - # filtering the events so we can accurately determine if there is more to - # paginate even if we filter out some/all events. 
- if len(timeline_events) > room_sync_config.timeline_limit: - limited = True - # Get rid of that extra "+ 1" event because we only used it to determine - # if we hit the limit or not - timeline_events = timeline_events[-room_sync_config.timeline_limit :] - assert timeline_events[0].internal_metadata.stream_ordering - new_room_key = RoomStreamToken( - stream=timeline_events[0].internal_metadata.stream_ordering - 1 - ) - # Make sure we don't expose any events that the client shouldn't see timeline_events = await filter_events_for_client( self.storage_controllers, diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 609840bfe9..f4ea90fbd7 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -906,7 +906,7 @@ class SyncHandler: # Use `stream_ordering` for updates else paginate_room_events_by_stream_ordering ) - events, end_key = await pagination_method( + events, end_key, limited = await pagination_method( room_id=room_id, # The bounds are reversed so we can paginate backwards # (from newer to older events) starting at to_bound. @@ -914,9 +914,7 @@ class SyncHandler: from_key=end_key, to_key=since_key, direction=Direction.BACKWARDS, - # We add one so we can determine if there are enough events to saturate - # the limit or not (see `limited`) - limit=load_limit + 1, + limit=load_limit, ) # We want to return the events in ascending order (the last event is the # most recent). @@ -971,9 +969,6 @@ class SyncHandler: loaded_recents.extend(recents) recents = loaded_recents - if len(events) <= load_limit: - limited = False - break max_repeat -= 1 if len(recents) > timeline_limit: @@ -2608,7 +2603,7 @@ class SyncHandler: newly_joined = room_id in newly_joined_rooms if room_entry: - events, start_key = room_entry + events, start_key, _ = room_entry # We want to return the events in ascending order (the last event is the # most recent). events.reverse() diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 68d4168621..459436e304 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -108,7 +108,7 @@ class PaginateFunction(Protocol): to_key: Optional[RoomStreamToken] = None, direction: Direction = Direction.BACKWARDS, limit: int = 0, - ) -> Tuple[List[EventBase], RoomStreamToken]: ... + ) -> Tuple[List[EventBase], RoomStreamToken, bool]: ... # Used as return values for pagination APIs @@ -679,7 +679,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): to_key: Optional[RoomStreamToken] = None, direction: Direction = Direction.BACKWARDS, limit: int = 0, - ) -> Dict[str, Tuple[List[EventBase], RoomStreamToken]]: + ) -> Dict[str, Tuple[List[EventBase], RoomStreamToken, bool]]: """Get new room events in stream ordering since `from_key`. Args: @@ -695,6 +695,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): A map from room id to a tuple containing: - list of recent events in the room - stream ordering key for the start of the chunk of events returned. 
+ - a boolean to indicate if there were more events but we hit the limit When Direction.FORWARDS: from_key < x <= to_key, (ascending order) When Direction.BACKWARDS: from_key >= x > to_key, (descending order) @@ -758,7 +759,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): to_key: Optional[RoomStreamToken] = None, direction: Direction = Direction.BACKWARDS, limit: int = 0, - ) -> Tuple[List[EventBase], RoomStreamToken]: + ) -> Tuple[List[EventBase], RoomStreamToken, bool]: """ Paginate events by `stream_ordering` in the room from the `from_key` in the given `direction` to the `to_key` or `limit`. @@ -773,8 +774,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): limit: Maximum number of events to return Returns: - The results as a list of events and a token that points to the end - of the result set. If no events are returned then the end of the + The results as a list of events, a token that points to the end of + the result set, and a boolean to indicate if there were more events + but we hit the limit. If no events are returned then the end of the stream has been reached (i.e. there are no events between `from_key` and `to_key`). @@ -798,7 +800,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): and to_key.is_before_or_eq(from_key) ): # Token selection matches what we do below if there are no rows - return [], to_key if to_key else from_key + return [], to_key if to_key else from_key, False # Or vice-versa, if we're looking backwards and our `from_key` is already before # our `to_key`. elif ( @@ -807,7 +809,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): and from_key.is_before_or_eq(to_key) ): # Token selection matches what we do below if there are no rows - return [], to_key if to_key else from_key + return [], to_key if to_key else from_key, False # We can do a quick sanity check to see if any events have been sent in the room # since the earlier token. @@ -826,7 +828,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): if not has_changed: # Token selection matches what we do below if there are no rows - return [], to_key if to_key else from_key + return [], to_key if to_key else from_key, False order, from_bound, to_bound = generate_pagination_bounds( direction, from_key, to_key @@ -842,7 +844,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): engine=self.database_engine, ) - def f(txn: LoggingTransaction) -> List[_EventDictReturn]: + def f(txn: LoggingTransaction) -> Tuple[List[_EventDictReturn], bool]: sql = f""" SELECT event_id, instance_name, stream_ordering FROM events @@ -854,9 +856,13 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): """ txn.execute(sql, (room_id, 2 * limit)) + # Get all the rows and check if we hit the limit. 
+ fetched_rows = txn.fetchall() + limited = len(fetched_rows) >= 2 * limit + rows = [ _EventDictReturn(event_id, None, stream_ordering) - for event_id, instance_name, stream_ordering in txn + for event_id, instance_name, stream_ordering in fetched_rows if _filter_results_by_stream( lower_token=( to_key if direction == Direction.BACKWARDS else from_key @@ -867,10 +873,17 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): instance_name=instance_name, stream_ordering=stream_ordering, ) - ][:limit] - return rows + ] - rows = await self.db_pool.runInteraction("get_room_events_stream_for_room", f) + if len(rows) > limit: + limited = True + + rows = rows[:limit] + return rows, limited + + rows, limited = await self.db_pool.runInteraction( + "get_room_events_stream_for_room", f + ) ret = await self.get_events_as_list( [r.event_id for r in rows], get_prev_content=True @@ -887,7 +900,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): # `_paginate_room_events_by_topological_ordering_txn(...)`) next_key = to_key if to_key else from_key - return ret, next_key + return ret, next_key, limited @trace async def get_current_state_delta_membership_changes_for_user( @@ -1191,7 +1204,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): if limit == 0: return [], end_token - rows, token = await self.db_pool.runInteraction( + rows, token, _ = await self.db_pool.runInteraction( "get_recent_event_ids_for_room", self._paginate_room_events_by_topological_ordering_txn, room_id, @@ -1765,7 +1778,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): topological=topological_ordering, stream=stream_ordering ) - rows, start_token = self._paginate_room_events_by_topological_ordering_txn( + rows, start_token, _ = self._paginate_room_events_by_topological_ordering_txn( txn, room_id, before_token, @@ -1775,7 +1788,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): ) events_before = [r.event_id for r in rows] - rows, end_token = self._paginate_room_events_by_topological_ordering_txn( + rows, end_token, _ = self._paginate_room_events_by_topological_ordering_txn( txn, room_id, after_token, @@ -1947,7 +1960,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): direction: Direction = Direction.BACKWARDS, limit: int = 0, event_filter: Optional[Filter] = None, - ) -> Tuple[List[_EventDictReturn], RoomStreamToken]: + ) -> Tuple[List[_EventDictReturn], RoomStreamToken, bool]: """Returns list of events before or after a given token. Args: @@ -1962,10 +1975,11 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): those that match the filter. Returns: - A list of _EventDictReturn and a token that points to the end of the - result set. If no events are returned then the end of the stream has - been reached (i.e. there are no events between `from_token` and - `to_token`), or `limit` is zero. + A list of _EventDictReturn, a token that points to the end of the + result set, and a boolean to indicate if there were more events but + we hit the limit. If no events are returned then the end of the + stream has been reached (i.e. there are no events between + `from_token` and `to_token`), or `limit` is zero. """ # We can bail early if we're looking forwards, and our `to_key` is already # before our `from_token`. 
@@ -1975,7 +1989,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): and to_token.is_before_or_eq(from_token) ): # Token selection matches what we do below if there are no rows - return [], to_token if to_token else from_token + return [], to_token if to_token else from_token, False # Or vice-versa, if we're looking backwards and our `from_token` is already before # our `to_token`. elif ( @@ -1984,7 +1998,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): and from_token.is_before_or_eq(to_token) ): # Token selection matches what we do below if there are no rows - return [], to_token if to_token else from_token + return [], to_token if to_token else from_token, False args: List[Any] = [room_id] @@ -2007,6 +2021,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): args.extend(filter_args) # We fetch more events as we'll filter the result set + requested_limit = int(limit) * 2 args.append(int(limit) * 2) select_keywords = "SELECT" @@ -2071,10 +2086,14 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): } txn.execute(sql, args) + # Get all the rows and check if we hit the limit. + fetched_rows = txn.fetchall() + limited = len(fetched_rows) >= requested_limit + # Filter the result set. rows = [ _EventDictReturn(event_id, topological_ordering, stream_ordering) - for event_id, instance_name, topological_ordering, stream_ordering in txn + for event_id, instance_name, topological_ordering, stream_ordering in fetched_rows if _filter_results( lower_token=( to_token if direction == Direction.BACKWARDS else from_token @@ -2086,7 +2105,12 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): topological_ordering=topological_ordering, stream_ordering=stream_ordering, ) - ][:limit] + ] + + if len(rows) > limit: + limited = True + + rows = rows[:limit] if rows: assert rows[-1].topological_ordering is not None @@ -2097,7 +2121,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): # TODO (erikj): We should work out what to do here instead. next_token = to_token if to_token else from_token - return rows, next_token + return rows, next_token, limited @trace @tag_args @@ -2110,7 +2134,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): direction: Direction = Direction.BACKWARDS, limit: int = 0, event_filter: Optional[Filter] = None, - ) -> Tuple[List[EventBase], RoomStreamToken]: + ) -> Tuple[List[EventBase], RoomStreamToken, bool]: """ Paginate events by `topological_ordering` (tie-break with `stream_ordering`) in the room from the `from_key` in the given `direction` to the `to_key` or @@ -2127,8 +2151,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): event_filter: If provided filters the events to those that match the filter. Returns: - The results as a list of events and a token that points to the end - of the result set. If no events are returned then the end of the + The results as a list of events, a token that points to the end of + the result set, and a boolean to indicate if there were more events + but we hit the limit. If no events are returned then the end of the stream has been reached (i.e. there are no events between `from_key` and `to_key`). @@ -2152,7 +2177,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): ): # Token selection matches what we do in `_paginate_room_events_txn` if there # are no rows - return [], to_key if to_key else from_key + return [], to_key if to_key else from_key, False # Or vice-versa, if we're looking backwards and our `from_key` is already before # our `to_key`. 
elif ( @@ -2162,9 +2187,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): ): # Token selection matches what we do in `_paginate_room_events_txn` if there # are no rows - return [], to_key if to_key else from_key + return [], to_key if to_key else from_key, False - rows, token = await self.db_pool.runInteraction( + rows, token, limited = await self.db_pool.runInteraction( "paginate_room_events_by_topological_ordering", self._paginate_room_events_by_topological_ordering_txn, room_id, @@ -2179,7 +2204,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): [r.event_id for r in rows], get_prev_content=True ) - return events, token + return events, token, limited @cached() async def get_id_for_instance(self, instance_name: str) -> int: diff --git a/tests/rest/client/sliding_sync/test_rooms_timeline.py b/tests/rest/client/sliding_sync/test_rooms_timeline.py index 0e027ff39d..2293994793 100644 --- a/tests/rest/client/sliding_sync/test_rooms_timeline.py +++ b/tests/rest/client/sliding_sync/test_rooms_timeline.py @@ -22,7 +22,7 @@ import synapse.rest.admin from synapse.api.constants import EventTypes from synapse.rest.client import login, room, sync from synapse.server import HomeServer -from synapse.types import StreamToken, StrSequence +from synapse.types import StrSequence from synapse.util import Clock from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase @@ -149,16 +149,10 @@ class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase): user2_tok = self.login(user2_id, "pass") room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) - self.helper.send(room_id1, "activity1", tok=user2_tok) - self.helper.send(room_id1, "activity2", tok=user2_tok) + event_response1 = self.helper.send(room_id1, "activity1", tok=user2_tok) + event_response2 = self.helper.send(room_id1, "activity2", tok=user2_tok) event_response3 = self.helper.send(room_id1, "activity3", tok=user2_tok) - event_pos3 = self.get_success( - self.store.get_position_for_event(event_response3["event_id"]) - ) event_response4 = self.helper.send(room_id1, "activity4", tok=user2_tok) - event_pos4 = self.get_success( - self.store.get_position_for_event(event_response4["event_id"]) - ) event_response5 = self.helper.send(room_id1, "activity5", tok=user2_tok) user1_join_response = self.helper.join(room_id1, user1_id, tok=user1_tok) @@ -196,27 +190,23 @@ class SlidingSyncRoomsTimelineTestCase(SlidingSyncBase): ) # Check to make sure the `prev_batch` points at the right place - prev_batch_token = self.get_success( - StreamToken.from_string( - self.store, response_body["rooms"][room_id1]["prev_batch"] - ) + prev_batch_token = response_body["rooms"][room_id1]["prev_batch"] + + # If we use the `prev_batch` token to look backwards we should see + # `event3` and older next. 
+ channel = self.make_request( + "GET", + f"/rooms/{room_id1}/messages?from={prev_batch_token}&dir=b&limit=3", + access_token=user1_tok, ) - prev_batch_room_stream_token_serialized = self.get_success( - prev_batch_token.room_key.to_string(self.store) - ) - # If we use the `prev_batch` token to look backwards, we should see `event3` - # next so make sure the token encompasses it - self.assertEqual( - event_pos3.persisted_after(prev_batch_token.room_key), - False, - f"`prev_batch` token {prev_batch_room_stream_token_serialized} should be >= event_pos3={self.get_success(event_pos3.to_room_stream_token().to_string(self.store))}", - ) - # If we use the `prev_batch` token to look backwards, we shouldn't see `event4` - # anymore since it was just returned in this response. - self.assertEqual( - event_pos4.persisted_after(prev_batch_token.room_key), - True, - f"`prev_batch` token {prev_batch_room_stream_token_serialized} should be < event_pos4={self.get_success(event_pos4.to_room_stream_token().to_string(self.store))}", + self.assertEqual(channel.code, 200, channel.json_body) + self.assertListEqual( + [ + event_response3["event_id"], + event_response2["event_id"], + event_response1["event_id"], + ], + [ev["event_id"] for ev in channel.json_body["chunk"]], ) # With no `from_token` (initial sync), it's all historical since there is no diff --git a/tests/storage/test_stream.py b/tests/storage/test_stream.py index 7b7590da76..837eb434aa 100644 --- a/tests/storage/test_stream.py +++ b/tests/storage/test_stream.py @@ -147,7 +147,7 @@ class PaginationTestCase(HomeserverTestCase): def _filter_messages(self, filter: JsonDict) -> List[str]: """Make a request to /messages with a filter, returns the chunk of events.""" - events, next_key = self.get_success( + events, next_key, _ = self.get_success( self.hs.get_datastores().main.paginate_room_events_by_topological_ordering( room_id=self.room_id, from_key=self.from_token.room_key, From 9689ac3294c4e22ddbf1093c171b57e39810e734 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 10 Sep 2024 10:20:30 +0100 Subject: [PATCH 04/18] Sliding Sync: Look for `bump _stamp` in the room timeline (#17684) This allows us to skip checking the database a lot of the time. --------- Co-authored-by: Eric Eastwood --- changelog.d/17684.misc | 1 + synapse/handlers/sliding_sync/__init__.py | 148 ++++++++++++++-------- 2 files changed, 96 insertions(+), 53 deletions(-) create mode 100644 changelog.d/17684.misc diff --git a/changelog.d/17684.misc b/changelog.d/17684.misc new file mode 100644 index 0000000000..ecfb040a5f --- /dev/null +++ b/changelog.d/17684.misc @@ -0,0 +1 @@ +Speed up sliding sync by reducing number of database calls. diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index cf368be9d9..b097ac57a2 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -1031,60 +1031,13 @@ class SlidingSyncHandler: # If we're joined to the room, we need to find the last bump event before the # `to_token` if room_membership_for_user_at_to_token.membership == Membership.JOIN: - # We can quickly query for the latest bump event in the room using the - # sliding sync tables. - latest_room_bump_stamp = await self.store.get_latest_bump_stamp_for_room( - room_id + # Try and get a bump stamp, if not we just fall back to the + # membership token. 
+ new_bump_stamp = await self._get_bump_stamp( + room_id, to_token, timeline_events ) - - min_to_token_position = to_token.room_key.stream - - # If we can rely on the new sliding sync tables and the `bump_stamp` is - # `None`, just fallback to the membership event position. This can happen - # when we've just joined a remote room and all the events are backfilled. - if ( - # FIXME: The background job check can be removed once we bump - # `SCHEMA_COMPAT_VERSION` and run the foreground update for - # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` - # (tracked by https://github.com/element-hq/synapse/issues/17623) - await self.store.have_finished_sliding_sync_background_jobs() - and latest_room_bump_stamp is None - ): - pass - - # The `bump_stamp` stored in the database might be ahead of our token. Since - # `bump_stamp` is only a `stream_ordering` position, we can't be 100% sure - # that's before the `to_token` in all scenarios. The only scenario we can be - # sure of is if the `bump_stamp` is totally before the minimum position from - # the token. - # - # We don't need to check if the background update has finished, as if the - # returned bump stamp is not None then it must be up to date. - elif ( - latest_room_bump_stamp is not None - and latest_room_bump_stamp < min_to_token_position - ): - bump_stamp = latest_room_bump_stamp - - # Otherwise, if it's within or after the `to_token`, we need to find the - # last bump event before the `to_token`. - else: - last_bump_event_result = ( - await self.store.get_last_event_pos_in_room_before_stream_ordering( - room_id, - to_token.room_key, - event_types=SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES, - ) - ) - if last_bump_event_result is not None: - _, new_bump_event_pos = last_bump_event_result - - # If we've just joined a remote room, then the last bump event may - # have been backfilled (and so have a negative stream ordering). - # These negative stream orderings can't sensibly be compared, so - # instead we use the membership event position. - if new_bump_event_pos.stream > 0: - bump_stamp = new_bump_event_pos.stream + if new_bump_stamp is not None: + bump_stamp = new_bump_stamp unstable_expanded_timeline = False prev_room_sync_config = previous_connection_state.room_configs.get(room_id) @@ -1174,3 +1127,92 @@ class SlidingSyncHandler: notification_count=0, highlight_count=0, ) + + @trace + async def _get_bump_stamp( + self, room_id: str, to_token: StreamToken, timeline: List[EventBase] + ) -> Optional[int]: + """Get a bump stamp for the room, if we have a bump event + + Args: + room_id + to_token: The upper bound of token to return + timeline: The list of events we have fetched. + """ + + # First check the timeline events we're returning to see if one of + # those matches. We iterate backwards and take the stream ordering + # of the first event that matches the bump event types. + for timeline_event in reversed(timeline): + if timeline_event.type in SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES: + new_bump_stamp = timeline_event.internal_metadata.stream_ordering + + # All persisted events have a stream ordering + assert new_bump_stamp is not None + + # If we've just joined a remote room, then the last bump event may + # have been backfilled (and so have a negative stream ordering). + # These negative stream orderings can't sensibly be compared, so + # instead we use the membership event position. + if new_bump_stamp > 0: + return new_bump_stamp + + # We can quickly query for the latest bump event in the room using the + # sliding sync tables. 
+ latest_room_bump_stamp = await self.store.get_latest_bump_stamp_for_room( + room_id + ) + + min_to_token_position = to_token.room_key.stream + + # If we can rely on the new sliding sync tables and the `bump_stamp` is + # `None`, just fallback to the membership event position. This can happen + # when we've just joined a remote room and all the events are backfilled. + if ( + # FIXME: The background job check can be removed once we bump + # `SCHEMA_COMPAT_VERSION` and run the foreground update for + # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` + # (tracked by https://github.com/element-hq/synapse/issues/17623) + await self.store.have_finished_sliding_sync_background_jobs() + and latest_room_bump_stamp is None + ): + return None + + # The `bump_stamp` stored in the database might be ahead of our token. Since + # `bump_stamp` is only a `stream_ordering` position, we can't be 100% sure + # that's before the `to_token` in all scenarios. The only scenario we can be + # sure of is if the `bump_stamp` is totally before the minimum position from + # the token. + # + # We don't need to check if the background update has finished, as if the + # returned bump stamp is not None then it must be up to date. + elif ( + latest_room_bump_stamp is not None + and latest_room_bump_stamp < min_to_token_position + ): + if latest_room_bump_stamp > 0: + return latest_room_bump_stamp + else: + return None + + # Otherwise, if it's within or after the `to_token`, we need to find the + # last bump event before the `to_token`. + else: + last_bump_event_result = ( + await self.store.get_last_event_pos_in_room_before_stream_ordering( + room_id, + to_token.room_key, + event_types=SLIDING_SYNC_DEFAULT_BUMP_EVENT_TYPES, + ) + ) + if last_bump_event_result is not None: + _, new_bump_event_pos = last_bump_event_result + + # If we've just joined a remote room, then the last bump event may + # have been backfilled (and so have a negative stream ordering). + # These negative stream orderings can't sensibly be compared, so + # instead we use the membership event position. + if new_bump_event_pos.stream > 0: + return new_bump_event_pos.stream + + return None From b3047f3f17187a319c6c2d6145917223b0f7bc84 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 10 Sep 2024 10:22:46 +0100 Subject: [PATCH 05/18] Sliding sync: various fixups to the sliding sync joined room background job (#17673) Follow-up to #17652, https://github.com/element-hq/synapse/pull/17641, https://github.com/element-hq/synapse/pull/17634, https://github.com/element-hq/synapse/pull/17631 and https://github.com/element-hq/synapse/pull/17632 to fix-up https://github.com/element-hq/synapse/pull/17512 --- changelog.d/17673.misc | 1 + .../databases/main/events_bg_updates.py | 27 ++++++++++++------- 2 files changed, 19 insertions(+), 9 deletions(-) create mode 100644 changelog.d/17673.misc diff --git a/changelog.d/17673.misc b/changelog.d/17673.misc new file mode 100644 index 0000000000..756918e2b2 --- /dev/null +++ b/changelog.d/17673.misc @@ -0,0 +1 @@ +Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. 
diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index 12670e87d2..b3244f7457 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -1595,17 +1595,15 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS # starve disk usage while this goes on. # # We upsert in case we have to run this multiple times. - # - # The `WHERE TRUE` clause is to avoid "Parsing Ambiguity" txn.execute( """ INSERT INTO sliding_sync_joined_rooms_to_recalculate (room_id) - SELECT room_id FROM rooms WHERE ? + SELECT DISTINCT room_id FROM local_current_membership + WHERE membership = 'join' ON CONFLICT (room_id) DO NOTHING; """, - (True,), ) await self.db_pool.runInteraction( @@ -1689,7 +1687,15 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS if not current_state_ids_map: continue - fetched_events = await self.get_events(current_state_ids_map.values()) + try: + fetched_events = await self.get_events(current_state_ids_map.values()) + except (DatabaseCorruptionError, InvalidEventError) as e: + logger.warning( + "Failed to fetch state for room '%s' due to corrupted events. Ignoring. Error: %s", + room_id, + e, + ) + continue current_state_map: StateMap[EventBase] = { state_key: fetched_events[event_id] @@ -1722,10 +1728,13 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS + "given we pulled the room out of `current_state_events`" ) most_recent_event_stream_ordering = most_recent_event_pos_results[1].stream - assert most_recent_event_stream_ordering > 0, ( - "We should have at-least one event in the room (our own join membership event for example) " - + "that isn't backfilled (negative `stream_ordering`) if we are joined to the room." - ) + + # The `most_recent_event_stream_ordering` should be positive, + # however there are (very rare) rooms where that is not the case in + # the matrix.org database. It's not clear how they got into that + # state, but does mean that we cannot assert that the stream + # ordering is indeed positive. + # Figure out the latest `bump_stamp` in the room. This could be `None` for a # federated room you just joined where all of events are still `outliers` or # backfilled history. In the Sliding Sync API, we default to the user's From a193d4a1b55f2289d51b81c431dd88dde30c950e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 10:34:26 +0100 Subject: [PATCH 06/18] Bump authlib from 1.3.1 to 1.3.2 (#17679) --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 85950ff707..c0059de0a5 100644 --- a/poetry.lock +++ b/poetry.lock @@ -35,13 +35,13 @@ tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] [[package]] name = "authlib" -version = "1.3.1" +version = "1.3.2" description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients." 
optional = true python-versions = ">=3.8" files = [ - {file = "Authlib-1.3.1-py2.py3-none-any.whl", hash = "sha256:d35800b973099bbadc49b42b256ecb80041ad56b7fe1216a362c7943c088f377"}, - {file = "authlib-1.3.1.tar.gz", hash = "sha256:7ae843f03c06c5c0debd63c9db91f9fda64fa62a42a77419fa15fbb7e7a58917"}, + {file = "Authlib-1.3.2-py2.py3-none-any.whl", hash = "sha256:ede026a95e9f5cdc2d4364a52103f5405e75aa156357e831ef2bfd0bc5094dfc"}, + {file = "authlib-1.3.2.tar.gz", hash = "sha256:4b16130117f9eb82aa6eec97f6dd4673c3f960ac0283ccdae2897ee4bc030ba2"}, ] [package.dependencies] From cd24bc2f36a4558fdc11e5bdeb67065faf638251 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 10:34:34 +0100 Subject: [PATCH 07/18] Bump ruff from 0.6.2 to 0.6.4 (#17680) --- poetry.lock | 40 ++++++++++++++++++++-------------------- pyproject.toml | 2 +- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/poetry.lock b/poetry.lock index c0059de0a5..7687ae1364 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2268,29 +2268,29 @@ files = [ [[package]] name = "ruff" -version = "0.6.2" +version = "0.6.4" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.6.2-py3-none-linux_armv6l.whl", hash = "sha256:5c8cbc6252deb3ea840ad6a20b0f8583caab0c5ef4f9cca21adc5a92b8f79f3c"}, - {file = "ruff-0.6.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:17002fe241e76544448a8e1e6118abecbe8cd10cf68fde635dad480dba594570"}, - {file = "ruff-0.6.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:3dbeac76ed13456f8158b8f4fe087bf87882e645c8e8b606dd17b0b66c2c1158"}, - {file = "ruff-0.6.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:094600ee88cda325988d3f54e3588c46de5c18dae09d683ace278b11f9d4d534"}, - {file = "ruff-0.6.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:316d418fe258c036ba05fbf7dfc1f7d3d4096db63431546163b472285668132b"}, - {file = "ruff-0.6.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d72b8b3abf8a2d51b7b9944a41307d2f442558ccb3859bbd87e6ae9be1694a5d"}, - {file = "ruff-0.6.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:2aed7e243be68487aa8982e91c6e260982d00da3f38955873aecd5a9204b1d66"}, - {file = "ruff-0.6.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d371f7fc9cec83497fe7cf5eaf5b76e22a8efce463de5f775a1826197feb9df8"}, - {file = "ruff-0.6.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8f310d63af08f583363dfb844ba8f9417b558199c58a5999215082036d795a1"}, - {file = "ruff-0.6.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7db6880c53c56addb8638fe444818183385ec85eeada1d48fc5abe045301b2f1"}, - {file = "ruff-0.6.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:1175d39faadd9a50718f478d23bfc1d4da5743f1ab56af81a2b6caf0a2394f23"}, - {file = "ruff-0.6.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:5b939f9c86d51635fe486585389f54582f0d65b8238e08c327c1534844b3bb9a"}, - {file = "ruff-0.6.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:d0d62ca91219f906caf9b187dea50d17353f15ec9bb15aae4a606cd697b49b4c"}, - {file = "ruff-0.6.2-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:7438a7288f9d67ed3c8ce4d059e67f7ed65e9fe3aa2ab6f5b4b3610e57e3cb56"}, - {file = "ruff-0.6.2-py3-none-win32.whl", hash = "sha256:279d5f7d86696df5f9549b56b9b6a7f6c72961b619022b5b7999b15db392a4da"}, - {file = 
"ruff-0.6.2-py3-none-win_amd64.whl", hash = "sha256:d9f3469c7dd43cd22eb1c3fc16926fb8258d50cb1b216658a07be95dd117b0f2"}, - {file = "ruff-0.6.2-py3-none-win_arm64.whl", hash = "sha256:f28fcd2cd0e02bdf739297516d5643a945cc7caf09bd9bcb4d932540a5ea4fa9"}, - {file = "ruff-0.6.2.tar.gz", hash = "sha256:239ee6beb9e91feb8e0ec384204a763f36cb53fb895a1a364618c6abb076b3be"}, + {file = "ruff-0.6.4-py3-none-linux_armv6l.whl", hash = "sha256:c4b153fc152af51855458e79e835fb6b933032921756cec9af7d0ba2aa01a258"}, + {file = "ruff-0.6.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:bedff9e4f004dad5f7f76a9d39c4ca98af526c9b1695068198b3bda8c085ef60"}, + {file = "ruff-0.6.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d02a4127a86de23002e694d7ff19f905c51e338c72d8e09b56bfb60e1681724f"}, + {file = "ruff-0.6.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7862f42fc1a4aca1ea3ffe8a11f67819d183a5693b228f0bb3a531f5e40336fc"}, + {file = "ruff-0.6.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eebe4ff1967c838a1a9618a5a59a3b0a00406f8d7eefee97c70411fefc353617"}, + {file = "ruff-0.6.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:932063a03bac394866683e15710c25b8690ccdca1cf192b9a98260332ca93408"}, + {file = "ruff-0.6.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:50e30b437cebef547bd5c3edf9ce81343e5dd7c737cb36ccb4fe83573f3d392e"}, + {file = "ruff-0.6.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c44536df7b93a587de690e124b89bd47306fddd59398a0fb12afd6133c7b3818"}, + {file = "ruff-0.6.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ea086601b22dc5e7693a78f3fcfc460cceabfdf3bdc36dc898792aba48fbad6"}, + {file = "ruff-0.6.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b52387d3289ccd227b62102c24714ed75fbba0b16ecc69a923a37e3b5e0aaaa"}, + {file = "ruff-0.6.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:0308610470fcc82969082fc83c76c0d362f562e2f0cdab0586516f03a4e06ec6"}, + {file = "ruff-0.6.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:803b96dea21795a6c9d5bfa9e96127cc9c31a1987802ca68f35e5c95aed3fc0d"}, + {file = "ruff-0.6.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:66dbfea86b663baab8fcae56c59f190caba9398df1488164e2df53e216248baa"}, + {file = "ruff-0.6.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:34d5efad480193c046c86608dbba2bccdc1c5fd11950fb271f8086e0c763a5d1"}, + {file = "ruff-0.6.4-py3-none-win32.whl", hash = "sha256:f0f8968feea5ce3777c0d8365653d5e91c40c31a81d95824ba61d871a11b8523"}, + {file = "ruff-0.6.4-py3-none-win_amd64.whl", hash = "sha256:549daccee5227282289390b0222d0fbee0275d1db6d514550d65420053021a58"}, + {file = "ruff-0.6.4-py3-none-win_arm64.whl", hash = "sha256:ac4b75e898ed189b3708c9ab3fc70b79a433219e1e87193b4f2b77251d058d14"}, + {file = "ruff-0.6.4.tar.gz", hash = "sha256:ac3b5bfbee99973f80aa1b7cbd1c9cbce200883bdd067300c22a6cc1c7fba212"}, ] [[package]] @@ -3104,4 +3104,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.8.0" -content-hash = "2bf09e2b68f3abd1a0f9ff2227eb3026ac3d034845acfc120d0b1cb8167ea43b" +content-hash = "26ff23a6cafd8593141cb3d54d7b1e94328a02b863d347578d2b6e666ee2bc93" diff --git a/pyproject.toml b/pyproject.toml index 69a82b8e1c..e93179c9e0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -320,7 +320,7 @@ all = [ # failing on new releases. 
Keeping lower bounds loose here means that dependabot # can bump versions without having to update the content-hash in the lockfile. # This helps prevents merge conflicts when running a batch of dependabot updates. -ruff = "0.6.2" +ruff = "0.6.4" # Type checking only works with the pydantic.v1 compat module from pydantic v2 pydantic = "^2" From 2efed1d4fb925332438f9edfb939bbe3a92cb538 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 10:34:45 +0100 Subject: [PATCH 08/18] Bump types-setuptools from 71.1.0.20240818 to 74.1.0.20240907 (#17681) --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 7687ae1364..242d28877a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2823,13 +2823,13 @@ urllib3 = ">=2" [[package]] name = "types-setuptools" -version = "71.1.0.20240818" +version = "74.1.0.20240907" description = "Typing stubs for setuptools" optional = false python-versions = ">=3.8" files = [ - {file = "types-setuptools-71.1.0.20240818.tar.gz", hash = "sha256:f62eaffaa39774462c65fbb49368c4dc1d91a90a28371cb14e1af090ff0e41e3"}, - {file = "types_setuptools-71.1.0.20240818-py3-none-any.whl", hash = "sha256:c4f95302f88369ac0ac46c67ddbfc70c6c4dbbb184d9fed356244217a2934025"}, + {file = "types-setuptools-74.1.0.20240907.tar.gz", hash = "sha256:0abdb082552ca966c1e5fc244e4853adc62971f6cd724fb1d8a3713b580e5a65"}, + {file = "types_setuptools-74.1.0.20240907-py3-none-any.whl", hash = "sha256:15b38c8e63ca34f42f6063ff4b1dd662ea20086166d5ad6a102e670a52574120"}, ] [[package]] From d8b926d323565a1b3097da3beff558ce3403724c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 10:34:54 +0100 Subject: [PATCH 09/18] Bump idna from 3.7 to 3.8 (#17682) --- poetry.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/poetry.lock b/poetry.lock index 242d28877a..3f2f815d8b 100644 --- a/poetry.lock +++ b/poetry.lock @@ -608,13 +608,13 @@ idna = ">=2.5" [[package]] name = "idna" -version = "3.7" +version = "3.8" description = "Internationalized Domain Names in Applications (IDNA)" optional = false -python-versions = ">=3.5" +python-versions = ">=3.6" files = [ - {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, - {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, + {file = "idna-3.8-py3-none-any.whl", hash = "sha256:050b4e5baadcd44d760cedbd2b8e639f2ff89bbc7a5730fcc662954303377aac"}, + {file = "idna-3.8.tar.gz", hash = "sha256:d838c2c0ed6fced7693d5e8ab8e734d5f8fda53a039c0164afb0b82e771e3603"}, ] [[package]] From 59bcbcec0a801ba0a2015d4e5bee1b5324619795 Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Tue, 10 Sep 2024 08:42:01 -0600 Subject: [PATCH 10/18] 1.115.0rc1 --- CHANGES.md | 50 +++++++++++++++++++++++++++++++++++++++ changelog.d/17407.misc | 1 - changelog.d/17509.feature | 1 - changelog.d/17512.misc | 1 - changelog.d/17590.doc | 1 - changelog.d/17594.doc | 1 - changelog.d/17599.misc | 1 - changelog.d/17600.misc | 1 - changelog.d/17607.bugfix | 1 - changelog.d/17620.misc | 1 - changelog.d/17626.bugfix | 1 - changelog.d/17629.misc | 1 - changelog.d/17630.misc | 1 - changelog.d/17631.misc | 1 - changelog.d/17632.misc | 1 - changelog.d/17633.misc | 1 - changelog.d/17634.misc | 1 - changelog.d/17635.misc | 1 - changelog.d/17636.misc | 1 - changelog.d/17641.misc | 1 
- changelog.d/17643.misc | 1 - changelog.d/17649.misc | 1 - changelog.d/17650.removal | 1 - changelog.d/17654.misc | 1 - changelog.d/17655.misc | 1 - changelog.d/17658.misc | 1 - changelog.d/17665.misc | 1 - changelog.d/17666.misc | 1 - changelog.d/17670.misc | 1 - changelog.d/17672.misc | 1 - changelog.d/17673.misc | 1 - changelog.d/17674.bugfix | 1 - changelog.d/17684.misc | 1 - changelog.d/17688.misc | 1 - debian/changelog | 6 +++++ pyproject.toml | 2 +- 36 files changed, 57 insertions(+), 34 deletions(-) delete mode 100644 changelog.d/17407.misc delete mode 100644 changelog.d/17509.feature delete mode 100644 changelog.d/17512.misc delete mode 100644 changelog.d/17590.doc delete mode 100644 changelog.d/17594.doc delete mode 100644 changelog.d/17599.misc delete mode 100644 changelog.d/17600.misc delete mode 100644 changelog.d/17607.bugfix delete mode 100644 changelog.d/17620.misc delete mode 100644 changelog.d/17626.bugfix delete mode 100644 changelog.d/17629.misc delete mode 100644 changelog.d/17630.misc delete mode 100644 changelog.d/17631.misc delete mode 100644 changelog.d/17632.misc delete mode 100644 changelog.d/17633.misc delete mode 100644 changelog.d/17634.misc delete mode 100644 changelog.d/17635.misc delete mode 100644 changelog.d/17636.misc delete mode 100644 changelog.d/17641.misc delete mode 100644 changelog.d/17643.misc delete mode 100644 changelog.d/17649.misc delete mode 100644 changelog.d/17650.removal delete mode 100644 changelog.d/17654.misc delete mode 100644 changelog.d/17655.misc delete mode 100644 changelog.d/17658.misc delete mode 100644 changelog.d/17665.misc delete mode 100644 changelog.d/17666.misc delete mode 100644 changelog.d/17670.misc delete mode 100644 changelog.d/17672.misc delete mode 100644 changelog.d/17673.misc delete mode 100644 changelog.d/17674.bugfix delete mode 100644 changelog.d/17684.misc delete mode 100644 changelog.d/17688.misc diff --git a/CHANGES.md b/CHANGES.md index d3cec9cc15..227129d00a 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,53 @@ +# Synapse 1.115.0rc1 (2024-09-10) + +### Features + +- Improve cross-signing upload when using [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) to use a custom UIA flow stage, with web fallback support. ([\#17509](https://github.com/element-hq/synapse/issues/17509)) + +### Bugfixes + +- Return `400 M_BAD_JSON` upon attempting to complete various room actions with a non-local user ID and unknown room ID, rather than an internal server error. ([\#17607](https://github.com/element-hq/synapse/issues/17607)) +- Fix authenticated media responses using a wrong limit when following redirects over federation. ([\#17626](https://github.com/element-hq/synapse/issues/17626)) +- Fix bug where we returned the wrong `bump_stamp` for invites in sliding sync response, causing incorrect ordering of invites in the room list. ([\#17674](https://github.com/element-hq/synapse/issues/17674)) + +### Improved Documentation + +- Clarify that the admin api resource is only loaded on the main process and not workers. ([\#17590](https://github.com/element-hq/synapse/issues/17590)) +- Fixed typo in `saml2_config` config [example](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#saml2_config). ([\#17594](https://github.com/element-hq/synapse/issues/17594)) + +### Deprecations and Removals + +- Stabilise [MSC4156](https://github.com/matrix-org/matrix-spec-proposals/pull/4156) by removing the `msc4156_enabled` config setting and defaulting it to `true`. 
([\#17650](https://github.com/element-hq/synapse/issues/17650)) + +### Internal Changes + +- MSC3861: load the issuer and account management URLs from OIDC discovery. ([\#17407](https://github.com/element-hq/synapse/issues/17407)) +- Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. ([\#17512](https://github.com/element-hq/synapse/issues/17512), [\#17632](https://github.com/element-hq/synapse/issues/17632), [\#17633](https://github.com/element-hq/synapse/issues/17633), [\#17634](https://github.com/element-hq/synapse/issues/17634), [\#17635](https://github.com/element-hq/synapse/issues/17635), [\#17636](https://github.com/element-hq/synapse/issues/17636), [\#17641](https://github.com/element-hq/synapse/issues/17641), [\#17654](https://github.com/element-hq/synapse/issues/17654), [\#17673](https://github.com/element-hq/synapse/issues/17673)) +- Store sliding sync per-connection state in the database. ([\#17599](https://github.com/element-hq/synapse/issues/17599), [\#17631](https://github.com/element-hq/synapse/issues/17631)) +- Make the sliding sync `PerConnectionState` class immutable. ([\#17600](https://github.com/element-hq/synapse/issues/17600)) +- Replace `isort` and `black` with `ruff`. ([\#17620](https://github.com/element-hq/synapse/issues/17620), [\#17643](https://github.com/element-hq/synapse/issues/17643)) +- Sliding Sync: Split up `get_room_membership_for_user_at_to_token`. ([\#17629](https://github.com/element-hq/synapse/issues/17629)) +- Use new database tables for sliding sync. ([\#17630](https://github.com/element-hq/synapse/issues/17630), [\#17649](https://github.com/element-hq/synapse/issues/17649)) +- Prevent duplicate tags being added to Sliding Sync traces. ([\#17655](https://github.com/element-hq/synapse/issues/17655)) +- Get `bump_stamp` from [new sliding sync tables](https://github.com/element-hq/synapse/pull/17512) which should be faster. ([\#17658](https://github.com/element-hq/synapse/issues/17658)) +- Speed up incremental Sliding Sync requests by avoiding extra work. ([\#17665](https://github.com/element-hq/synapse/issues/17665)) +- Small performance improvement in speeding up sliding sync. ([\#17666](https://github.com/element-hq/synapse/issues/17666), [\#17670](https://github.com/element-hq/synapse/issues/17670), [\#17672](https://github.com/element-hq/synapse/issues/17672)) +- Speed up sliding sync by reducing number of database calls. ([\#17684](https://github.com/element-hq/synapse/issues/17684)) +- Speed up sync by pulling out fewer events from the database. ([\#17688](https://github.com/element-hq/synapse/issues/17688)) + + + +### Updates to locked dependencies + +* Bump authlib from 1.3.1 to 1.3.2. ([\#17679](https://github.com/element-hq/synapse/issues/17679)) +* Bump idna from 3.7 to 3.8. ([\#17682](https://github.com/element-hq/synapse/issues/17682)) +* Bump ruff from 0.6.2 to 0.6.4. ([\#17680](https://github.com/element-hq/synapse/issues/17680)) +* Bump towncrier from 24.7.1 to 24.8.0. ([\#17645](https://github.com/element-hq/synapse/issues/17645)) +* Bump twisted from 24.7.0rc1 to 24.7.0. ([\#17647](https://github.com/element-hq/synapse/issues/17647)) +* Bump types-pillow from 10.2.0.20240520 to 10.2.0.20240822. ([\#17644](https://github.com/element-hq/synapse/issues/17644)) +* Bump types-psycopg2 from 2.9.21.20240417 to 2.9.21.20240819. 
([\#17646](https://github.com/element-hq/synapse/issues/17646)) +* Bump types-setuptools from 71.1.0.20240818 to 74.1.0.20240907. ([\#17681](https://github.com/element-hq/synapse/issues/17681)) + # Synapse 1.114.0 (2024-09-02) This release enables support for diff --git a/changelog.d/17407.misc b/changelog.d/17407.misc deleted file mode 100644 index 9ed6e61a5b..0000000000 --- a/changelog.d/17407.misc +++ /dev/null @@ -1 +0,0 @@ -MSC3861: load the issuer and account management URLs from OIDC discovery. diff --git a/changelog.d/17509.feature b/changelog.d/17509.feature deleted file mode 100644 index 6d639ceb98..0000000000 --- a/changelog.d/17509.feature +++ /dev/null @@ -1 +0,0 @@ -Improve cross-signing upload when using [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) to use a custom UIA flow stage, with web fallback support. diff --git a/changelog.d/17512.misc b/changelog.d/17512.misc deleted file mode 100644 index 756918e2b2..0000000000 --- a/changelog.d/17512.misc +++ /dev/null @@ -1 +0,0 @@ -Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. diff --git a/changelog.d/17590.doc b/changelog.d/17590.doc deleted file mode 100644 index eced3d96cb..0000000000 --- a/changelog.d/17590.doc +++ /dev/null @@ -1 +0,0 @@ -Clarify that the admin api resource is only loaded on the main process and not workers. \ No newline at end of file diff --git a/changelog.d/17594.doc b/changelog.d/17594.doc deleted file mode 100644 index 95b0042005..0000000000 --- a/changelog.d/17594.doc +++ /dev/null @@ -1 +0,0 @@ -Fixed typo in `saml2_config` config [example](https://element-hq.github.io/synapse/latest/usage/configuration/config_documentation.html#saml2_config). diff --git a/changelog.d/17599.misc b/changelog.d/17599.misc deleted file mode 100644 index 2f81356d12..0000000000 --- a/changelog.d/17599.misc +++ /dev/null @@ -1 +0,0 @@ -Store sliding sync per-connection state in the database. diff --git a/changelog.d/17600.misc b/changelog.d/17600.misc deleted file mode 100644 index a81c67f6d1..0000000000 --- a/changelog.d/17600.misc +++ /dev/null @@ -1 +0,0 @@ -Make the sliding sync `PerConnectionState` class immutable. diff --git a/changelog.d/17607.bugfix b/changelog.d/17607.bugfix deleted file mode 100644 index 74201135b6..0000000000 --- a/changelog.d/17607.bugfix +++ /dev/null @@ -1 +0,0 @@ -Return `400 M_BAD_JSON` upon attempting to complete various room actions with a non-local user ID and unknown room ID, rather than an internal server error. \ No newline at end of file diff --git a/changelog.d/17620.misc b/changelog.d/17620.misc deleted file mode 100644 index f583cdcb38..0000000000 --- a/changelog.d/17620.misc +++ /dev/null @@ -1 +0,0 @@ -Replace `isort` and `black with `ruff`. diff --git a/changelog.d/17626.bugfix b/changelog.d/17626.bugfix deleted file mode 100644 index 1dbb2a2f45..0000000000 --- a/changelog.d/17626.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix authenticated media responses using a wrong limit when following redirects over federation. diff --git a/changelog.d/17629.misc b/changelog.d/17629.misc deleted file mode 100644 index 1eb46b2c68..0000000000 --- a/changelog.d/17629.misc +++ /dev/null @@ -1 +0,0 @@ -Sliding Sync: Split up `get_room_membership_for_user_at_to_token`. 
diff --git a/changelog.d/17630.misc b/changelog.d/17630.misc deleted file mode 100644 index ed1bf6bd55..0000000000 --- a/changelog.d/17630.misc +++ /dev/null @@ -1 +0,0 @@ -Use new database tables for sliding sync. diff --git a/changelog.d/17631.misc b/changelog.d/17631.misc deleted file mode 100644 index 2f81356d12..0000000000 --- a/changelog.d/17631.misc +++ /dev/null @@ -1 +0,0 @@ -Store sliding sync per-connection state in the database. diff --git a/changelog.d/17632.misc b/changelog.d/17632.misc deleted file mode 100644 index 756918e2b2..0000000000 --- a/changelog.d/17632.misc +++ /dev/null @@ -1 +0,0 @@ -Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. diff --git a/changelog.d/17633.misc b/changelog.d/17633.misc deleted file mode 100644 index 756918e2b2..0000000000 --- a/changelog.d/17633.misc +++ /dev/null @@ -1 +0,0 @@ -Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. diff --git a/changelog.d/17634.misc b/changelog.d/17634.misc deleted file mode 100644 index 756918e2b2..0000000000 --- a/changelog.d/17634.misc +++ /dev/null @@ -1 +0,0 @@ -Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. diff --git a/changelog.d/17635.misc b/changelog.d/17635.misc deleted file mode 100644 index 756918e2b2..0000000000 --- a/changelog.d/17635.misc +++ /dev/null @@ -1 +0,0 @@ -Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. diff --git a/changelog.d/17636.misc b/changelog.d/17636.misc deleted file mode 100644 index 756918e2b2..0000000000 --- a/changelog.d/17636.misc +++ /dev/null @@ -1 +0,0 @@ -Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. diff --git a/changelog.d/17641.misc b/changelog.d/17641.misc deleted file mode 100644 index 756918e2b2..0000000000 --- a/changelog.d/17641.misc +++ /dev/null @@ -1 +0,0 @@ -Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. diff --git a/changelog.d/17643.misc b/changelog.d/17643.misc deleted file mode 100644 index f583cdcb38..0000000000 --- a/changelog.d/17643.misc +++ /dev/null @@ -1 +0,0 @@ -Replace `isort` and `black with `ruff`. diff --git a/changelog.d/17649.misc b/changelog.d/17649.misc deleted file mode 100644 index ed1bf6bd55..0000000000 --- a/changelog.d/17649.misc +++ /dev/null @@ -1 +0,0 @@ -Use new database tables for sliding sync. diff --git a/changelog.d/17650.removal b/changelog.d/17650.removal deleted file mode 100644 index 1238815c08..0000000000 --- a/changelog.d/17650.removal +++ /dev/null @@ -1 +0,0 @@ -Stabilise [MSC4156](https://github.com/matrix-org/matrix-spec-proposals/pull/4156) by removing the `msc4156_enabled` config setting and defaulting it to `true`. 
\ No newline at end of file diff --git a/changelog.d/17654.misc b/changelog.d/17654.misc deleted file mode 100644 index 756918e2b2..0000000000 --- a/changelog.d/17654.misc +++ /dev/null @@ -1 +0,0 @@ -Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. diff --git a/changelog.d/17655.misc b/changelog.d/17655.misc deleted file mode 100644 index ce997d3b41..0000000000 --- a/changelog.d/17655.misc +++ /dev/null @@ -1 +0,0 @@ -Prevent duplicate tags being added to Sliding Sync traces. diff --git a/changelog.d/17658.misc b/changelog.d/17658.misc deleted file mode 100644 index 0bdbc1140d..0000000000 --- a/changelog.d/17658.misc +++ /dev/null @@ -1 +0,0 @@ -Get `bump_stamp` from [new sliding sync tables](https://github.com/element-hq/synapse/pull/17512) which should be faster. diff --git a/changelog.d/17665.misc b/changelog.d/17665.misc deleted file mode 100644 index 28921087a6..0000000000 --- a/changelog.d/17665.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up incremental Sliding Sync requests by avoiding extra work. diff --git a/changelog.d/17666.misc b/changelog.d/17666.misc deleted file mode 100644 index 3550679247..0000000000 --- a/changelog.d/17666.misc +++ /dev/null @@ -1 +0,0 @@ -Small performance improvement in speeding up sliding sync. diff --git a/changelog.d/17670.misc b/changelog.d/17670.misc deleted file mode 100644 index 3550679247..0000000000 --- a/changelog.d/17670.misc +++ /dev/null @@ -1 +0,0 @@ -Small performance improvement in speeding up sliding sync. diff --git a/changelog.d/17672.misc b/changelog.d/17672.misc deleted file mode 100644 index 3550679247..0000000000 --- a/changelog.d/17672.misc +++ /dev/null @@ -1 +0,0 @@ -Small performance improvement in speeding up sliding sync. diff --git a/changelog.d/17673.misc b/changelog.d/17673.misc deleted file mode 100644 index 756918e2b2..0000000000 --- a/changelog.d/17673.misc +++ /dev/null @@ -1 +0,0 @@ -Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. diff --git a/changelog.d/17674.bugfix b/changelog.d/17674.bugfix deleted file mode 100644 index bbef5005a1..0000000000 --- a/changelog.d/17674.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where we returned the wrong `bump_stamp` for invites in sliding sync response, causing incorrect ordering of invites in the room list. diff --git a/changelog.d/17684.misc b/changelog.d/17684.misc deleted file mode 100644 index ecfb040a5f..0000000000 --- a/changelog.d/17684.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up sliding sync by reducing number of database calls. diff --git a/changelog.d/17688.misc b/changelog.d/17688.misc deleted file mode 100644 index 7ba8d48fbe..0000000000 --- a/changelog.d/17688.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up sync by pulling out fewer events from the database. diff --git a/debian/changelog b/debian/changelog index dfb48edc49..51b082205d 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.115.0~rc1) stable; urgency=medium + + * New Synapse release 1.115.0rc1. + + -- Synapse Packaging team Tue, 10 Sep 2024 08:39:09 -0600 + matrix-synapse-py3 (1.114.0) stable; urgency=medium * New Synapse release 1.114.0. 
diff --git a/pyproject.toml b/pyproject.toml index e93179c9e0..bd139e2834 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.114.0" +version = "1.115.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later" From 5562a891689a1f5cb28874fea50407a507fc883f Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Tue, 10 Sep 2024 08:48:41 -0600 Subject: [PATCH 11/18] Update changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 227129d00a..b86a4c310d 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -21,7 +21,7 @@ ### Internal Changes -- MSC3861: load the issuer and account management URLs from OIDC discovery. ([\#17407](https://github.com/element-hq/synapse/issues/17407)) +- Update [MSC3861](https://github.com/matrix-org/matrix-spec-proposals/pull/3861) implementation: load the issuer and account management URLs from OIDC discovery. ([\#17407](https://github.com/element-hq/synapse/issues/17407)) - Pre-populate room data used in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint for quick filtering/sorting. ([\#17512](https://github.com/element-hq/synapse/issues/17512), [\#17632](https://github.com/element-hq/synapse/issues/17632), [\#17633](https://github.com/element-hq/synapse/issues/17633), [\#17634](https://github.com/element-hq/synapse/issues/17634), [\#17635](https://github.com/element-hq/synapse/issues/17635), [\#17636](https://github.com/element-hq/synapse/issues/17636), [\#17641](https://github.com/element-hq/synapse/issues/17641), [\#17654](https://github.com/element-hq/synapse/issues/17654), [\#17673](https://github.com/element-hq/synapse/issues/17673)) - Store sliding sync per-connection state in the database. ([\#17599](https://github.com/element-hq/synapse/issues/17599), [\#17631](https://github.com/element-hq/synapse/issues/17631)) - Make the sliding sync `PerConnectionState` class immutable. 
([\#17600](https://github.com/element-hq/synapse/issues/17600)) From 62523571ae9cfb3ecc0c5f7d4dbdd877c158e929 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 17:30:37 +0100 Subject: [PATCH 12/18] Bump serde from 1.0.209 to 1.0.210 (#17686) --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 18936ab843..98a2a6c3cd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -485,18 +485,18 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "serde" -version = "1.0.209" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.209" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", From be603de2cbbfdbd056ac137180c0f465180064c4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 17:31:34 +0100 Subject: [PATCH 13/18] Bump serde_json from 1.0.127 to 1.0.128 (#17687) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 98a2a6c3cd..586ac094a3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -505,9 +505,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.127" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8043c06d9f82bd7271361ed64f415fe5e12a77fdb52e573e7f06a516dea329ad" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ "itoa", "memchr", From 598a83d00500c2114d4e1db16ef0b413447f0f67 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 17:32:17 +0100 Subject: [PATCH 14/18] Bump cryptography from 43.0.0 to 43.0.1 (#17689) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 58 ++++++++++++++++++++++++++--------------------------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/poetry.lock b/poetry.lock index 3f2f815d8b..4c1756c78a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -357,38 +357,38 @@ files = [ [[package]] name = "cryptography" -version = "43.0.0" +version = "43.0.1" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
optional = false python-versions = ">=3.7" files = [ - {file = "cryptography-43.0.0-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:64c3f16e2a4fc51c0d06af28441881f98c5d91009b8caaff40cf3548089e9c74"}, - {file = "cryptography-43.0.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3dcdedae5c7710b9f97ac6bba7e1052b95c7083c9d0e9df96e02a1932e777895"}, - {file = "cryptography-43.0.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d9a1eca329405219b605fac09ecfc09ac09e595d6def650a437523fcd08dd22"}, - {file = "cryptography-43.0.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ea9e57f8ea880eeea38ab5abf9fbe39f923544d7884228ec67d666abd60f5a47"}, - {file = "cryptography-43.0.0-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:9a8d6802e0825767476f62aafed40532bd435e8a5f7d23bd8b4f5fd04cc80ecf"}, - {file = "cryptography-43.0.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:cc70b4b581f28d0a254d006f26949245e3657d40d8857066c2ae22a61222ef55"}, - {file = "cryptography-43.0.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:4a997df8c1c2aae1e1e5ac49c2e4f610ad037fc5a3aadc7b64e39dea42249431"}, - {file = "cryptography-43.0.0-cp37-abi3-win32.whl", hash = "sha256:6e2b11c55d260d03a8cf29ac9b5e0608d35f08077d8c087be96287f43af3ccdc"}, - {file = "cryptography-43.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:31e44a986ceccec3d0498e16f3d27b2ee5fdf69ce2ab89b52eaad1d2f33d8778"}, - {file = "cryptography-43.0.0-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:7b3f5fe74a5ca32d4d0f302ffe6680fcc5c28f8ef0dc0ae8f40c0f3a1b4fca66"}, - {file = "cryptography-43.0.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac1955ce000cb29ab40def14fd1bbfa7af2017cca696ee696925615cafd0dce5"}, - {file = "cryptography-43.0.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:299d3da8e00b7e2b54bb02ef58d73cd5f55fb31f33ebbf33bd00d9aa6807df7e"}, - {file = "cryptography-43.0.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ee0c405832ade84d4de74b9029bedb7b31200600fa524d218fc29bfa371e97f5"}, - {file = "cryptography-43.0.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:cb013933d4c127349b3948aa8aaf2f12c0353ad0eccd715ca789c8a0f671646f"}, - {file = "cryptography-43.0.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:fdcb265de28585de5b859ae13e3846a8e805268a823a12a4da2597f1f5afc9f0"}, - {file = "cryptography-43.0.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:2905ccf93a8a2a416f3ec01b1a7911c3fe4073ef35640e7ee5296754e30b762b"}, - {file = "cryptography-43.0.0-cp39-abi3-win32.whl", hash = "sha256:47ca71115e545954e6c1d207dd13461ab81f4eccfcb1345eac874828b5e3eaaf"}, - {file = "cryptography-43.0.0-cp39-abi3-win_amd64.whl", hash = "sha256:0663585d02f76929792470451a5ba64424acc3cd5227b03921dab0e2f27b1709"}, - {file = "cryptography-43.0.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2c6d112bf61c5ef44042c253e4859b3cbbb50df2f78fa8fae6747a7814484a70"}, - {file = "cryptography-43.0.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:844b6d608374e7d08f4f6e6f9f7b951f9256db41421917dfb2d003dde4cd6b66"}, - {file = "cryptography-43.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:51956cf8730665e2bdf8ddb8da0056f699c1a5715648c1b0144670c1ba00b48f"}, - {file = "cryptography-43.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:aae4d918f6b180a8ab8bf6511a419473d107df4dbb4225c7b48c5c9602c38c7f"}, - {file = "cryptography-43.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:232ce02943a579095a339ac4b390fbbe97f5b5d5d107f8a08260ea2768be8cc2"}, - {file = "cryptography-43.0.0-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:5bcb8a5620008a8034d39bce21dc3e23735dfdb6a33a06974739bfa04f853947"}, - {file = "cryptography-43.0.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:08a24a7070b2b6804c1940ff0f910ff728932a9d0e80e7814234269f9d46d069"}, - {file = "cryptography-43.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:e9c5266c432a1e23738d178e51c2c7a5e2ddf790f248be939448c0ba2021f9d1"}, - {file = "cryptography-43.0.0.tar.gz", hash = "sha256:b88075ada2d51aa9f18283532c9f60e72170041bba88d7f37e49cbb10275299e"}, + {file = "cryptography-43.0.1-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:8385d98f6a3bf8bb2d65a73e17ed87a3ba84f6991c155691c51112075f9ffc5d"}, + {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:27e613d7077ac613e399270253259d9d53872aaf657471473ebfc9a52935c062"}, + {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68aaecc4178e90719e95298515979814bda0cbada1256a4485414860bd7ab962"}, + {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:de41fd81a41e53267cb020bb3a7212861da53a7d39f863585d13ea11049cf277"}, + {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f98bf604c82c416bc829e490c700ca1553eafdf2912a91e23a79d97d9801372a"}, + {file = "cryptography-43.0.1-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:61ec41068b7b74268fa86e3e9e12b9f0c21fcf65434571dbb13d954bceb08042"}, + {file = "cryptography-43.0.1-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:014f58110f53237ace6a408b5beb6c427b64e084eb451ef25a28308270086494"}, + {file = "cryptography-43.0.1-cp37-abi3-win32.whl", hash = "sha256:2bd51274dcd59f09dd952afb696bf9c61a7a49dfc764c04dd33ef7a6b502a1e2"}, + {file = "cryptography-43.0.1-cp37-abi3-win_amd64.whl", hash = "sha256:666ae11966643886c2987b3b721899d250855718d6d9ce41b521252a17985f4d"}, + {file = "cryptography-43.0.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:ac119bb76b9faa00f48128b7f5679e1d8d437365c5d26f1c2c3f0da4ce1b553d"}, + {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bbcce1a551e262dfbafb6e6252f1ae36a248e615ca44ba302df077a846a8806"}, + {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58d4e9129985185a06d849aa6df265bdd5a74ca6e1b736a77959b498e0505b85"}, + {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d03a475165f3134f773d1388aeb19c2d25ba88b6a9733c5c590b9ff7bbfa2e0c"}, + {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:511f4273808ab590912a93ddb4e3914dfd8a388fed883361b02dea3791f292e1"}, + {file = "cryptography-43.0.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:80eda8b3e173f0f247f711eef62be51b599b5d425c429b5d4ca6a05e9e856baa"}, + {file = "cryptography-43.0.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:38926c50cff6f533f8a2dae3d7f19541432610d114a70808f0926d5aaa7121e4"}, + {file = "cryptography-43.0.1-cp39-abi3-win32.whl", hash = "sha256:a575913fb06e05e6b4b814d7f7468c2c660e8bb16d8d5a1faf9b33ccc569dd47"}, + {file = "cryptography-43.0.1-cp39-abi3-win_amd64.whl", hash = "sha256:d75601ad10b059ec832e78823b348bfa1a59f6b8d545db3a24fd44362a1564cb"}, + {file = "cryptography-43.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:ea25acb556320250756e53f9e20a4177515f012c9eaea17eb7587a8c4d8ae034"}, + {file = "cryptography-43.0.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c1332724be35d23a854994ff0b66530119500b6053d0bd3363265f7e5e77288d"}, + {file = "cryptography-43.0.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fba1007b3ef89946dbbb515aeeb41e30203b004f0b4b00e5e16078b518563289"}, + {file = "cryptography-43.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5b43d1ea6b378b54a1dc99dd8a2b5be47658fe9a7ce0a58ff0b55f4b43ef2b84"}, + {file = "cryptography-43.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:88cce104c36870d70c49c7c8fd22885875d950d9ee6ab54df2745f83ba0dc365"}, + {file = "cryptography-43.0.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:9d3cdb25fa98afdd3d0892d132b8d7139e2c087da1712041f6b762e4f807cc96"}, + {file = "cryptography-43.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e710bf40870f4db63c3d7d929aa9e09e4e7ee219e703f949ec4073b4294f6172"}, + {file = "cryptography-43.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7c05650fe8023c5ed0d46793d4b7d7e6cd9c04e68eabe5b0aeea836e37bdcec2"}, + {file = "cryptography-43.0.1.tar.gz", hash = "sha256:203e92a75716d8cfb491dc47c79e17d0d9207ccffcbcb35f598fbe463ae3444d"}, ] [package.dependencies] @@ -401,7 +401,7 @@ nox = ["nox"] pep8test = ["check-sdist", "click", "mypy", "ruff"] sdist = ["build"] ssh = ["bcrypt (>=3.1.5)"] -test = ["certifi", "cryptography-vectors (==43.0.0)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test = ["certifi", "cryptography-vectors (==43.0.1)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] test-randomorder = ["pytest-randomly"] [[package]] From 1b197752b63e6bb364d3a262abf8d4a71676217d Mon Sep 17 00:00:00 2001 From: Jeremy Wright Date: Tue, 10 Sep 2024 09:33:25 -0700 Subject: [PATCH 15/18] Fix minor misspelling in README.rst. (#17664) --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index d5625afe8f..2fe4a7e43f 100644 --- a/README.rst +++ b/README.rst @@ -158,7 +158,7 @@ it: We **strongly** recommend using a CAPTCHA, particularly if your homeserver is exposed to the public internet. Without it, anyone can freely register accounts on your homeserver. -This can be exploited by attackers to create spambots targetting the rest of the Matrix +This can be exploited by attackers to create spambots targeting the rest of the Matrix federation. 
Your new user name will be formed partly from the ``server_name``, and partly From 60441059a3bf4225a3bd16152b774aef14a797c0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 10 Sep 2024 18:05:31 +0100 Subject: [PATCH 16/18] Bump anyhow from 1.0.86 to 1.0.87 (#17685) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 586ac094a3..f1edc21b5f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13,9 +13,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.86" +version = "1.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" +checksum = "10f00e1f6e58a40e807377c75c6a7f97bf9044fab57816f2414e6f5f4499d7b8" [[package]] name = "arc-swap" From e06e3c40047bb117c2f9f81b62be56a7ff5eb225 Mon Sep 17 00:00:00 2001 From: V02460 Date: Tue, 10 Sep 2024 19:27:46 +0200 Subject: [PATCH 17/18] Add config option turn_shared_secret_path (#17690) Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> --- changelog.d/17690.feature | 1 + docs/usage/configuration/config_documentation.md | 16 ++++++++++++++++ synapse/config/voip.py | 14 +++++++++++++- 3 files changed, 30 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17690.feature diff --git a/changelog.d/17690.feature b/changelog.d/17690.feature new file mode 100644 index 0000000000..36c72f89f8 --- /dev/null +++ b/changelog.d/17690.feature @@ -0,0 +1 @@ +Add config option `turn_shared_secret_path`. \ No newline at end of file diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index c18f03d321..282b59dec9 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -2315,6 +2315,22 @@ Example configuration: ```yaml turn_shared_secret: "YOUR_SHARED_SECRET" ``` +--- +### `turn_shared_secret_path` + +An alternative to [`turn_shared_secret`](#turn_shared_secret): +allows the shared secret to be specified in an external file. + +The file should be a plain text file, containing only the shared secret. +Synapse reads the shared secret from the given file once at startup. + +Example configuration: +```yaml +turn_shared_secret_path: /path/to/secrets/file +``` + +_Added in Synapse 1.116.0._ + --- ### `turn_username` and `turn_password` diff --git a/synapse/config/voip.py b/synapse/config/voip.py index 6fe43a9e32..8614a41dd4 100644 --- a/synapse/config/voip.py +++ b/synapse/config/voip.py @@ -23,7 +23,12 @@ from typing import Any from synapse.types import JsonDict -from ._base import Config +from ._base import Config, ConfigError, read_file + +CONFLICTING_SHARED_SECRET_OPTS_ERROR = """\ +You have configured both `turn_shared_secret` and `turn_shared_secret_path`. +These are mutually incompatible. 
+""" class VoipConfig(Config): @@ -32,6 +37,13 @@ class VoipConfig(Config): def read_config(self, config: JsonDict, **kwargs: Any) -> None: self.turn_uris = config.get("turn_uris", []) self.turn_shared_secret = config.get("turn_shared_secret") + turn_shared_secret_path = config.get("turn_shared_secret_path") + if turn_shared_secret_path: + if self.turn_shared_secret: + raise ConfigError(CONFLICTING_SHARED_SECRET_OPTS_ERROR) + self.turn_shared_secret = read_file( + turn_shared_secret_path, ("turn_shared_secret_path",) + ).strip() self.turn_username = config.get("turn_username") self.turn_password = config.get("turn_password") self.turn_user_lifetime = self.parse_duration( From a7fcac564848911a32d31865f7f259aa943629a8 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Tue, 10 Sep 2024 11:29:24 -0600 Subject: [PATCH 18/18] Enable guest access on new media endpoints, per MSC4189 (#17675) --- changelog.d/17675.feature | 1 + synapse/rest/client/media.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/17675.feature diff --git a/changelog.d/17675.feature b/changelog.d/17675.feature new file mode 100644 index 0000000000..20db149ca8 --- /dev/null +++ b/changelog.d/17675.feature @@ -0,0 +1 @@ +Guests can use the new media endpoints to download media, as described by [MSC4189](https://github.com/matrix-org/matrix-spec-proposals/pull/4189). \ No newline at end of file diff --git a/synapse/rest/client/media.py b/synapse/rest/client/media.py index c30e3022de..25b302370f 100644 --- a/synapse/rest/client/media.py +++ b/synapse/rest/client/media.py @@ -138,7 +138,7 @@ class ThumbnailResource(RestServlet): ) -> None: # Validate the server name, raising if invalid parse_and_validate_server_name(server_name) - await self.auth.get_user_by_req(request) + await self.auth.get_user_by_req(request, allow_guest=True) set_cors_headers(request) set_corp_headers(request) @@ -229,7 +229,7 @@ class DownloadResource(RestServlet): # Validate the server name, raising if invalid parse_and_validate_server_name(server_name) - await self.auth.get_user_by_req(request) + await self.auth.get_user_by_req(request, allow_guest=True) set_cors_headers(request) set_corp_headers(request)