Mirror of https://github.com/element-hq/synapse.git (synced 2024-12-22 04:34:28 +03:00)

Commit 5a4b8b8456: Merge branch 'release-v1.102' into matrix-org-hotfixes

13 changed files with 152 additions and 156 deletions
CHANGES.md (31 changed lines)

@@ -1,3 +1,34 @@
+# Synapse 1.102.0rc1 (2024-02-20)
+
+### Features
+
+- A metric was added for emails sent by Synapse, broken down by type: `synapse_emails_sent_total`. Contributed by Remi Rampin. ([\#16881](https://github.com/element-hq/synapse/issues/16881))
+
+### Bugfixes
+
+- Do not send multiple concurrent requests for keys for the same server. ([\#16894](https://github.com/element-hq/synapse/issues/16894))
+- Fix performance issue when joining very large rooms that can cause the server to lock up. Introduced in v1.100.0. ([\#16903](https://github.com/element-hq/synapse/issues/16903))
+- Always prefer unthreaded receipt when >1 exist ([MSC4102](https://github.com/matrix-org/matrix-spec-proposals/pull/4102)). ([\#16927](https://github.com/element-hq/synapse/issues/16927))
+
+### Improved Documentation
+
+- Fix a small typo in the Rooms section of the Admin API documentation. Contributed by @RainerZufall187. ([\#16857](https://github.com/element-hq/synapse/issues/16857))
+
+### Internal Changes
+
+- Don't invalidate the entire event cache when we purge history. ([\#16905](https://github.com/element-hq/synapse/issues/16905))
+- Add experimental config option to not send device list updates for specific users. ([\#16909](https://github.com/element-hq/synapse/issues/16909))
+- Fix incorrect docker hub link in release script. ([\#16910](https://github.com/element-hq/synapse/issues/16910))
+
+### Updates to locked dependencies
+
+* Bump attrs from 23.1.0 to 23.2.0. ([\#16899](https://github.com/element-hq/synapse/issues/16899))
+* Bump bcrypt from 4.0.1 to 4.1.2. ([\#16900](https://github.com/element-hq/synapse/issues/16900))
+* Bump pygithub from 2.1.1 to 2.2.0. ([\#16902](https://github.com/element-hq/synapse/issues/16902))
+* Bump sentry-sdk from 1.40.0 to 1.40.3. ([\#16898](https://github.com/element-hq/synapse/issues/16898))
+
 # Synapse 1.101.0 (2024-02-13)
 
 ### Bugfixes
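The new Features entry above describes a per-type email counter named `synapse_emails_sent_total`. The commit does not show how Synapse registers that metric, so the following is only a minimal sketch of a label-partitioned Prometheus counter built with `prometheus_client`; the `type` label name, the `record_email_sent` helper, and the example label values are assumptions, not Synapse's actual code.

```python
# Minimal sketch (not Synapse's actual code): a counter broken down by a label,
# matching the metric name from the changelog entry above.
from prometheus_client import Counter

emails_sent_counter = Counter(
    "synapse_emails_sent_total",
    "Number of emails sent by the homeserver",
    labelnames=("type",),
)


def record_email_sent(email_type: str) -> None:
    # email_type values such as "password_reset" or "notification" are illustrative.
    emails_sent_counter.labels(type=email_type).inc()
```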
Eight one-line changelog entry files deleted (each hunk: @@ -1 +0,0 @@):

-Fix a small typo in the Rooms section of the Admin API documentation. Contributed by @RainerZufall187.
-A metric was added for emails sent by Synapse, broken down by type: `synapse_emails_sent_total`. Contributed by Remi Rampin.
-Do not send multiple concurrent requests for keys for the same server.
-Fix performance issue when joining very large rooms that can cause the server to lock up. Introduced in v1.100.0.
-Don't invalidate the entire event cache when we purge history.
-Add experimental config option to not send device list updates for specific users.
-Fix incorrect docker hub link in release script.
-Always prefer unthreaded receipt when >1 exist ([MSC4102](https://github.com/matrix-org/matrix-spec-proposals/pull/4102)).
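The bugfix about not sending multiple concurrent key requests for the same server describes request coalescing: when several callers need the same server's keys at once, only one outbound request should be in flight and the rest should share its result. The sketch below illustrates that general pattern with `asyncio`; the class name, the injected `fetch` callable, and the overall structure are hypothetical and are not taken from Synapse's implementation.

```python
# Generic request-coalescing sketch (illustrative only; not Synapse's code).
# Concurrent callers asking for the same server's keys share one in-flight request.
import asyncio
from typing import Awaitable, Callable, Dict


class KeyFetchCoalescer:
    def __init__(self, fetch: Callable[[str], Awaitable[dict]]) -> None:
        self._fetch = fetch
        self._in_flight: Dict[str, asyncio.Task] = {}

    async def get_keys(self, server_name: str) -> dict:
        task = self._in_flight.get(server_name)
        if task is None:
            # First caller for this server starts the real request.
            task = asyncio.create_task(self._fetch(server_name))
            self._in_flight[server_name] = task
            # Drop the entry once the request finishes, successfully or not.
            task.add_done_callback(lambda _: self._in_flight.pop(server_name, None))
        # Later callers await the same task instead of issuing a new request.
        return await task
```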
debian/changelog (vendored, 6 changed lines)

@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.102.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.102.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 20 Feb 2024 15:50:36 +0000
+
 matrix-synapse-py3 (1.101.0) stable; urgency=medium
 
   * New Synapse release 1.101.0.
@@ -96,7 +96,7 @@ module-name = "synapse.synapse_rust"
 
 [tool.poetry]
 name = "matrix-synapse"
-version = "1.101.0"
+version = "1.102.0rc1"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "AGPL-3.0-or-later"
@@ -29,11 +29,17 @@ from synapse.storage.databases.main import DataStore
 
 
 async def get_badge_count(store: DataStore, user_id: str, group_by_room: bool) -> int:
     invites = await store.get_invited_rooms_for_local_user(user_id)
+    joins = await store.get_rooms_for_user(user_id)
 
     badge = len(invites)
 
     room_to_count = await store.get_unread_counts_by_room_for_user(user_id)
-    for _room_id, notify_count in room_to_count.items():
+    for room_id, notify_count in room_to_count.items():
+        # room_to_count may include rooms which the user has left,
+        # ignore those.
+        if room_id not in joins:
+            continue
+
         if notify_count == 0:
             continue
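The hunk above makes `get_badge_count` skip rooms the user is no longer joined to, because `get_unread_counts_by_room_for_user` may now return counts for rooms the user has left (see the docstring added in the next hunk). The snippet below restates that filtering on plain data so it can be run in isolation; the function name and the example room IDs are illustrative only, and the badge arithmetic is simplified.

```python
# Standalone restatement of the membership filter shown above (simplified sketch;
# plain data instead of the real store, values are illustrative).
from typing import Dict, Set


def badge_from_counts(invites: Set[str], joins: Set[str], room_to_count: Dict[str, int]) -> int:
    badge = len(invites)
    for room_id, notify_count in room_to_count.items():
        # room_to_count may include rooms which the user has left; ignore those.
        if room_id not in joins:
            continue
        if notify_count == 0:
            continue
        badge += notify_count  # simplified: add every unread notification
    return badge


# A left room's count is ignored: badge = 1 invite + 3 unread in the joined room.
assert badge_from_counts({"!inv:x"}, {"!a:x"}, {"!a:x": 3, "!left:x": 7}) == 4
```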
@@ -358,6 +358,10 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
         This function is intentionally not cached because it is called to calculate the
         unread badge for push notifications and thus the result is expected to change.
 
+        Note that this function assumes the user is a member of the room. Because
+        summary rows are not removed when a user leaves a room, the caller must
+        filter out those results from the result.
+
         Returns:
             A map of room ID to notification counts for the given user.
         """
@@ -370,170 +374,127 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
     def _get_unread_counts_by_room_for_user_txn(
         self, txn: LoggingTransaction, user_id: str
     ) -> Dict[str, int]:
-        # To get the badge count of all rooms we need to make three queries:
-        # 1. Fetch all counts from `event_push_summary`, discarding any stale
-        #    rooms.
-        # 2. Fetch all notifications from `event_push_actions` that haven't
-        #    been rotated yet.
-        # 3. Fetch all notifications from `event_push_actions` for the stale
-        #    rooms.
-        #
-        # The "stale room" scenario generally happens when there is a new read
-        # receipt that hasn't yet been processed to update the
-        # `event_push_summary` table. When that happens we ignore the
-        # `event_push_summary` table for that room and calculate the count
-        # manually from `event_push_actions`.
-
-        # We need to only take into account read receipts of these types.
-        receipt_types_clause, receipt_types_args = make_in_list_sql_clause(
+        receipt_types_clause, args = make_in_list_sql_clause(
             self.database_engine,
             "receipt_type",
             (ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE),
         )
+        args.extend([user_id, user_id])
 
-        # Step 1, fetch all counts from `event_push_summary` for the user. This
-        # is slightly convoluted as we also need to pull out the stream ordering
-        # of the most recent receipt of the user in the room (either a thread
-        # aware receipt or thread unaware receipt) in order to determine
-        # whether the row in `event_push_summary` is stale. Hence the outer
-        # GROUP BY and odd join condition against `receipts_linearized`.
-        sql = f"""
-            SELECT room_id, notif_count, stream_ordering, thread_id, last_receipt_stream_ordering,
-                MAX(receipt_stream_ordering)
-            FROM (
-                SELECT e.room_id, notif_count, e.stream_ordering, e.thread_id, last_receipt_stream_ordering,
-                    ev.stream_ordering AS receipt_stream_ordering
-                FROM event_push_summary AS e
-                INNER JOIN local_current_membership USING (user_id, room_id)
-                LEFT JOIN receipts_linearized AS r ON (
-                    e.user_id = r.user_id
-                    AND e.room_id = r.room_id
-                    AND (e.thread_id = r.thread_id OR r.thread_id IS NULL)
-                    AND {receipt_types_clause}
-                )
-                LEFT JOIN events AS ev ON (r.event_id = ev.event_id)
-                WHERE e.user_id = ? and notif_count > 0
-            ) AS es
-            GROUP BY room_id, notif_count, stream_ordering, thread_id, last_receipt_stream_ordering
-        """
+        receipts_cte = f"""
+            WITH all_receipts AS (
+                SELECT room_id, thread_id, MAX(event_stream_ordering) AS max_receipt_stream_ordering
+                FROM receipts_linearized
+                LEFT JOIN events USING (room_id, event_id)
+                WHERE
+                    {receipt_types_clause}
+                    AND user_id = ?
+                GROUP BY room_id, thread_id
+            )
+        """
 
-        txn.execute(
-            sql,
-            receipt_types_args
-            + [
-                user_id,
-            ],
-        )
+        receipts_joins = """
+            LEFT JOIN (
+                SELECT room_id, thread_id,
+                max_receipt_stream_ordering AS threaded_receipt_stream_ordering
+                FROM all_receipts
+                WHERE thread_id IS NOT NULL
+            ) AS threaded_receipts USING (room_id, thread_id)
+            LEFT JOIN (
+                SELECT room_id, thread_id,
+                max_receipt_stream_ordering AS unthreaded_receipt_stream_ordering
+                FROM all_receipts
+                WHERE thread_id IS NULL
+            ) AS unthreaded_receipts USING (room_id)
+        """
 
+        # First get summary counts by room / thread for the user. We use the max receipt
+        # stream ordering of both threaded & unthreaded receipts to compare against the
+        # summary table.
+        #
+        # PostgreSQL and SQLite differ in comparing scalar numerics.
+        if isinstance(self.database_engine, PostgresEngine):
+            # GREATEST ignores NULLs.
+            max_clause = """GREATEST(
+                threaded_receipt_stream_ordering,
+                unthreaded_receipt_stream_ordering
+            )"""
+        else:
+            # MAX returns NULL if any are NULL, so COALESCE to 0 first.
+            max_clause = """MAX(
+                COALESCE(threaded_receipt_stream_ordering, 0),
+                COALESCE(unthreaded_receipt_stream_ordering, 0)
+            )"""
+
+        sql = f"""
+            {receipts_cte}
+            SELECT eps.room_id, eps.thread_id, notif_count
+            FROM event_push_summary AS eps
+            {receipts_joins}
+            WHERE user_id = ?
+                AND notif_count != 0
+                AND (
+                    (last_receipt_stream_ordering IS NULL AND stream_ordering > {max_clause})
+                    OR last_receipt_stream_ordering = {max_clause}
+                )
+        """
+        txn.execute(sql, args)
 
+        seen_thread_ids = set()
         room_to_count: Dict[str, int] = defaultdict(int)
-        stale_room_ids = set()
-        for row in txn:
-            room_id = row[0]
-            notif_count = row[1]
-            stream_ordering = row[2]
-            _thread_id = row[3]
-            last_receipt_stream_ordering = row[4]
-            receipt_stream_ordering = row[5]
 
-            if last_receipt_stream_ordering is None:
-                if receipt_stream_ordering is None:
-                    room_to_count[room_id] += notif_count
-                elif stream_ordering > receipt_stream_ordering:
-                    room_to_count[room_id] += notif_count
-                else:
-                    # The latest read receipt from the user is after all the rows for
-                    # this room in `event_push_summary`. We ignore them, and
-                    # calculate the count from `event_push_actions` in step 3.
-                    pass
-            elif last_receipt_stream_ordering == receipt_stream_ordering:
-                room_to_count[room_id] += notif_count
-            else:
-                # The row is stale if `last_receipt_stream_ordering` is set and
-                # *doesn't* match the latest receipt from the user.
-                stale_room_ids.add(room_id)
+        for room_id, thread_id, notif_count in txn:
+            room_to_count[room_id] += notif_count
+            seen_thread_ids.add(thread_id)
 
-        # Discard any stale rooms from `room_to_count`, as we will recalculate
-        # them in step 3.
-        for room_id in stale_room_ids:
-            room_to_count.pop(room_id, None)
+        # Now get any event push actions that haven't been rotated using the same OR
+        # join and filter by receipt and event push summary rotated up to stream ordering.
+        sql = f"""
+            {receipts_cte}
+            SELECT epa.room_id, epa.thread_id, COUNT(CASE WHEN epa.notif = 1 THEN 1 END) AS notif_count
+            FROM event_push_actions AS epa
+            {receipts_joins}
+            WHERE user_id = ?
+                AND epa.notif = 1
+                AND stream_ordering > (SELECT stream_ordering FROM event_push_summary_stream_ordering)
+                AND (threaded_receipt_stream_ordering IS NULL OR stream_ordering > threaded_receipt_stream_ordering)
+                AND (unthreaded_receipt_stream_ordering IS NULL OR stream_ordering > unthreaded_receipt_stream_ordering)
+            GROUP BY epa.room_id, epa.thread_id
+        """
+        txn.execute(sql, args)
 
-        # Step 2, basically the same query, except against `event_push_actions`
-        # and only fetching rows inserted since the last rotation.
-        rotated_upto_stream_ordering = self.db_pool.simple_select_one_onecol_txn(
-            txn,
-            table="event_push_summary_stream_ordering",
-            keyvalues={},
-            retcol="stream_ordering",
-        )
+        for room_id, thread_id, notif_count in txn:
+            # Note: only count push actions we have valid summaries for with up to date receipt.
+            if thread_id not in seen_thread_ids:
+                continue
+            room_to_count[room_id] += notif_count
+
+        thread_id_clause, thread_ids_args = make_in_list_sql_clause(
+            self.database_engine, "epa.thread_id", seen_thread_ids
+        )
 
+        # Finally re-check event_push_actions for any rooms not in the summary, ignoring
+        # the rotated up-to position. This handles the case where a read receipt has arrived
+        # but not been rotated meaning the summary table is out of date, so we go back to
+        # the push actions table.
         sql = f"""
-            SELECT room_id, thread_id
-            FROM (
-                SELECT e.room_id, e.stream_ordering, e.thread_id,
-                    ev.stream_ordering AS receipt_stream_ordering
-                FROM event_push_actions AS e
-                INNER JOIN local_current_membership USING (user_id, room_id)
-                LEFT JOIN receipts_linearized AS r ON (
-                    e.user_id = r.user_id
-                    AND e.room_id = r.room_id
-                    AND (e.thread_id = r.thread_id OR r.thread_id IS NULL)
-                    AND {receipt_types_clause}
-                )
-                LEFT JOIN events AS ev ON (r.event_id = ev.event_id)
-                WHERE e.user_id = ? and notif > 0
-                    AND e.stream_ordering > ?
-            ) AS es
-            GROUP BY room_id, stream_ordering, thread_id
-            HAVING stream_ordering > COALESCE(MAX(receipt_stream_ordering), 0)
+            {receipts_cte}
+            SELECT epa.room_id, COUNT(CASE WHEN epa.notif = 1 THEN 1 END) AS notif_count
+            FROM event_push_actions AS epa
+            {receipts_joins}
+            WHERE user_id = ?
+                AND NOT {thread_id_clause}
+                AND epa.notif = 1
+                AND (threaded_receipt_stream_ordering IS NULL OR stream_ordering > threaded_receipt_stream_ordering)
+                AND (unthreaded_receipt_stream_ordering IS NULL OR stream_ordering > unthreaded_receipt_stream_ordering)
+            GROUP BY epa.room_id
         """
 
-        txn.execute(
-            sql,
-            receipt_types_args + [user_id, rotated_upto_stream_ordering],
-        )
-        for room_id, _thread_id in txn:
-            # Again, we ignore any stale rooms.
-            if room_id not in stale_room_ids:
-                # For event push actions it is one notification per row.
-                room_to_count[room_id] += 1
+        args.extend(thread_ids_args)
+        txn.execute(sql, args)
 
-        # Step 3, if we have stale rooms then we need to recalculate the counts
-        # from `event_push_actions`. Again, this is basically the same query as
-        # above except without a lower bound on stream ordering and only against
-        # a specific set of rooms.
-        if stale_room_ids:
-            room_id_clause, room_id_args = make_in_list_sql_clause(
-                self.database_engine,
-                "e.room_id",
-                stale_room_ids,
-            )
-
-            sql = f"""
-                SELECT room_id, thread_id
-                FROM (
-                    SELECT e.room_id, e.stream_ordering, e.thread_id,
-                        ev.stream_ordering AS receipt_stream_ordering
-                    FROM event_push_actions AS e
-                    INNER JOIN local_current_membership USING (user_id, room_id)
-                    LEFT JOIN receipts_linearized AS r ON (
-                        e.user_id = r.user_id
-                        AND e.room_id = r.room_id
-                        AND (e.thread_id = r.thread_id OR r.thread_id IS NULL)
-                        AND {receipt_types_clause}
-                    )
-                    LEFT JOIN events AS ev ON (r.event_id = ev.event_id)
-                    WHERE e.user_id = ? and notif > 0
-                        AND {room_id_clause}
-                ) AS es
-                GROUP BY room_id, stream_ordering, thread_id
-                HAVING stream_ordering > COALESCE(MAX(receipt_stream_ordering), 0)
-            """
-            txn.execute(
-                sql,
-                receipt_types_args + [user_id] + room_id_args,
-            )
-            for room_id, _ in txn:
-                room_to_count[room_id] += 1
+        for room_id, notif_count in txn:
+            room_to_count[room_id] += notif_count
 
         return room_to_count
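The per-engine `max_clause` above exists because, as the inline comments note, PostgreSQL's `GREATEST` ignores NULL arguments while SQLite's scalar `MAX` returns NULL as soon as any argument is NULL. The snippet below checks the SQLite half of that claim with the standard-library `sqlite3` module; the PostgreSQL behaviour is not exercised here.

```python
# Quick check of the SQLite behaviour that motivates the COALESCE in max_clause.
import sqlite3

conn = sqlite3.connect(":memory:")

# Scalar MAX with a NULL argument yields NULL in SQLite...
row = conn.execute("SELECT MAX(NULL, 5)").fetchone()
assert row[0] is None

# ...so the query COALESCEs each receipt stream ordering to 0 before comparing.
row = conn.execute("SELECT MAX(COALESCE(NULL, 0), COALESCE(5, 0))").fetchone()
assert row[0] == 5
```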