Mirror of https://github.com/element-hq/synapse.git, synced 2024-11-24 10:35:46 +03:00

Merge remote-tracking branch 'origin/develop' into matrix-org-hotfixes

Commit 63439300cd: 29 changed files with 395 additions and 42 deletions
CHANGES.md

@@ -1,3 +1,10 @@
+# Synapse 1.116.0 (2024-10-01)
+
+No significant changes since 1.116.0rc2.
+
+
 # Synapse 1.116.0rc2 (2024-09-26)
 
 ### Features
1 changelog.d/17628.doc Normal file
@@ -0,0 +1 @@
+Clarify the docstring of `test_forget_when_not_left`.
1 changelog.d/17709.doc Normal file
@@ -0,0 +1 @@
+Add documentation note about PYTHONMALLOC for accurate jemalloc memory tracking. Contributed by @hensg.
1 changelog.d/17717.feature Normal file
@@ -0,0 +1 @@
+Add config option `redis.password_path`.
1 changelog.d/17776.doc Normal file
@@ -0,0 +1 @@
+Explain how load balancing works for `federation_sender_instances`.
1 changelog.d/17779.bugfix Normal file
@@ -0,0 +1 @@
+Fix a rare bug introduced in v1.29.0 where invalidating a user's access token from a worker could raise an error.
1 changelog.d/17780.bugfix Normal file
@@ -0,0 +1 @@
+In the response to `GET /_matrix/client/versions`, set the `unstable_features` flag for MSC4140 to `false` when server configuration disables support for delayed events.
1 changelog.d/17787.misc Normal file
@@ -0,0 +1 @@
+Sliding sync minor performance speed up using new table.
1 changelog.d/17788.misc Normal file
@@ -0,0 +1 @@
+Sliding sync minor performance improvement by omitting unchanged data from incremental responses.
1 changelog.d/17789.misc Normal file
@@ -0,0 +1 @@
+Speed up sliding sync when there are many active subscriptions.
6 debian/changelog vendored
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.116.0) stable; urgency=medium
+
+  * New Synapse release 1.116.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 01 Oct 2024 11:14:07 +0100
+
 matrix-synapse-py3 (1.116.0~rc2) stable; urgency=medium
 
   * New synapse release 1.116.0rc2.
@@ -255,6 +255,8 @@ line to `/etc/default/matrix-synapse`:
 
 LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.2
 
+*Note*: You may need to set `PYTHONMALLOC=malloc` to ensure that `jemalloc` can accurately calculate memory usage. By default, Python uses its internal small-object allocator, which may interfere with jemalloc's ability to track memory consumption correctly. This could prevent the [cache_autotuning](../configuration/config_documentation.md#caches-and-associated-values) feature from functioning as expected, as the Python allocator may not reach the memory threshold set by `max_cache_memory_usage`, thus not triggering the cache eviction process.
+
 This made a significant difference on Python 2.7 - it's unclear how
 much of an improvement it provides on Python 3.x.
 
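As a rough sketch, the two settings from this section can sit together in `/etc/default/matrix-synapse`. The jemalloc path varies by distribution and architecture; this is illustrative, not verbatim from the docs:

```sh
# /etc/default/matrix-synapse -- illustrative sketch, not verbatim upstream docs
# Preload jemalloc (path varies by distribution/architecture)
LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.2
# Bypass Python's small-object allocator so jemalloc sees all allocations
PYTHONMALLOC=malloc
```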
@@ -4370,6 +4370,12 @@ a `federation_sender_instances` map. Doing so will remove handling of this funct
 the main process. Multiple workers can be added to this map, in which case the work is
 balanced across them.
 
+The way that the load balancing works is any outbound federation request will be assigned
+to a federation sender worker based on the hash of the destination server name. This
+means that all requests being sent to the same destination will be processed by the same
+worker instance. Multiple `federation_sender_instances` are useful if there is a federation
+with multiple servers.
+
 This configuration setting must be shared between all workers handling federation
 sending, and if changed all federation sender workers must be stopped at the same time
 and then started, to ensure that all instances are running with the same config (otherwise
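The hash-based assignment described above can be sketched in a few lines of Python. This is only an illustration of the idea; Synapse's actual implementation and hash function differ:

```python
import hashlib

federation_sender_instances = ["sender1", "sender2", "sender3"]

def worker_for(destination: str) -> str:
    # A stable hash of the destination server name picks the worker, so
    # every request to the same destination lands on the same instance.
    digest = int(hashlib.sha256(destination.encode()).hexdigest(), 16)
    return federation_sender_instances[digest % len(federation_sender_instances)]

print(worker_for("matrix.org"))  # always the same worker for this destination
```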
@@ -4518,6 +4524,9 @@ This setting has the following sub-options:
 * `path`: The full path to a local Unix socket file. **If this is used, `host` and
   `port` are ignored.** Defaults to `/tmp/redis.sock'
 * `password`: Optional password if configured on the Redis instance.
+* `password_path`: Alternative to `password`, reading the password from an
+  external file. The file should be a plain text file, containing only the
+  password. Synapse reads the password from the given file once at startup.
 * `dbid`: Optional redis dbid if needs to connect to specific redis logical db.
 * `use_tls`: Whether to use tls connection. Defaults to false.
 * `certificate_file`: Optional path to the certificate file
@@ -4531,13 +4540,16 @@ This setting has the following sub-options:
 
 _Changed in Synapse 1.85.0: Added path option to use a local Unix socket_
 
+_Changed in Synapse 1.116.0: Added password\_path_
+
 Example configuration:
 ```yaml
 redis:
   enabled: true
   host: localhost
   port: 6379
-  password: <secret_password>
+  password_path: <path_to_the_password_file>
+  # OR password: <secret_password>
   dbid: <dbid>
   #use_tls: True
   #certificate_file: <path_to_the_certificate_file>
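To pair with the example above, one plausible way to provision the password file; the path, owner, and permissions here are assumptions, not taken from the docs:

```sh
# Illustrative only: create a password file readable by the Synapse user
echo -n 'my-redis-password' > /etc/matrix-synapse/redis_password
chmod 600 /etc/matrix-synapse/redis_password
chown matrix-synapse:matrix-synapse /etc/matrix-synapse/redis_password
```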
6 poetry.lock generated
@@ -1974,13 +1974,13 @@ six = ">=1.5"
 
 [[package]]
 name = "python-multipart"
-version = "0.0.10"
+version = "0.0.12"
 description = "A streaming multipart parser for Python"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "python_multipart-0.0.10-py3-none-any.whl", hash = "sha256:2b06ad9e8d50c7a8db80e3b56dab590137b323410605af2be20d62a5f1ba1dc8"},
-    {file = "python_multipart-0.0.10.tar.gz", hash = "sha256:46eb3c6ce6fdda5fb1a03c7e11d490e407c6930a2703fe7aef4da71c374688fa"},
+    {file = "python_multipart-0.0.12-py3-none-any.whl", hash = "sha256:43dcf96cf65888a9cd3423544dd0d75ac10f7aa0c3c28a175bbcd00c9ce1aebf"},
+    {file = "python_multipart-0.0.12.tar.gz", hash = "sha256:045e1f98d719c1ce085ed7f7e1ef9d8ccc8c02ba02b5566d5f7521410ced58cb"},
 ]
 
 [[package]]
pyproject.toml

@@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust"
 
 [tool.poetry]
 name = "matrix-synapse"
-version = "1.116.0rc2"
+version = "1.116.0"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "AGPL-3.0-or-later"
synapse/config/redis.py

@@ -21,10 +21,15 @@
 
 from typing import Any
 
-from synapse.config._base import Config
+from synapse.config._base import Config, ConfigError, read_file
 from synapse.types import JsonDict
 from synapse.util.check_dependencies import check_requirements
 
+CONFLICTING_PASSWORD_OPTS_ERROR = """\
+You have configured both `redis.password` and `redis.password_path`.
+These are mutually incompatible.
+"""
+
 
 class RedisConfig(Config):
     section = "redis"

@@ -43,6 +48,17 @@ class RedisConfig(Config):
         self.redis_path = redis_config.get("path", None)
         self.redis_dbid = redis_config.get("dbid", None)
         self.redis_password = redis_config.get("password")
+        redis_password_path = redis_config.get("password_path")
+        if redis_password_path:
+            if self.redis_password:
+                raise ConfigError(CONFLICTING_PASSWORD_OPTS_ERROR)
+            self.redis_password = read_file(
+                redis_password_path,
+                (
+                    "redis",
+                    "password_path",
+                ),
+            ).strip()
 
         self.redis_use_tls = redis_config.get("use_tls", False)
         self.redis_certificate = redis_config.get("certificate_file", None)
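The pattern above, an inline secret and a file-based secret that must never both be set, generalizes beyond Synapse. A minimal standalone sketch with hypothetical names (not Synapse's API):

```python
# Standalone sketch of the mutually-exclusive secret pattern above.
# All names here are hypothetical; this is not Synapse code.
from pathlib import Path
from typing import Optional


def resolve_secret(inline: Optional[str], path: Optional[str]) -> Optional[str]:
    if path:
        if inline:
            # Refusing both avoids silently preferring one source over the other.
            raise ValueError("configure either the inline secret or the file, not both")
        # Strip the trailing newline that editors usually append to the file.
        return Path(path).read_text().strip()
    return inline
```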
@@ -49,6 +49,7 @@ from synapse.types import (
     Requester,
     SlidingSyncStreamToken,
     StateMap,
+    StrCollection,
     StreamKeyType,
     StreamToken,
 )

@@ -293,7 +294,6 @@ class SlidingSyncHandler:
         # to record rooms as having updates even if there might not actually
         # be anything new for the user (e.g. due to event filters, events
         # having happened after the user left, etc).
-        unsent_room_ids = []
         if from_token:
             # The set of rooms that the client (may) care about, but aren't
             # in any list range (or subscribed to).

@@ -305,6 +305,15 @@ class SlidingSyncHandler:
             # TODO: Replace this with something faster. When we land the
             # sliding sync tables that record the most recent event
             # positions we can use that.
+            unsent_room_ids: StrCollection
+            if await self.store.have_finished_sliding_sync_background_jobs():
+                unsent_room_ids = await (
+                    self.store.get_rooms_that_have_updates_since_sliding_sync_table(
+                        room_ids=missing_rooms,
+                        from_key=from_token.stream_token.room_key,
+                    )
+                )
+            else:
                 missing_event_map_by_room = (
                     await self.store.get_room_events_stream_for_rooms(
                         room_ids=missing_rooms,
@@ -1048,22 +1057,42 @@ class SlidingSyncHandler:
             )
         )
 
-        # Figure out the last bump event in the room
-        #
-        # By default, just choose the membership event position for any non-join membership
-        bump_stamp = room_membership_for_user_at_to_token.event_pos.stream
+        # Figure out the last bump event in the room. If the bump stamp hasn't
+        # changed we omit it from the response.
+        bump_stamp = None
+
+        always_return_bump_stamp = (
+            # We use the membership event position for any non-join
+            room_membership_for_user_at_to_token.membership != Membership.JOIN
+            # We didn't fetch any timeline events but we should still check for
+            # a bump_stamp that might be somewhere
+            or limited is None
+            # There might be a bump event somewhere before the timeline events
+            # that we fetched, that we didn't previously send down
+            or limited is True
+            # Always give the client some frame of reference if this is the
+            # first time they are seeing the room down the connection
+            or initial
+        )
+
         # If we're joined to the room, we need to find the last bump event before the
         # `to_token`
         if room_membership_for_user_at_to_token.membership == Membership.JOIN:
-            # Try and get a bump stamp, if not we just fall back to the
-            # membership token.
+            # Try and get a bump stamp
            new_bump_stamp = await self._get_bump_stamp(
-                room_id, to_token, timeline_events
+                room_id,
+                to_token,
+                timeline_events,
+                check_outside_timeline=always_return_bump_stamp,
             )
             if new_bump_stamp is not None:
                 bump_stamp = new_bump_stamp
 
-        if bump_stamp < 0:
+        if bump_stamp is None and always_return_bump_stamp:
+            # By default, just choose the membership event position for any non-join membership
+            bump_stamp = room_membership_for_user_at_to_token.event_pos.stream
+
+        if bump_stamp is not None and bump_stamp < 0:
             # We never want to send down negative stream orderings, as you can't
             # sensibly compare positive and negative stream orderings (they have
             # different meanings).
@ -1156,14 +1185,23 @@ class SlidingSyncHandler:
|
|||
|
||||
@trace
|
||||
async def _get_bump_stamp(
|
||||
self, room_id: str, to_token: StreamToken, timeline: List[EventBase]
|
||||
self,
|
||||
room_id: str,
|
||||
to_token: StreamToken,
|
||||
timeline: List[EventBase],
|
||||
check_outside_timeline: bool,
|
||||
) -> Optional[int]:
|
||||
"""Get a bump stamp for the room, if we have a bump event
|
||||
"""Get a bump stamp for the room, if we have a bump event and it has
|
||||
changed.
|
||||
|
||||
Args:
|
||||
room_id
|
||||
to_token: The upper bound of token to return
|
||||
timeline: The list of events we have fetched.
|
||||
limited: If the timeline was limited.
|
||||
check_outside_timeline: Whether we need to check for bump stamp for
|
||||
events before the timeline if we didn't find a bump stamp in
|
||||
the timeline events.
|
||||
"""
|
||||
|
||||
# First check the timeline events we're returning to see if one of
|
||||
|
@ -1183,6 +1221,11 @@ class SlidingSyncHandler:
|
|||
if new_bump_stamp > 0:
|
||||
return new_bump_stamp
|
||||
|
||||
if not check_outside_timeline:
|
||||
# If we are not a limited sync, then we know the bump stamp can't
|
||||
# have changed.
|
||||
return None
|
||||
|
||||
# We can quickly query for the latest bump event in the room using the
|
||||
# sliding sync tables.
|
||||
latest_room_bump_stamp = await self.store.get_latest_bump_stamp_for_room(
|
||||
|
@@ -500,6 +500,16 @@ class SlidingSyncRoomLists:
             # depending on the `required_state` requested (see below).
             partial_state_rooms = await self.store.get_partial_rooms()
 
+            # Fetch any rooms that we have not already fetched from the database.
+            subscription_sliding_sync_rooms = (
+                await self.store.get_sliding_sync_room_for_user_batch(
+                    user_id,
+                    sync_config.room_subscriptions.keys()
+                    - room_membership_for_user_map.keys(),
+                )
+            )
+            room_membership_for_user_map.update(subscription_sliding_sync_rooms)
+
             for (
                 room_id,
                 room_subscription,

@@ -507,17 +517,11 @@ class SlidingSyncRoomLists:
                 # Check if we have a membership for the room, but didn't pull it out
                 # above. This could be e.g. a leave that we don't pull out by
                 # default.
-                current_room_entry = (
-                    await self.store.get_sliding_sync_room_for_user(
-                        user_id, room_id
-                    )
-                )
+                current_room_entry = room_membership_for_user_map.get(room_id)
                 if not current_room_entry:
                     # TODO: Handle rooms the user isn't in.
                     continue
 
-                room_membership_for_user_map[room_id] = current_room_entry
-
                 all_rooms.add(room_id)
 
                 # Take the superset of the `RoomSyncConfig` for each room.
@@ -1039,7 +1039,7 @@ class _MultipartParserProtocol(protocol.Protocol):
         self.deferred = deferred
         self.boundary = boundary
         self.max_length = max_length
-        self.parser = None
+        self.parser: Optional[multipart.MultipartParser] = None
         self.multipart_response = MultipartResponse()
         self.has_redirect = False
         self.in_json = False

@@ -1097,7 +1097,7 @@ class _MultipartParserProtocol(protocol.Protocol):
             self.deferred.errback()
             self.file_length += end - start
 
-        callbacks = {
+        callbacks: "multipart.multipart.MultipartCallbacks" = {
             "on_header_field": on_header_field,
             "on_header_value": on_header_value,
             "on_part_data": on_part_data,

@@ -1113,7 +1113,7 @@ class _MultipartParserProtocol(protocol.Protocol):
             self.transport.abortConnection()
 
         try:
-            self.parser.write(incoming_data)  # type: ignore[attr-defined]
+            self.parser.write(incoming_data)
         except Exception as e:
             logger.warning(f"Exception writing to multipart parser: {e}")
             self.deferred.errback()
@@ -48,7 +48,7 @@ class ReplicationRemovePusherRestServlet(ReplicationEndpoint):
 
     """
 
-    NAME = "add_user_account_data"
+    NAME = "remove_pusher"
     PATH_ARGS = ("user_id",)
     CACHE = False
 
@@ -1010,11 +1010,13 @@ class SlidingSyncRestServlet(RestServlet):
         serialized_rooms: Dict[str, JsonDict] = {}
         for room_id, room_result in rooms.items():
             serialized_rooms[room_id] = {
-                "bump_stamp": room_result.bump_stamp,
                 "notification_count": room_result.notification_count,
                 "highlight_count": room_result.highlight_count,
             }
 
+            if room_result.bump_stamp is not None:
+                serialized_rooms[room_id]["bump_stamp"] = room_result.bump_stamp
+
             if room_result.joined_count is not None:
                 serialized_rooms[room_id]["joined_count"] = room_result.joined_count
 
@@ -172,7 +172,7 @@ class VersionsRestServlet(RestServlet):
                 )
             ),
             # MSC4140: Delayed events
-            "org.matrix.msc4140": True,
+            "org.matrix.msc4140": bool(self.config.server.max_event_delay_ms),
             # MSC4151: Report room API (Client-Server API)
             "org.matrix.msc4151": self.config.experimental.msc4151_enabled,
             # Simplified sliding sync
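A quick way to observe the advertised flag from outside; the homeserver URL is a placeholder:

```sh
# Check the advertised MSC4140 support flag (illustrative; substitute your homeserver)
curl -s https://example.com/_matrix/client/versions | jq '.unstable_features["org.matrix.msc4140"]'
```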
@ -1499,6 +1499,57 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
|
|||
"get_sliding_sync_room_for_user", get_sliding_sync_room_for_user_txn
|
||||
)
|
||||
|
||||
async def get_sliding_sync_room_for_user_batch(
|
||||
self, user_id: str, room_ids: StrCollection
|
||||
) -> Dict[str, RoomsForUserSlidingSync]:
|
||||
"""Get the sliding sync room entry for the given user and rooms."""
|
||||
|
||||
if not room_ids:
|
||||
return {}
|
||||
|
||||
def get_sliding_sync_room_for_user_batch_txn(
|
||||
txn: LoggingTransaction,
|
||||
) -> Dict[str, RoomsForUserSlidingSync]:
|
||||
clause, args = make_in_list_sql_clause(
|
||||
self.database_engine, "m.room_id", room_ids
|
||||
)
|
||||
sql = f"""
|
||||
SELECT m.room_id, m.sender, m.membership, m.membership_event_id,
|
||||
r.room_version,
|
||||
m.event_instance_name, m.event_stream_ordering,
|
||||
m.has_known_state,
|
||||
COALESCE(j.room_type, m.room_type),
|
||||
COALESCE(j.is_encrypted, m.is_encrypted)
|
||||
FROM sliding_sync_membership_snapshots AS m
|
||||
INNER JOIN rooms AS r USING (room_id)
|
||||
LEFT JOIN sliding_sync_joined_rooms AS j ON (j.room_id = m.room_id AND m.membership = 'join')
|
||||
WHERE m.forgotten = 0
|
||||
AND {clause}
|
||||
AND user_id = ?
|
||||
"""
|
||||
args.append(user_id)
|
||||
txn.execute(sql, args)
|
||||
|
||||
return {
|
||||
row[0]: RoomsForUserSlidingSync(
|
||||
room_id=row[0],
|
||||
sender=row[1],
|
||||
membership=row[2],
|
||||
event_id=row[3],
|
||||
room_version_id=row[4],
|
||||
event_pos=PersistedEventPosition(row[5], row[6]),
|
||||
has_known_state=bool(row[7]),
|
||||
room_type=row[8],
|
||||
is_encrypted=row[9],
|
||||
)
|
||||
for row in txn
|
||||
}
|
||||
|
||||
return await self.db_pool.runInteraction(
|
||||
"get_sliding_sync_room_for_user_batch",
|
||||
get_sliding_sync_room_for_user_batch_txn,
|
||||
)
|
||||
|
||||
|
||||
class RoomMemberBackgroundUpdateStore(SQLBaseStore):
|
||||
def __init__(
|
||||
|
|
@@ -751,6 +751,48 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             if self._events_stream_cache.has_entity_changed(room_id, from_id)
         }
 
+    async def get_rooms_that_have_updates_since_sliding_sync_table(
+        self,
+        room_ids: StrCollection,
+        from_key: RoomStreamToken,
+    ) -> StrCollection:
+        """Return the rooms that probably have had updates since the given
+        token (changes that are > `from_key`)."""
+        # If the stream change cache is valid for the stream token, we can just
+        # use the result of that.
+        if from_key.stream >= self._events_stream_cache.get_earliest_known_position():
+            return self._events_stream_cache.get_entities_changed(
+                room_ids, from_key.stream
+            )
+
+        def get_rooms_that_have_updates_since_sliding_sync_table_txn(
+            txn: LoggingTransaction,
+        ) -> StrCollection:
+            sql = """
+                SELECT room_id
+                FROM sliding_sync_joined_rooms
+                WHERE {clause}
+                    AND event_stream_ordering > ?
+            """
+
+            results: Set[str] = set()
+            for batch in batch_iter(room_ids, 1000):
+                clause, args = make_in_list_sql_clause(
+                    self.database_engine, "room_id", batch
+                )
+
+                args.append(from_key.stream)
+                txn.execute(sql.format(clause=clause), args)
+
+                results.update(row[0] for row in txn)
+
+            return results
+
+        return await self.db_pool.runInteraction(
+            "get_rooms_that_have_updates_since_sliding_sync_table",
+            get_rooms_that_have_updates_since_sliding_sync_table_txn,
+        )
+
     async def paginate_room_events_by_stream_ordering(
         self,
         *,
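`batch_iter` above chunks the room IDs so that each query's `IN` list stays bounded. A generic standalone sketch of such a helper, assuming nothing about Synapse's actual implementation beyond the apparent (iterable, size) signature:

```python
from itertools import islice
from typing import Iterable, Iterator, Tuple, TypeVar

T = TypeVar("T")

def batch_iter(iterable: Iterable[T], size: int) -> Iterator[Tuple[T, ...]]:
    # Yield successive fixed-size chunks, so e.g. 2500 room IDs become
    # batches of 1000, 1000 and 500, keeping each SQL IN-list bounded.
    it = iter(iterable)
    while True:
        chunk = tuple(islice(it, size))
        if not chunk:
            return
        yield chunk
```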
@@ -158,6 +158,7 @@ class SlidingSyncResult:
             name changes to mark the room as unread and bump it to the top. For
             encrypted rooms, we just have to consider any activity as a bump because we
             can't see the content and the client has to figure it out for themselves.
+            This may not be included if there hasn't been a change.
         joined_count: The number of users with membership of join, including the client's
             own user ID. (same as sync `v2 m.joined_member_count`)
         invited_count: The number of users with membership of invite. (same as sync v2

@@ -193,7 +194,7 @@ class SlidingSyncResult:
         limited: Optional[bool]
         # Only optional because it won't be included for invite/knock rooms with `stripped_state`
         num_live: Optional[int]
-        bump_stamp: int
+        bump_stamp: Optional[int]
         joined_count: Optional[int]
         invited_count: Optional[int]
         notification_count: int
@@ -19,13 +19,23 @@
 # [This file includes modifications made by New Vector Limited]
 #
 #
 import tempfile
+from typing import Callable
 
 import yaml
+from parameterized import parameterized
 
 from synapse.config import ConfigError
+from synapse.config._base import RootConfig
 from synapse.config.homeserver import HomeServerConfig
 
 from tests.config.utils import ConfigFileTestCase
 
+try:
+    import hiredis
+except ImportError:
+    hiredis = None  # type: ignore
+
+
 class ConfigLoadingFileTestCase(ConfigFileTestCase):
     def test_load_fails_if_server_name_missing(self) -> None:

@@ -116,3 +126,49 @@ class ConfigLoadingFileTestCase(ConfigFileTestCase):
         self.add_lines_to_config(["trust_identity_server_for_password_resets: true"])
         with self.assertRaises(ConfigError):
             HomeServerConfig.load_config("", ["-c", self.config_file])
+
+    @parameterized.expand(
+        [
+            "turn_shared_secret_path: /does/not/exist",
+            "registration_shared_secret_path: /does/not/exist",
+            *["redis:\n  enabled: true\n  password_path: /does/not/exist"]
+            * (hiredis is not None),
+        ]
+    )
+    def test_secret_files_missing(self, config_str: str) -> None:
+        self.generate_config()
+        self.add_lines_to_config(["", config_str])
+
+        with self.assertRaises(ConfigError):
+            HomeServerConfig.load_config("", ["-c", self.config_file])
+
+    @parameterized.expand(
+        [
+            (
+                "turn_shared_secret_path: {}",
+                lambda c: c.voip.turn_shared_secret,
+            ),
+            (
+                "registration_shared_secret_path: {}",
+                lambda c: c.registration.registration_shared_secret,
+            ),
+            *[
+                (
+                    "redis:\n  enabled: true\n  password_path: {}",
+                    lambda c: c.redis.redis_password,
+                )
+            ]
+            * (hiredis is not None),
+        ]
+    )
+    def test_secret_files_existing(
+        self, config_line: str, get_secret: Callable[[RootConfig], str]
+    ) -> None:
+        self.generate_config_and_remove_lines_containing("registration_shared_secret")
+        with tempfile.NamedTemporaryFile(buffering=0) as secret_file:
+            secret_file.write(b"53C237")
+
+            self.add_lines_to_config(["", config_line.format(secret_file.name)])
+            config = HomeServerConfig.load_config("", ["-c", self.config_file])
+
+            self.assertEqual(get_secret(config), "53C237")
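The `* (hiredis is not None)` construction in the tests above relies on `bool` being a subclass of `int`. A tiny self-contained illustration:

```python
# bool subclasses int, so multiplying a list by a condition keeps its
# items when the condition is True (1) and drops them when False (0).
optional_cases = ["redis-case"] * (True)   # -> ["redis-case"]
skipped_cases = ["redis-case"] * (False)   # -> []

# This lets a parameterized test include a case only when a dependency
# (here, hypothetically, hiredis) is importable.
have_dep = False
cases = ["always-run"] + ["needs-dep"] * (have_dep)
print(cases)  # ['always-run']
```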
@@ -380,7 +380,7 @@ class RoomMemberMasterHandlerTestCase(HomeserverTestCase):
         )
 
     def test_forget_when_not_left(self) -> None:
-        """Tests that a user cannot not forgets a room that has not left."""
+        """Tests that a user cannot forget a room that they are still in."""
         self.get_failure(self.handler.forget(self.alice_ID, self.room_id), SynapseError)
 
     def test_nonlocal_room_user_action(self) -> None:
@@ -1096,6 +1096,92 @@ class SlidingSyncRoomsMetaTestCase(SlidingSyncBase):
 
         self.assertGreater(response_body["rooms"][room_id]["bump_stamp"], 0)
 
+    def test_rooms_bump_stamp_no_change_incremental(self) -> None:
+        """Test that the bump stamp is omitted if there has been no change"""
+
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        room_id1 = self.helper.create_room_as(
+            user1_id,
+            tok=user1_tok,
+        )
+
+        # Make the Sliding Sync request
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 100,
+                }
+            }
+        }
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Initial sync so we expect to see a bump stamp
+        self.assertIn("bump_stamp", response_body["rooms"][room_id1])
+
+        # Send an event that is not in the bump events list
+        self.helper.send_event(
+            room_id1, type="org.matrix.test", content={}, tok=user1_tok
+        )
+
+        response_body, from_token = self.do_sync(
+            sync_body, since=from_token, tok=user1_tok
+        )
+
+        # There hasn't been a change to the bump stamps, so we ignore it
+        self.assertNotIn("bump_stamp", response_body["rooms"][room_id1])
+
+    def test_rooms_bump_stamp_change_incremental(self) -> None:
+        """Test that the bump stamp is included if there has been a change, even
+        if its not in the timeline"""
+
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        room_id1 = self.helper.create_room_as(
+            user1_id,
+            tok=user1_tok,
+        )
+
+        # Make the Sliding Sync request
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 1]],
+                    "required_state": [],
+                    "timeline_limit": 2,
+                }
+            }
+        }
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+
+        # Initial sync so we expect to see a bump stamp
+        self.assertIn("bump_stamp", response_body["rooms"][room_id1])
+        first_bump_stamp = response_body["rooms"][room_id1]["bump_stamp"]
+
+        # Send a bump event at the start.
+        self.helper.send(room_id1, "test", tok=user1_tok)
+
+        # Send events that are not in the bump events list to fill the timeline
+        for _ in range(5):
+            self.helper.send_event(
+                room_id1, type="org.matrix.test", content={}, tok=user1_tok
+            )
+
+        response_body, from_token = self.do_sync(
+            sync_body, since=from_token, tok=user1_tok
+        )
+
+        # There was a bump event in the timeline gap, so we should see the bump
+        # stamp be updated.
+        self.assertIn("bump_stamp", response_body["rooms"][room_id1])
+        second_bump_stamp = response_body["rooms"][room_id1]["bump_stamp"]
+
+        self.assertGreater(second_bump_stamp, first_bump_stamp)
+
     def test_rooms_bump_stamp_invites(self) -> None:
         """
         Test that `bump_stamp` is present and points to the membership event,
@@ -8,11 +8,12 @@ from parameterized import parameterized
 from twisted.test.proto_helpers import MemoryReactor
 
 from synapse.api.errors import Codes
-from synapse.rest.client import delayed_events, room
+from synapse.rest.client import delayed_events, room, versions
 from synapse.server import HomeServer
 from synapse.types import JsonDict
 from synapse.util import Clock
 
+from tests import unittest
 from tests.unittest import HomeserverTestCase
 
 PATH_PREFIX = "/_matrix/client/unstable/org.matrix.msc4140/delayed_events"

@@ -21,6 +22,21 @@ _HS_NAME = "red"
 _EVENT_TYPE = "com.example.test"
 
 
+class DelayedEventsUnstableSupportTestCase(HomeserverTestCase):
+    servlets = [versions.register_servlets]
+
+    def test_false_by_default(self) -> None:
+        channel = self.make_request("GET", "/_matrix/client/versions")
+        self.assertEqual(channel.code, 200, channel.result)
+        self.assertFalse(channel.json_body["unstable_features"]["org.matrix.msc4140"])
+
+    @unittest.override_config({"max_event_delay_duration": "24h"})
+    def test_true_if_enabled(self) -> None:
+        channel = self.make_request("GET", "/_matrix/client/versions")
+        self.assertEqual(channel.code, 200, channel.result)
+        self.assertTrue(channel.json_body["unstable_features"]["org.matrix.msc4140"])
+
+
 class DelayedEventsTestCase(HomeserverTestCase):
     """Tests getting and managing delayed events."""
 