Mirror of https://github.com/element-hq/synapse.git, synced 2024-11-27 03:58:06 +03:00

Commit 1531b214fc

The idea here is that if an instance persists an event via the replication HTTP API, it can return before we receive that event over replication. That can lead to races where code assumes that persisting an event immediately updates various caches (e.g. the current state of the room). Most of Synapse doesn't hit such races, so we don't do the waiting automagically; instead we wait only where necessary, to avoid adding needless delays. We may change our minds here if it turns out there are a lot of subtle races going on. This is probably best reviewed commit by commit.
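As a rough illustration of the pattern this commit message describes, the sketch below shows a caller persisting an event via the replication HTTP API and then waiting for its own replication stream to catch up before reading caches. This is a minimal sketch only; the names persist_event_over_http, wait_for_stream_position and get_current_state are illustrative assumptions, not necessarily Synapse's actual API.

# Minimal sketch of the "wait for replication" pattern described above. All
# helper names here are illustrative assumptions, not Synapse's real API.
async def persist_and_read_state(event, persist_event_over_http, replication, state_handler):
    # Ask the event-persister instance to persist the event over HTTP; assume
    # it returns the stream position at which the event was written.
    stream_position = await persist_event_over_http(event)

    # Wait until our replication connection has caught up to that position, so
    # that local caches (e.g. the current state of the room) reflect the event.
    await replication.wait_for_stream_position("events", stream_position)

    # Only now is it safe to read caches that assume the event is visible.
    return await state_handler.get_current_state(event.room_id)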
208 lines | 7.7 KiB | Python
from mock import Mock

from twisted.internet.defer import ensureDeferred, maybeDeferred, succeed

from synapse.events import make_event_from_dict
from synapse.logging.context import LoggingContext
from synapse.types import Requester, UserID
from synapse.util import Clock
from synapse.util.retryutils import NotRetryingDestination

from tests import unittest
from tests.server import ThreadedMemoryReactorClock, setup_test_homeserver
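

# Tests for accepting events received over federation into a local room, and
# for marking remote users' device lists as stale and retrying their resync.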
class MessageAcceptTests(unittest.HomeserverTestCase):
    def setUp(self):

        self.http_client = Mock()
        self.reactor = ThreadedMemoryReactorClock()
        self.hs_clock = Clock(self.reactor)
        self.homeserver = setup_test_homeserver(
            self.addCleanup,
            http_client=self.http_client,
            clock=self.hs_clock,
            reactor=self.reactor,
        )
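
        # Create a local user and a public room for these tests to run against.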
        user_id = UserID("us", "test")
        our_user = Requester(user_id, None, False, None, None)
        room_creator = self.homeserver.get_room_creation_handler()
        room_deferred = ensureDeferred(
            room_creator.create_room(
                our_user, room_creator.PRESETS_DICT["public_chat"], ratelimit=False
            )
        )
        self.reactor.advance(0.1)
        self.room_id = self.successResultOf(room_deferred)[0]["room_id"]

        self.store = self.homeserver.get_datastore()

        # Figure out what the most recent event is
        most_recent = self.successResultOf(
            maybeDeferred(
                self.homeserver.get_datastore().get_latest_event_ids_in_room,
                self.room_id,
            )
        )[0]
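
        # Build a join event from a (fake) remote user, referencing the current
        # forward extremity of the room as its prev_event.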
        join_event = make_event_from_dict(
            {
                "room_id": self.room_id,
                "sender": "@baduser:test.serv",
                "state_key": "@baduser:test.serv",
                "event_id": "$join:test.serv",
                "depth": 1000,
                "origin_server_ts": 1,
                "type": "m.room.member",
                "origin": "test.servx",
                "content": {"membership": "join"},
                "auth_events": [],
                "prev_state": [(most_recent, {})],
                "prev_events": [(most_recent, {})],
            }
        )
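
        # Stub out auth and signature checks on the federation handler and
        # client so the remote join is accepted without real signing keys.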
        self.handler = self.homeserver.get_handlers().federation_handler
        self.handler.do_auth = lambda origin, event, context, auth_events: succeed(
            context
        )
        self.client = self.homeserver.get_federation_client()
        self.client._check_sigs_and_hash_and_fetch = lambda dest, pdus, **k: succeed(
            pdus
        )

        # Send the join, it should return None (which is not an error)
        d = ensureDeferred(
            self.handler.on_receive_pdu(
                "test.serv", join_event, sent_to_us_directly=True
            )
        )
        self.reactor.advance(1)
        self.assertEqual(self.successResultOf(d), None)

        # Make sure we actually joined the room
        self.assertEqual(
            self.successResultOf(
                maybeDeferred(self.store.get_latest_event_ids_in_room, self.room_id)
            )[0],
            "$join:test.serv",
        )

    def test_cant_hide_direct_ancestors(self):
        """
        If you send a message, you must be able to provide the direct
        prev_events that said event references.
        """

        def post_json(destination, path, data, headers=None, timeout=0):
            # If it asks us for new missing events, give them NOTHING
            if path.startswith("/_matrix/federation/v1/get_missing_events/"):
                return {"events": []}
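
        # Patch the mock federation HTTP client so that any
        # /get_missing_events request we send comes back empty.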
        self.http_client.post_json = post_json

        # Figure out what the most recent event is
        most_recent = self.successResultOf(
            maybeDeferred(self.store.get_latest_event_ids_in_room, self.room_id)
        )[0]

        # Now lie about an event
        lying_event = make_event_from_dict(
            {
                "room_id": self.room_id,
                "sender": "@baduser:test.serv",
                "event_id": "one:test.serv",
                "depth": 1000,
                "origin_server_ts": 1,
                "type": "m.room.message",
                "origin": "test.serv",
                "content": {"body": "hewwo?"},
                "auth_events": [],
                "prev_events": [("two:test.serv", {}), (most_recent, {})],
            }
        )
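
        # on_receive_pdu should ask test.serv for the missing prev_event
        # ("two:test.serv"), get nothing back, and reject the lying event.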
        with LoggingContext(request="lying_event"):
            d = ensureDeferred(
                self.handler.on_receive_pdu(
                    "test.serv", lying_event, sent_to_us_directly=True
                )
            )

            # Step the reactor, so the database fetches come back
            self.reactor.advance(1)

        # on_receive_pdu should throw an error
        failure = self.failureResultOf(d)
        self.assertEqual(
            failure.value.args[0],
            (
                "ERROR 403: Your server isn't divulging details about prev_events "
                "referenced in this event."
            ),
        )

        # Make sure the invalid event isn't there
        extrem = maybeDeferred(self.store.get_latest_event_ids_in_room, self.room_id)
        self.assertEqual(self.successResultOf(extrem)[0], "$join:test.serv")

    def test_retry_device_list_resync(self):
        """Tests that device lists are marked as stale if they couldn't be synced, and
        that stale device lists are retried periodically.
        """
        remote_user_id = "@john:test_remote"
        remote_origin = "test_remote"

        # Track the number of attempts to resync the user's device list.
        self.resync_attempts = 0

        # When this function is called, increment the number of resync attempts (only if
        # we're querying devices for the right user ID), then raise a
        # NotRetryingDestination error to fail the resync gracefully.
        def query_user_devices(destination, user_id):
            if user_id == remote_user_id:
                self.resync_attempts += 1

            raise NotRetryingDestination(0, 0, destination)

        # Register the mock on the federation client.
        federation_client = self.homeserver.get_federation_client()
        federation_client.query_user_devices = Mock(side_effect=query_user_devices)

        # Register a mock on the store so that the incoming update doesn't fail because
        # we don't share a room with the user.
        store = self.homeserver.get_datastore()
        store.get_rooms_for_user = Mock(return_value=["!someroom:test"])

        # Manually inject a fake device list update. We need this update to include at
        # least one prev_id so that the user's device list will need to be retried.
        device_list_updater = self.homeserver.get_device_handler().device_list_updater
        self.get_success(
            device_list_updater.incoming_device_list_update(
                origin=remote_origin,
                edu_content={
                    "deleted": False,
                    "device_display_name": "Mobile",
                    "device_id": "QBUAZIFURK",
                    "prev_id": [5],
                    "stream_id": 6,
                    "user_id": remote_user_id,
                },
            )
        )

        # Check that there was one resync attempt.
        self.assertEqual(self.resync_attempts, 1)

        # Check that the resync attempt failed and caused the user's device list to be
        # marked as stale.
        need_resync = self.get_success(
            store.get_user_ids_requiring_device_list_resync()
        )
        self.assertIn(remote_user_id, need_resync)

        # Check that waiting for 30 seconds caused Synapse to retry resyncing the device
        # list.
        self.reactor.advance(30)
        self.assertEqual(self.resync_attempts, 2)