#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright (C) 2023 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
# Originally licensed under the Apache License, Version 2.0:
# <http://www.apache.org/licenses/LICENSE-2.0>.
#
# [This file includes modifications made by New Vector Limited]
#
#
from typing import Any, Dict
from unittest.mock import Mock

from twisted.test.proto_helpers import MemoryReactor

from synapse.api.constants import EventTypes
from synapse.rest import admin
from synapse.rest.client import login, room
from synapse.server import HomeServer
from synapse.types import JsonDict, create_requester
from synapse.util import Clock
from synapse.visibility import filter_events_for_client

from tests import unittest
from tests.unittest import override_config

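# Time constants used throughout the tests, in milliseconds. The test reactor's
# advance() takes seconds, hence the "/ 1000" conversions below.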
one_hour_ms = 3600000
one_day_ms = one_hour_ms * 24

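# Tests for Synapse's message retention support, covering per-room policies set via
# m.room.retention state events as well as the server's default retention policy.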
class RetentionTestCase(unittest.HomeserverTestCase):
    servlets = [
        admin.register_servlets,
        login.register_servlets,
        room.register_servlets,
    ]

    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
        config = self.default_config()

        # Merge this default retention config with anything that was specified in
        # @override_config.
        retention_config = {
            "enabled": True,
            "default_policy": {
                "min_lifetime": one_day_ms,
                "max_lifetime": one_day_ms * 3,
            },
            "allowed_lifetime_min": one_day_ms,
            "allowed_lifetime_max": one_day_ms * 3,
        }
        retention_config.update(config.get("retention", {}))
        config["retention"] = retention_config

        self.hs = self.setup_test_homeserver(config=config)

        return self.hs

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.user_id = self.register_user("user", "password")
        self.token = self.login("user", "password")

        self.store = self.hs.get_datastores().main
        self.serializer = self.hs.get_event_client_serializer()
        self.clock = self.hs.get_clock()

    def test_retention_event_purged_with_state_event(self) -> None:
        """Tests that expired events are correctly purged when the room's retention policy
        is defined by a state event.
        """
        room_id = self.helper.create_room_as(self.user_id, tok=self.token)

        # Set the room's retention period to 2 days.
        lifetime = one_day_ms * 2
        self.helper.send_state(
            room_id=room_id,
            event_type=EventTypes.Retention,
            body={"max_lifetime": lifetime},
            tok=self.token,
        )

        self._test_retention_event_purged(room_id, one_day_ms * 1.5)

    def test_retention_event_purged_with_state_event_outside_allowed(self) -> None:
        """Tests that the server configuration can override the policy for a room when
        running the purge jobs.
        """
        room_id = self.helper.create_room_as(self.user_id, tok=self.token)

        # Set a max_lifetime higher than the maximum allowed value.
        self.helper.send_state(
            room_id=room_id,
            event_type=EventTypes.Retention,
            body={"max_lifetime": one_day_ms * 4},
            tok=self.token,
        )

        # Check that the event is purged after waiting for the maximum allowed duration
        # instead of the one specified in the room's policy.
        self._test_retention_event_purged(room_id, one_day_ms * 1.5)

        # Set a max_lifetime lower than the minimum allowed value.
        self.helper.send_state(
            room_id=room_id,
            event_type=EventTypes.Retention,
            body={"max_lifetime": one_hour_ms},
            tok=self.token,
        )

        # Check that the event is purged after waiting for the minimum allowed duration
        # instead of the one specified in the room's policy.
        self._test_retention_event_purged(room_id, one_day_ms * 0.5)

    def test_retention_event_purged_without_state_event(self) -> None:
        """Tests that expired events are correctly purged when the room's retention policy
        is defined by the default retention policy in the server's configuration.
        """
        room_id = self.helper.create_room_as(self.user_id, tok=self.token)

        self._test_retention_event_purged(room_id, one_day_ms * 2)

    @override_config({"retention": {"purge_jobs": [{"interval": "5d"}]}})
    def test_visibility(self) -> None:
        """Tests that synapse.visibility.filter_events_for_client correctly filters out
        outdated events, even if the purge job hasn't got to them yet.

        We do this by setting a very long time between purge jobs.
        """
        store = self.hs.get_datastores().main
        storage_controllers = self.hs.get_storage_controllers()
        room_id = self.helper.create_room_as(self.user_id, tok=self.token)

        # Send a first event, which should be filtered out at the end of the test.
        resp = self.helper.send(room_id=room_id, body="1", tok=self.token)
        first_event_id = resp.get("event_id")
        assert isinstance(first_event_id, str)

        # Advance the time by 2 days. We're using the default retention policy, therefore
        # after this the first event will still be valid.
        self.reactor.advance(one_day_ms * 2 / 1000)

        # Send another event, which shouldn't get filtered out.
        resp = self.helper.send(room_id=room_id, body="2", tok=self.token)
        valid_event_id = resp.get("event_id")
        assert isinstance(valid_event_id, str)

        # Advance the time by another 2 days. After this, the first event should be
        # outdated but not the second one.
        self.reactor.advance(one_day_ms * 2 / 1000)

        # Fetch the events, and run filter_events_for_client on them
        events = self.get_success(
            store.get_events_as_list([first_event_id, valid_event_id])
        )
        self.assertEqual(2, len(events), "events retrieved from database")
        filtered_events = self.get_success(
            filter_events_for_client(
                storage_controllers,
                self.user_id,
                events,
            )
        )

        # We should only get one event back.
        self.assertEqual(len(filtered_events), 1, filtered_events)
        # That event should be the second, not outdated event.
        self.assertEqual(filtered_events[0].event_id, valid_event_id, filtered_events)

    def _test_retention_event_purged(self, room_id: str, increment: float) -> None:
        """Run the following test scenario to test the message retention policy support:

        1. Send event 1
        2. Increment time by `increment`
        3. Send event 2
        4. Increment time by `increment`
        5. Check that event 1 has been purged
        6. Check that event 2 has not been purged
        7. Check that state events that were sent before event 1 aren't purged.

        The main reason for sending a second event is that, currently, Synapse won't
        purge the latest message in a room, since doing so would leave the room without
        forward extremities. It's also a good way to ensure the purge jobs aren't too
        greedy and don't purge messages they shouldn't.

        Args:
            room_id: The ID of the room to test retention in.
            increment: The number of milliseconds to advance the clock each time. Must be
                defined so that events in the room aren't purged if they are `increment`
                old but are purged if they are `increment * 2` old.
        """
        # Get the create event so that, later, we can check that we can still access it.
        message_handler = self.hs.get_message_handler()
        create_event = self.get_success(
            message_handler.get_room_data(
                create_requester(self.user_id), room_id, EventTypes.Create, state_key=""
            )
        )

        # Send a first event to the room. This is the event we'll want to be purged at the
        # end of the test.
        resp = self.helper.send(room_id=room_id, body="1", tok=self.token)

        expired_event_id = resp.get("event_id")
        assert expired_event_id is not None

        # Check that we can retrieve the event.
        expired_event = self.get_event(expired_event_id)
        self.assertEqual(
            expired_event.get("content", {}).get("body"), "1", expired_event
        )

        # Advance the time.
        self.reactor.advance(increment / 1000)

        # Send another event. We need this because the purge job won't purge the most
        # recent event in the room.
        resp = self.helper.send(room_id=room_id, body="2", tok=self.token)

        valid_event_id = resp.get("event_id")
        assert valid_event_id is not None

        # Advance the time again. Now our first event should have expired but our second
        # one should still be kept.
        self.reactor.advance(increment / 1000)

        # Check that the first event has been purged from the database, i.e. that we
        # can't retrieve it anymore, because it has expired.
        self.get_event(expired_event_id, expect_none=True)

        # Check that the event that hasn't expired can still be retrieved.
        valid_event = self.get_event(valid_event_id)
        self.assertEqual(valid_event.get("content", {}).get("body"), "2", valid_event)

        # Check that we can still access state events that were sent before the event
        # that has been purged.
        assert create_event is not None
        self.get_event(create_event.event_id)

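    # Helper: fetch an event directly from the database and serialize it as a client
    # would see it. If expect_none is True, assert instead that the event has been
    # purged and return an empty dict.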
    def get_event(self, event_id: str, expect_none: bool = False) -> JsonDict:
        event = self.get_success(self.store.get_event(event_id, allow_none=True))

        if expect_none:
            self.assertIsNone(event)
            return {}

        assert event is not None

        time_now = self.clock.time_msec()
        serialized = self.get_success(self.serializer.serialize_event(event, time_now))

        return serialized


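# Tests retention behaviour on a server that has the retention feature enabled but no
# default retention policy configured.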
class RetentionNoDefaultPolicyTestCase(unittest.HomeserverTestCase):
    servlets = [
        admin.register_servlets,
        login.register_servlets,
        room.register_servlets,
    ]

    def default_config(self) -> Dict[str, Any]:
        config = super().default_config()

        retention_config = {
            "enabled": True,
        }

        # Update this config with what's in the default config so that
        # override_config works as expected.
        retention_config.update(config.get("retention", {}))
        config["retention"] = retention_config

        return config

    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
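        # Stub out the federation client so the tests don't make any real federation
        # requests; the spec restricts the mock to the backfill method.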
        mock_federation_client = Mock(spec=["backfill"])

        self.hs = self.setup_test_homeserver(
            federation_client=mock_federation_client,
        )
        return self.hs

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.user_id = self.register_user("user", "password")
        self.token = self.login("user", "password")

    def test_no_default_policy(self) -> None:
        """Tests that an event doesn't get expired if there is neither a default retention
        policy nor a policy specific to the room.
        """
        room_id = self.helper.create_room_as(self.user_id, tok=self.token)

        self._test_retention(room_id)

    def test_state_policy(self) -> None:
        """Tests that an event gets correctly expired if there is no default retention
        policy but there's a policy specific to the room.
        """
        room_id = self.helper.create_room_as(self.user_id, tok=self.token)

        # Set the maximum lifetime to 35 days so that the first event gets expired but not
        # the second one.
        self.helper.send_state(
            room_id=room_id,
            event_type=EventTypes.Retention,
            body={"max_lifetime": one_day_ms * 35},
            tok=self.token,
        )

        self._test_retention(room_id, expected_code_for_first_event=404)

    @unittest.override_config({"retention": {"enabled": False}})
    def test_visibility_when_disabled(self) -> None:
        """Retention policies should be ignored when the retention feature is disabled."""
        room_id = self.helper.create_room_as(self.user_id, tok=self.token)

        # Set a short max_lifetime on the room. With the retention feature disabled, it
        # should have no effect.
        self.helper.send_state(
            room_id=room_id,
            event_type=EventTypes.Retention,
            body={"max_lifetime": one_day_ms},
            tok=self.token,
        )

        resp = self.helper.send(room_id=room_id, body="test", tok=self.token)

        # Advance the clock well past the room policy's max_lifetime.
        self.reactor.advance(one_day_ms * 2 / 1000)

        # The event should still be retrievable, i.e. it should not have been purged.
        self.get_event(room_id, resp["event_id"])

    def _test_retention(
        self, room_id: str, expected_code_for_first_event: int = 200
    ) -> None:
        # Send a first event to the room. This is the event we'll want to be purged at the
        # end of the test.
        resp = self.helper.send(room_id=room_id, body="1", tok=self.token)

        first_event_id = resp.get("event_id")
        assert first_event_id is not None

        # Check that we can retrieve the event.
        expired_event = self.get_event(room_id, first_event_id)
        self.assertEqual(
            expired_event.get("content", {}).get("body"), "1", expired_event
        )

        # Advance the time by a month.
        self.reactor.advance(one_day_ms * 30 / 1000)

        # Send another event. We need this because the purge job won't purge the most
        # recent event in the room.
        resp = self.helper.send(room_id=room_id, body="2", tok=self.token)

        second_event_id = resp.get("event_id")
        assert second_event_id is not None

        # Advance the time by another month.
        self.reactor.advance(one_day_ms * 30 / 1000)

        # Check whether the first event has been purged, using the status code the
        # caller expects.
        first_event = self.get_event(
            room_id, first_event_id, expected_code=expected_code_for_first_event
        )

        if expected_code_for_first_event == 200:
            self.assertEqual(
                first_event.get("content", {}).get("body"), "1", first_event
            )

        # Check that the event that hasn't been purged can still be retrieved.
        second_event = self.get_event(room_id, second_event_id)
        self.assertEqual(second_event.get("content", {}).get("body"), "2", second_event)

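    # Helper: fetch an event over the client API and assert that the request returns
    # the expected status code, returning the JSON body.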
    def get_event(
        self, room_id: str, event_id: str, expected_code: int = 200
    ) -> JsonDict:
        url = "/_matrix/client/r0/rooms/%s/event/%s" % (room_id, event_id)

        channel = self.make_request("GET", url, access_token=self.token)

        self.assertEqual(channel.code, expected_code, channel.result)

        return channel.json_body