Remove backwards compatibility with RelationPaginationToken. (#12138)
parent 36071d39f7
commit cd1ae3d0b4

4 changed files with 16 additions and 144 deletions
changelog.d/12138.removal (new file)

@@ -0,0 +1 @@
+Remove backwards compatibility with pagination tokens from the `/relations` and `/aggregations` endpoints generated from Synapse < v1.52.0.
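The compatibility shim removed here relied on the two token formats being distinguishable by a single character, as the deleted `_parse_token` helper below spells out. A minimal sketch of that check; the helper name and the sample token values are illustrative, not Synapse APIs:

    def is_legacy_relation_token(token: str) -> bool:
        # A RelationPaginationToken serialized as "<topological>-<stream>",
        # using "-" as its only separator, while the RoomStreamToken embedded
        # in a StreamToken always serializes with "_" characters. Checking
        # for "_" is therefore enough to tell the two formats apart.
        return "_" not in token

    assert is_legacy_relation_token("3-42")
    assert not is_legacy_relation_token("s42_0_0_0_0_0_0_0_0")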
synapse/rest/client/relations.py

@@ -27,50 +27,15 @@ from synapse.http.server import HttpServer
 from synapse.http.servlet import RestServlet, parse_integer, parse_string
 from synapse.http.site import SynapseRequest
 from synapse.rest.client._base import client_patterns
-from synapse.storage.relations import (
-    AggregationPaginationToken,
-    PaginationChunk,
-    RelationPaginationToken,
-)
-from synapse.types import JsonDict, RoomStreamToken, StreamToken
+from synapse.storage.relations import AggregationPaginationToken, PaginationChunk
+from synapse.types import JsonDict, StreamToken
 
 if TYPE_CHECKING:
     from synapse.server import HomeServer
-    from synapse.storage.databases.main import DataStore
 
 logger = logging.getLogger(__name__)
 
 
-async def _parse_token(
-    store: "DataStore", token: Optional[str]
-) -> Optional[StreamToken]:
-    """
-    For backwards compatibility support RelationPaginationToken, but new pagination
-    tokens are generated as full StreamTokens, to be compatible with /sync and /messages.
-    """
-    if not token:
-        return None
-    # Luckily the format for StreamToken and RelationPaginationToken differ enough
-    # that they can easily be separated. An "_" appears in the serialization of
-    # RoomStreamToken (as part of StreamToken), but RelationPaginationToken uses
-    # "-" only for separators.
-    if "_" in token:
-        return await StreamToken.from_string(store, token)
-    else:
-        relation_token = RelationPaginationToken.from_string(token)
-        return StreamToken(
-            room_key=RoomStreamToken(relation_token.topological, relation_token.stream),
-            presence_key=0,
-            typing_key=0,
-            receipt_key=0,
-            account_data_key=0,
-            push_rules_key=0,
-            to_device_key=0,
-            device_list_key=0,
-            groups_key=0,
-        )
-
-
 class RelationPaginationServlet(RestServlet):
     """API to paginate relations on an event by topological ordering, optionally
     filtered by relation type and event type.
@@ -122,8 +87,12 @@ class RelationPaginationServlet(RestServlet):
             pagination_chunk = PaginationChunk(chunk=[])
         else:
             # Return the relations
-            from_token = await _parse_token(self.store, from_token_str)
-            to_token = await _parse_token(self.store, to_token_str)
+            from_token = None
+            if from_token_str:
+                from_token = await StreamToken.from_string(self.store, from_token_str)
+            to_token = None
+            if to_token_str:
+                to_token = await StreamToken.from_string(self.store, to_token_str)
 
             pagination_chunk = await self.store.get_relations_for_event(
                 event_id=parent_id,
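Both servlets now inline the same strict parsing instead of calling the removed helper. As a sketch of the shared logic (assuming a Synapse environment; `parse_stream_token` is an illustrative name, this commit does not add such a helper):

    from typing import Optional

    from synapse.types import StreamToken

    async def parse_stream_token(store, token_str: Optional[str]) -> Optional[StreamToken]:
        # Only full StreamTokens are accepted now; a legacy
        # "<topological>-<stream>" token from Synapse < v1.52.0 fails to parse.
        if not token_str:
            return None
        return await StreamToken.from_string(store, token_str)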
@@ -317,8 +286,12 @@ class RelationAggregationGroupPaginationServlet(RestServlet):
         from_token_str = parse_string(request, "from")
         to_token_str = parse_string(request, "to")
 
-        from_token = await _parse_token(self.store, from_token_str)
-        to_token = await _parse_token(self.store, to_token_str)
+        from_token = None
+        if from_token_str:
+            from_token = await StreamToken.from_string(self.store, from_token_str)
+        to_token = None
+        if to_token_str:
+            to_token = await StreamToken.from_string(self.store, to_token_str)
 
         result = await self.store.get_relations_for_event(
             event_id=parent_id,
synapse/storage/relations.py

@@ -54,37 +54,6 @@ class PaginationChunk:
         return d
 
 
-@attr.s(frozen=True, slots=True, auto_attribs=True)
-class RelationPaginationToken:
-    """Pagination token for relation pagination API.
-
-    As the results are in topological order, we can use the
-    `topological_ordering` and `stream_ordering` fields of the events at the
-    boundaries of the chunk as pagination tokens.
-
-    Attributes:
-        topological: The topological ordering of the boundary event
-        stream: The stream ordering of the boundary event.
-    """
-
-    topological: int
-    stream: int
-
-    @staticmethod
-    def from_string(string: str) -> "RelationPaginationToken":
-        try:
-            t, s = string.split("-")
-            return RelationPaginationToken(int(t), int(s))
-        except ValueError:
-            raise SynapseError(400, "Invalid relation pagination token")
-
-    async def to_string(self, store: "DataStore") -> str:
-        return "%d-%d" % (self.topological, self.stream)
-
-    def as_tuple(self) -> Tuple[Any, ...]:
-        return attr.astuple(self)
-
-
 @attr.s(frozen=True, slots=True, auto_attribs=True)
 class AggregationPaginationToken:
     """Pagination token for relation aggregation pagination API.
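For reference, the removed class round-tripped its token through a plain "<topological>-<stream>" string. A standalone re-creation of that behavior, runnable without Synapse (function names are illustrative):

    from typing import Tuple

    def legacy_to_string(topological: int, stream: int) -> str:
        # Matches the removed to_string(): "-" as the only separator.
        return "%d-%d" % (topological, stream)

    def legacy_from_string(token: str) -> Tuple[int, int]:
        # Matches the removed from_string(), minus the SynapseError wrapping.
        t, s = token.split("-")
        return int(t), int(s)

    assert legacy_from_string(legacy_to_string(3, 42)) == (3, 42)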
tests/rest/client/test_relations.py

@@ -24,8 +24,7 @@ from synapse.api.constants import EventTypes, RelationTypes
 from synapse.rest import admin
 from synapse.rest.client import login, register, relations, room, sync
 from synapse.server import HomeServer
-from synapse.storage.relations import RelationPaginationToken
-from synapse.types import JsonDict, StreamToken
+from synapse.types import JsonDict
 from synapse.util import Clock
 
 from tests import unittest
@@ -281,15 +280,6 @@ class RelationsTestCase(BaseRelationsTestCase):
             channel.json_body["chunk"][0],
         )
 
-    def _stream_token_to_relation_token(self, token: str) -> str:
-        """Convert a StreamToken into a legacy token (RelationPaginationToken)."""
-        room_key = self.get_success(StreamToken.from_string(self.store, token)).room_key
-        return self.get_success(
-            RelationPaginationToken(
-                topological=room_key.topological, stream=room_key.stream
-            ).to_string(self.store)
-        )
-
     def test_repeated_paginate_relations(self) -> None:
         """Test that if we paginate using a limit and tokens then we get the
         expected events.
@@ -330,34 +320,6 @@ class RelationsTestCase(BaseRelationsTestCase):
         found_event_ids.reverse()
         self.assertEqual(found_event_ids, expected_event_ids)
 
-        # Reset and try again, but convert the tokens to the legacy format.
-        prev_token = ""
-        found_event_ids = []
-        for _ in range(20):
-            from_token = ""
-            if prev_token:
-                from_token = "&from=" + self._stream_token_to_relation_token(prev_token)
-
-            channel = self.make_request(
-                "GET",
-                f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1{from_token}",
-                access_token=self.user_token,
-            )
-            self.assertEqual(200, channel.code, channel.json_body)
-
-            found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"])
-            next_batch = channel.json_body.get("next_batch")
-
-            self.assertNotEqual(prev_token, next_batch)
-            prev_token = next_batch
-
-            if not prev_token:
-                break
-
-        # We paginated backwards, so reverse
-        found_event_ids.reverse()
-        self.assertEqual(found_event_ids, expected_event_ids)
-
     def test_pagination_from_sync_and_messages(self) -> None:
         """Pagination tokens from /sync and /messages can be used to paginate /relations."""
         channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "A")
@@ -543,39 +505,6 @@ class RelationsTestCase(BaseRelationsTestCase):
         found_event_ids.reverse()
         self.assertEqual(found_event_ids, expected_event_ids)
 
-        # Reset and try again, but convert the tokens to the legacy format.
-        prev_token = ""
-        found_event_ids = []
-        for _ in range(20):
-            from_token = ""
-            if prev_token:
-                from_token = "&from=" + self._stream_token_to_relation_token(prev_token)
-
-            channel = self.make_request(
-                "GET",
-                f"/_matrix/client/unstable/rooms/{self.room}"
-                f"/aggregations/{self.parent_id}/{RelationTypes.ANNOTATION}"
-                f"/m.reaction/{encoded_key}?limit=1{from_token}",
-                access_token=self.user_token,
-            )
-            self.assertEqual(200, channel.code, channel.json_body)
-
-            self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body)
-
-            found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"])
-
-            next_batch = channel.json_body.get("next_batch")
-
-            self.assertNotEqual(prev_token, next_batch)
-            prev_token = next_batch
-
-            if not prev_token:
-                break
-
-        # We paginated backwards, so reverse
-        found_event_ids.reverse()
-        self.assertEqual(found_event_ids, expected_event_ids)
-
     def test_aggregation(self) -> None:
         """Test that annotations get correctly aggregated."""