From 753b1270da1f0449bbb960b37707556abd3eaac0 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 9 Apr 2019 13:03:56 +0100 Subject: [PATCH 01/92] Require sig from origin server on perspectives responses --- synapse/crypto/keyring.py | 28 ++++++------ tests/crypto/test_keyring.py | 86 +++++++++++++++++++++++++++++++----- 2 files changed, 91 insertions(+), 23 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index eaf41b983c..a64ba0752a 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -394,8 +394,7 @@ class BaseV2KeyFetcher(object): POST /_matrix/key/v2/query. Checks that each signature in the response that claims to come from the origin - server is valid. (Does not check that there actually is such a signature, for - some reason.) + server is valid, and that there is at least one such signature. Stores the json in server_keys_json so that it can be used for future responses to /_matrix/key/v2/query. @@ -430,16 +429,25 @@ class BaseV2KeyFetcher(object): verify_key=verify_key, valid_until_ts=ts_valid_until_ms ) - # TODO: improve this signature checking server_name = response_json["server_name"] + verified = False for key_id in response_json["signatures"].get(server_name, {}): - if key_id not in verify_keys: + # each of the keys used for the signature must be present in the response + # json. + key = verify_keys.get(key_id) + if not key: raise KeyLookupError( - "Key response must include verification keys for all signatures" + "Key response is signed by key id %s:%s but that key is not " + "present in the response" % (server_name, key_id) ) - verify_signed_json( - response_json, server_name, verify_keys[key_id].verify_key + verify_signed_json(response_json, server_name, key.verify_key) + verified = True + + if not verified: + raise KeyLookupError( + "Key response for %s is not signed by the origin server" + % (server_name,) ) for key_id, key_data in response_json["old_verify_keys"].items(): @@ -677,12 +685,6 @@ class ServerKeyFetcher(BaseV2KeyFetcher): except HttpResponseException as e: raise_from(KeyLookupError("Remote server returned an error"), e) - if ( - u"signatures" not in response - or server_name not in response[u"signatures"] - ): - raise KeyLookupError("Key response not signed by remote server") - if response["server_name"] != server_name: raise KeyLookupError( "Expected a response for server %r not %r" diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index de61bad15d..c4c9d29499 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -55,12 +55,12 @@ class MockPerspectiveServer(object): key_id: {"key": signedjson.key.encode_verify_key_base64(verify_key)} }, } - return self.get_signed_response(res) - - def get_signed_response(self, res): - signedjson.sign.sign_json(res, self.server_name, self.key) + self.sign_response(res) return res + def sign_response(self, res): + signedjson.sign.sign_json(res, self.server_name, self.key) + class KeyringTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): @@ -238,7 +238,7 @@ class ServerKeyFetcherTestCase(unittest.HomeserverTestCase): testkey = signedjson.key.generate_signing_key("ver1") testverifykey = signedjson.key.get_verify_key(testkey) testverifykey_id = "ed25519:ver1" - VALID_UNTIL_TS = 1000 + VALID_UNTIL_TS = 200 * 1000 # valid response response = { @@ -326,9 +326,10 @@ class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase): }, } - persp_resp = { - "server_keys": 
[self.mock_perspective_server.get_signed_response(response)] - } + # the response must be signed by both the origin server and the perspectives + # server. + signedjson.sign.sign_json(response, SERVER_NAME, testkey) + self.mock_perspective_server.sign_response(response) def post_json(destination, path, data, **kwargs): self.assertEqual(destination, self.mock_perspective_server.server_name) @@ -337,7 +338,7 @@ class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase): # check that the request is for the expected key q = data["server_keys"] self.assertEqual(list(q[SERVER_NAME].keys()), ["key1"]) - return persp_resp + return {"server_keys": [response]} self.http_client.post_json.side_effect = post_json @@ -365,9 +366,74 @@ class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase): self.assertEqual( bytes(res["key_json"]), - canonicaljson.encode_canonical_json(persp_resp["server_keys"][0]), + canonicaljson.encode_canonical_json(response), ) + def test_invalid_perspectives_responses(self): + """Check that invalid responses from the perspectives server are rejected""" + # arbitrarily advance the clock a bit + self.reactor.advance(100) + + SERVER_NAME = "server2" + testkey = signedjson.key.generate_signing_key("ver1") + testverifykey = signedjson.key.get_verify_key(testkey) + testverifykey_id = "ed25519:ver1" + VALID_UNTIL_TS = 200 * 1000 + + def build_response(): + # valid response + response = { + "server_name": SERVER_NAME, + "old_verify_keys": {}, + "valid_until_ts": VALID_UNTIL_TS, + "verify_keys": { + testverifykey_id: { + "key": signedjson.key.encode_verify_key_base64(testverifykey) + } + }, + } + + # the response must be signed by both the origin server and the perspectives + # server. + signedjson.sign.sign_json(response, SERVER_NAME, testkey) + self.mock_perspective_server.sign_response(response) + return response + + def get_key_from_perspectives(response): + fetcher = PerspectivesKeyFetcher(self.hs) + server_name_and_key_ids = [(SERVER_NAME, ("key1",))] + + def post_json(destination, path, data, **kwargs): + self.assertEqual(destination, self.mock_perspective_server.server_name) + self.assertEqual(path, "/_matrix/key/v2/query") + return {"server_keys": [response]} + + self.http_client.post_json.side_effect = post_json + + return self.get_success( + fetcher.get_keys(server_name_and_key_ids) + ) + + # start with a valid response so we can check we are testing the right thing + response = build_response() + keys = get_key_from_perspectives(response) + k = keys[SERVER_NAME][testverifykey_id] + self.assertEqual(k.verify_key, testverifykey) + + # remove the perspectives server's signature + response = build_response() + del response["signatures"][self.mock_perspective_server.server_name] + self.http_client.post_json.return_value = {"server_keys": [response]} + keys = get_key_from_perspectives(response) + self.assertEqual(keys, {}, "Expected empty dict with missing persp server sig") + + # remove the origin server's signature + response = build_response() + del response["signatures"][SERVER_NAME] + self.http_client.post_json.return_value = {"server_keys": [response]} + keys = get_key_from_perspectives(response) + self.assertEqual(keys, {}, "Expected empty dict with missing origin server sig") + @defer.inlineCallbacks def run_in_context(f, *args, **kwargs): From b825d1c80046b37e32951ef034a05002df76a287 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 23 May 2019 17:31:26 +0100 Subject: [PATCH 02/92] Improve error handling/logging for perspectives-key fetching. 
In particular, don't give up on the first failure. --- synapse/crypto/keyring.py | 105 ++++++++++++++++++++++++++++---------- 1 file changed, 77 insertions(+), 28 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index a64ba0752a..65af2fb671 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -17,6 +17,7 @@ import logging from collections import namedtuple +import six from six import raise_from from six.moves import urllib @@ -349,6 +350,7 @@ class KeyFetcher(object): Args: server_name_and_key_ids (iterable[Tuple[str, iterable[str]]]): list of (server_name, iterable[key_id]) tuples to fetch keys for + Note that the iterables may be iterated more than once. Returns: Deferred[dict[str, dict[str, synapse.storage.keys.FetchKeyResult|None]]]: @@ -557,7 +559,16 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): Returns: Deferred[dict[str, dict[str, synapse.storage.keys.FetchKeyResult]]]: map from server_name -> key_id -> FetchKeyResult + + Raises: + KeyLookupError if there was an error processing the entire response from + the server """ + logger.info( + "Requesting keys %s from notary server %s", + server_names_and_key_ids, + perspective_name, + ) # TODO(mark): Set the minimum_valid_until_ts to that needed by # the events being validated or the current time if validating # an incoming request. @@ -586,40 +597,31 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): time_now_ms = self.clock.time_msec() for response in query_response["server_keys"]: - if ( - u"signatures" not in response - or perspective_name not in response[u"signatures"] - ): + # do this first, so that we can give useful errors thereafter + server_name = response.get("server_name") + if not isinstance(server_name, six.string_types): raise KeyLookupError( - "Key response not signed by perspective server" - " %r" % (perspective_name,) + "Malformed response from key notary server %s: invalid server_name" + % (perspective_name,) ) - verified = False - for key_id in response[u"signatures"][perspective_name]: - if key_id in perspective_keys: - verify_signed_json( - response, perspective_name, perspective_keys[key_id] - ) - verified = True - - if not verified: - logging.info( - "Response from perspective server %r not signed with a" - " known key, signed with: %r, known keys: %r", + try: + processed_response = yield self._process_perspectives_response( perspective_name, - list(response[u"signatures"][perspective_name]), - list(perspective_keys), + perspective_keys, + response, + time_added_ms=time_now_ms, ) - raise KeyLookupError( - "Response not signed with a known key for perspective" - " server %r" % (perspective_name,) + except KeyLookupError as e: + logger.warning( + "Error processing response from key notary server %s for origin " + "server %s: %s", + perspective_name, + server_name, + e, ) - - processed_response = yield self.process_v2_response( - perspective_name, response, time_added_ms=time_now_ms - ) - server_name = response["server_name"] + # we continue to process the rest of the response + continue added_keys.extend( (server_name, key_id, key) for key_id, key in processed_response.items() @@ -632,6 +634,53 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): defer.returnValue(keys) + def _process_perspectives_response( + self, perspective_name, perspective_keys, response, time_added_ms + ): + """Parse a 'Server Keys' structure from the result of a /key/query request + + Checks that the entry is correctly signed by the perspectives server, and then + passes over to 
process_v2_response + + Args: + perspective_name (str): the name of the notary server that produced this + result + + perspective_keys (dict[str, VerifyKey]): map of key_id->key for the + notary server + + response (dict): the json-decoded Server Keys response object + + time_added_ms (int): the timestamp to record in server_keys_json + + Returns: + Deferred[dict[str, FetchKeyResult]]: map from key_id to result object + """ + if ( + u"signatures" not in response + or perspective_name not in response[u"signatures"] + ): + raise KeyLookupError("Response not signed by the notary server") + + verified = False + for key_id in response[u"signatures"][perspective_name]: + if key_id in perspective_keys: + verify_signed_json(response, perspective_name, perspective_keys[key_id]) + verified = True + + if not verified: + raise KeyLookupError( + "Response not signed with a known key: signed with: %r, known keys: %r" + % ( + list(response[u"signatures"][perspective_name].keys()), + list(perspective_keys.keys()), + ) + ) + + return self.process_v2_response( + perspective_name, response, time_added_ms=time_added_ms + ) + class ServerKeyFetcher(BaseV2KeyFetcher): """KeyFetcher impl which fetches keys from the origin servers""" From cbcfd642a0dc375ea6f006c1633f82d16b3ac002 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 24 May 2019 15:47:30 +0100 Subject: [PATCH 03/92] changelog --- changelog.d/5251.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5251.bugfix diff --git a/changelog.d/5251.bugfix b/changelog.d/5251.bugfix new file mode 100644 index 0000000000..9a053204b6 --- /dev/null +++ b/changelog.d/5251.bugfix @@ -0,0 +1 @@ +Ensure that server_keys fetched via a notary server are correctly signed. \ No newline at end of file From fa1b293da2e0a5e47864ccb49e530d8a81d81790 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 24 May 2019 22:17:18 +0100 Subject: [PATCH 04/92] Simplification to Keyring.wait_for_previous_lookups. (#5250) The list of server names was redundant, since it was equivalent to the keys on the server_to_deferred map. This reduces the number of large lists being passed around, and has the benefit of deduplicating the entries in `wait_on`. --- changelog.d/5250.misc | 1 + synapse/crypto/keyring.py | 11 ++++------- tests/crypto/test_keyring.py | 4 ++-- 3 files changed, 7 insertions(+), 9 deletions(-) create mode 100644 changelog.d/5250.misc diff --git a/changelog.d/5250.misc b/changelog.d/5250.misc new file mode 100644 index 0000000000..575a299a82 --- /dev/null +++ b/changelog.d/5250.misc @@ -0,0 +1 @@ +Simplification to Keyring.wait_for_previous_lookups. diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index eaf41b983c..d6ad7f1772 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -180,9 +180,7 @@ class Keyring(object): # We want to wait for any previous lookups to complete before # proceeding. - yield self.wait_for_previous_lookups( - [rq.server_name for rq in verify_requests], server_to_deferred - ) + yield self.wait_for_previous_lookups(server_to_deferred) # Actually start fetching keys. self._get_server_verify_keys(verify_requests) @@ -215,12 +213,11 @@ class Keyring(object): logger.exception("Error starting key lookups") @defer.inlineCallbacks - def wait_for_previous_lookups(self, server_names, server_to_deferred): + def wait_for_previous_lookups(self, server_to_deferred): """Waits for any previous key lookups for the given servers to finish. 
Args: - server_names (list): list of server_names we want to lookup - server_to_deferred (dict): server_name to deferred which gets + server_to_deferred (dict[str, Deferred]): server_name to deferred which gets resolved once we've finished looking up keys for that server. The Deferreds should be regular twisted ones which call their callbacks with no logcontext. @@ -233,7 +230,7 @@ class Keyring(object): while True: wait_on = [ (server_name, self.key_downloads[server_name]) - for server_name in server_names + for server_name in server_to_deferred.keys() if server_name in self.key_downloads ] if not wait_on: diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index de61bad15d..4fba462d44 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -85,7 +85,7 @@ class KeyringTestCase(unittest.HomeserverTestCase): # we run the lookup in a logcontext so that the patched inlineCallbacks can check # it is doing the right thing with logcontexts. wait_1_deferred = run_in_context( - kr.wait_for_previous_lookups, ["server1"], {"server1": lookup_1_deferred} + kr.wait_for_previous_lookups, {"server1": lookup_1_deferred} ) # there were no previous lookups, so the deferred should be ready @@ -94,7 +94,7 @@ class KeyringTestCase(unittest.HomeserverTestCase): # set off another wait. It should block because the first lookup # hasn't yet completed. wait_2_deferred = run_in_context( - kr.wait_for_previous_lookups, ["server1"], {"server1": lookup_2_deferred} + kr.wait_for_previous_lookups, {"server1": lookup_2_deferred} ) self.assertFalse(wait_2_deferred.called) From 56f07d980a9d3b3b8e2cc196e5d630abd98be122 Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Fri, 24 May 2019 16:32:21 -0500 Subject: [PATCH 05/92] Show correct error when logging out and access token is missing Signed-off-by: Aaron Raimist --- synapse/rest/client/v1/logout.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/synapse/rest/client/v1/logout.py b/synapse/rest/client/v1/logout.py index 430c692336..317f52cb56 100644 --- a/synapse/rest/client/v1/logout.py +++ b/synapse/rest/client/v1/logout.py @@ -40,11 +40,11 @@ class LogoutRestServlet(ClientV1RestServlet): def on_POST(self, request): try: requester = yield self.auth.get_user_by_req(request) - except AuthError: + except AuthError as e: # this implies the access token has already been deleted. 
- defer.returnValue((401, { - "errcode": "M_UNKNOWN_TOKEN", - "error": "Access Token unknown or expired" + defer.returnValue((e.code, { + "errcode": e.errcode, + "error": e.msg })) else: if requester.device_id is None: From 2d4853039f37a16cc898c7dba1a4a34f67ca1062 Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Fri, 24 May 2019 17:13:10 -0500 Subject: [PATCH 06/92] Fix error code for invalid parameter Signed-off-by: Aaron Raimist --- synapse/http/servlet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index 528125e737..197c652850 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -55,7 +55,7 @@ def parse_integer_from_args(args, name, default=None, required=False): return int(args[name][0]) except Exception: message = "Query parameter %r must be an integer" % (name,) - raise SynapseError(400, message) + raise SynapseError(400, message, errcode=Codes.INVALID_PARAM) else: if required: message = "Missing integer query parameter %r" % (name,) From 6dac0e738c70ee67abb9b03cafbb07749b84309c Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Fri, 24 May 2019 17:15:24 -0500 Subject: [PATCH 07/92] Add changelog Signed-off-by: Aaron Raimist --- changelog.d/5257.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5257.bugfix diff --git a/changelog.d/5257.bugfix b/changelog.d/5257.bugfix new file mode 100644 index 0000000000..8334af9b99 --- /dev/null +++ b/changelog.d/5257.bugfix @@ -0,0 +1 @@ +Fix error code when there is an invalid parameter on /_matrix/client/r0/publicRooms From 0b4f4cb0b4bff42dd0f638d1d2891f35feaff1be Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Fri, 24 May 2019 16:35:48 -0500 Subject: [PATCH 08/92] Add changelog Signed-off-by: Aaron Raimist --- changelog.d/5256.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5256.bugfix diff --git a/changelog.d/5256.bugfix b/changelog.d/5256.bugfix new file mode 100644 index 0000000000..86316ab5dd --- /dev/null +++ b/changelog.d/5256.bugfix @@ -0,0 +1 @@ +Show the correct error when logging out and access token is missing. From bc4b2ecf70bc3965cbbf1daee52bf7577e219d7b Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Sat, 25 May 2019 12:02:48 -0600 Subject: [PATCH 09/92] Fix logging for room stats background update --- synapse/storage/stats.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/stats.py b/synapse/storage/stats.py index 71b80a891d..eb0ced5b5e 100644 --- a/synapse/storage/stats.py +++ b/synapse/storage/stats.py @@ -169,7 +169,7 @@ class StatsStore(StateDeltasStore): logger.info( "Processing the next %d rooms of %d remaining", - (len(rooms_to_work_on), progress["remaining"]), + len(rooms_to_work_on), progress["remaining"], ) # Number of state events we've processed by going through each room From 4ccdbfcdb133ed10cd53f5a1b7f77b00c1ecdf97 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Sat, 25 May 2019 12:21:21 -0600 Subject: [PATCH 10/92] Changelog --- changelog.d/5260.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5260.feature diff --git a/changelog.d/5260.feature b/changelog.d/5260.feature new file mode 100644 index 0000000000..01285e965c --- /dev/null +++ b/changelog.d/5260.feature @@ -0,0 +1 @@ +Synapse now more efficiently collates room statistics. 
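The fix in "Fix logging for room stats background update" works because the stdlib `logging` module applies `%`-style formatting lazily: each positional argument after the format string fills one placeholder. Wrapping both values in a single tuple hands the formatter one argument for two `%d` placeholders, so rendering the record raises a `TypeError` and the message is reported as a logging error instead of being logged. A minimal standalone sketch of the difference (illustrative only, not Synapse code):

```python
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("room_stats")

rooms_to_work_on = ["!a:example.org", "!b:example.org"]
remaining = 10

# Pre-patch shape: a single tuple argument for two %d placeholders. Formatting
# the record fails, so the intended progress message is never emitted.
logger.info(
    "Processing the next %d rooms of %d remaining",
    (len(rooms_to_work_on), remaining),
)

# Post-patch shape: each value is passed as its own argument.
logger.info(
    "Processing the next %d rooms of %d remaining",
    len(rooms_to_work_on),
    remaining,
)
```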
From 119c9c10b026b65affab5d5bcb6ae1d3d7e7229c Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Mon, 27 May 2019 00:13:48 -0500 Subject: [PATCH 11/92] Get rid of try except Signed-off-by: Aaron Raimist --- synapse/rest/client/v1/logout.py | 25 +++++++++---------------- 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/synapse/rest/client/v1/logout.py b/synapse/rest/client/v1/logout.py index 317f52cb56..2cf373e83c 100644 --- a/synapse/rest/client/v1/logout.py +++ b/synapse/rest/client/v1/logout.py @@ -38,23 +38,16 @@ class LogoutRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_POST(self, request): - try: - requester = yield self.auth.get_user_by_req(request) - except AuthError as e: - # this implies the access token has already been deleted. - defer.returnValue((e.code, { - "errcode": e.errcode, - "error": e.msg - })) + requester = yield self.auth.get_user_by_req(request) + + if requester.device_id is None: + # the acccess token wasn't associated with a device. + # Just delete the access token + access_token = self._auth.get_access_token_from_request(request) + yield self._auth_handler.delete_access_token(access_token) else: - if requester.device_id is None: - # the acccess token wasn't associated with a device. - # Just delete the access token - access_token = self._auth.get_access_token_from_request(request) - yield self._auth_handler.delete_access_token(access_token) - else: - yield self._device_handler.delete_device( - requester.user.to_string(), requester.device_id) + yield self._device_handler.delete_device( + requester.user.to_string(), requester.device_id) defer.returnValue((200, {})) From ba17de7fbc29700163b23363ae0e03f8a01ef274 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 28 May 2019 10:11:38 +0100 Subject: [PATCH 12/92] Fix schema update for account validity --- ...{account_validity.sql => account_validity_with_renewal.sql} | 3 +++ 1 file changed, 3 insertions(+) rename synapse/storage/schema/delta/54/{account_validity.sql => account_validity_with_renewal.sql} (83%) diff --git a/synapse/storage/schema/delta/54/account_validity.sql b/synapse/storage/schema/delta/54/account_validity_with_renewal.sql similarity index 83% rename from synapse/storage/schema/delta/54/account_validity.sql rename to synapse/storage/schema/delta/54/account_validity_with_renewal.sql index 2357626000..0adb2ad55e 100644 --- a/synapse/storage/schema/delta/54/account_validity.sql +++ b/synapse/storage/schema/delta/54/account_validity_with_renewal.sql @@ -13,6 +13,9 @@ * limitations under the License. */ +-- We previously changed the schema for this table without renaming the file, which means +-- that some databases might still be using the old schema. This ensures Synapse uses the +-- right schema for the table. DROP TABLE IF EXISTS account_validity; -- Track what users are in public rooms. From ddd30f44a09a775d290c2d41b8db4d15b967dd43 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 28 May 2019 10:14:21 +0100 Subject: [PATCH 13/92] Changelog --- changelog.d/5268.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5268.bugfix diff --git a/changelog.d/5268.bugfix b/changelog.d/5268.bugfix new file mode 100644 index 0000000000..1a5a03bf0a --- /dev/null +++ b/changelog.d/5268.bugfix @@ -0,0 +1 @@ +Fix schema update for account validity. 
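The comment added to the renamed delta file above leans on how schema deltas are applied: once a delta file has run, its name is recorded, and a file that is merely edited in place is never re-run on databases that already applied the old version. Renaming the file gives the corrected schema a fresh name, and the `DROP TABLE IF EXISTS` keeps it safe to run everywhere. A simplified sketch of that mechanism (illustrative only, not Synapse's actual upgrade code):

```python
# File names recorded by a previous upgrade run.
applied = {"54/account_validity.sql"}

def run_pending_deltas(delta_files, execute_sql):
    """Run every delta whose file name has not been recorded yet."""
    for name, sql in sorted(delta_files):
        if name in applied:
            # Already recorded: later edits to this file are silently skipped,
            # which is why the fix renames the file instead.
            continue
        execute_sql(sql)
        applied.add(name)

run_pending_deltas(
    [
        ("54/account_validity.sql", "-- old contents, never re-run"),
        ("54/account_validity_with_renewal.sql",
         "DROP TABLE IF EXISTS account_validity; -- then recreate it"),
    ],
    execute_sql=print,
)
```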
From 52839886d664576831462e033b88e5aba4c019e3 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 28 May 2019 16:47:42 +0100 Subject: [PATCH 14/92] Allow configuring a range for the account validity startup job When enabling the account validity feature, Synapse will look at startup for registered account without an expiration date, and will set one equals to 'now + validity_period' for them. On large servers, it can mean that a large number of users will have the same expiration date, which means that they will all be sent a renewal email at the same time, which isn't ideal. In order to mitigate this, this PR allows server admins to define a 'max_delta' so that the expiration date is a random value in the [now + validity_period ; now + validity_period + max_delta] range. This allows renewal emails to be progressively sent over a configured period instead of being sent all in one big batch. --- synapse/config/registration.py | 11 ++++++++++ synapse/storage/_base.py | 23 +++++++++++++++++++-- tests/rest/client/v2_alpha/test_register.py | 21 +++++++++++++++++++ 3 files changed, 53 insertions(+), 2 deletions(-) diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 693288f938..b4fd4af368 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -39,6 +39,10 @@ class AccountValidityConfig(Config): else: self.renew_email_subject = "Renew your %(app)s account" + self.startup_job_max_delta = self.parse_duration( + config.get("startup_job_max_delta", 0), + ) + if self.renew_by_email_enabled and "public_baseurl" not in synapse_config: raise ConfigError("Can't send renewal emails without 'public_baseurl'") @@ -131,11 +135,18 @@ class RegistrationConfig(Config): # after that the validity period changes and Synapse is restarted, the users' # expiration dates won't be updated unless their account is manually renewed. # + # If set, the ``startup_job_max_delta`` optional setting will make the startup job + # described above set a random expiration date between t + period and + # t + period + startup_job_max_delta, t being the date and time at which the job + # sets the expiration date for a given user. This is useful for server admins that + # want to avoid Synapse sending a lot of renewal emails at once. + # #account_validity: # enabled: True # period: 6w # renew_at: 1w # renew_email_subject: "Renew your %%(app)s account" + # startup_job_max_delta: 2d # The user must provide all of the below types of 3PID when registering. # diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index fa6839ceca..40802fd3dc 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -16,6 +16,7 @@ # limitations under the License. 
import itertools import logging +import random import sys import threading import time @@ -247,6 +248,8 @@ class SQLBaseStore(object): self._check_safe_to_upsert, ) + self.rand = random.SystemRandom() + if self._account_validity.enabled: self._clock.call_later( 0.0, @@ -308,21 +311,37 @@ class SQLBaseStore(object): res = self.cursor_to_dict(txn) if res: for user in res: - self.set_expiration_date_for_user_txn(txn, user["name"]) + self.set_expiration_date_for_user_txn( + txn, + user["name"], + use_delta=True, + ) yield self.runInteraction( "get_users_with_no_expiration_date", select_users_with_no_expiration_date_txn, ) - def set_expiration_date_for_user_txn(self, txn, user_id): + def set_expiration_date_for_user_txn(self, txn, user_id, use_delta=False): """Sets an expiration date to the account with the given user ID. Args: user_id (str): User ID to set an expiration date for. + use_delta (bool): If set to False, the expiration date for the user will be + now + validity period. If set to True, this expiration date will be a + random value in the [now + period; now + period + max_delta] range, + max_delta being the configured value for the size of the range, unless + delta is 0, in which case it sets it to now + period. """ now_ms = self._clock.time_msec() expiration_ts = now_ms + self._account_validity.period + + if use_delta and self._account_validity.startup_job_max_delta: + expiration_ts = self.rand.randrange( + expiration_ts, + expiration_ts + self._account_validity.startup_job_max_delta, + ) + self._simple_insert_txn( txn, "account_validity", diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index d4a1d4d50c..7603440fd8 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -436,6 +436,7 @@ class AccountValidityBackgroundJobTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): self.validity_period = 10 + self.max_delta = 10 config = self.default_config() @@ -459,8 +460,28 @@ class AccountValidityBackgroundJobTestCase(unittest.HomeserverTestCase): """ user_id = self.register_user("kermit", "user") + self.hs.config.account_validity.startup_job_max_delta = 0 + now_ms = self.hs.clock.time_msec() self.get_success(self.store._set_expiration_date_when_missing()) res = self.get_success(self.store.get_expiration_ts_for_user(user_id)) self.assertEqual(res, now_ms + self.validity_period) + + def test_background_job_with_max_delta(self): + """ + Tests the same thing as test_background_job, except that it sets the + startup_job_max_delta parameter and checks that the expiration date is within the + allowed range. 
+ """ + user_id = self.register_user("kermit_delta", "user") + + self.hs.config.account_validity.startup_job_max_delta = self.max_delta + + now_ms = self.hs.clock.time_msec() + self.get_success(self.store._set_expiration_date_when_missing()) + + res = self.get_success(self.store.get_expiration_ts_for_user(user_id)) + + self.assertLessEqual(res, now_ms + self.validity_period + self.delta) + self.assertGreaterEqual(res, now_ms + self.validity_period) From 4aba561c65c842e640861035e3937e78ab950a21 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 28 May 2019 16:55:10 +0100 Subject: [PATCH 15/92] Config and changelog --- changelog.d/5276.feature | 1 + docs/sample_config.yaml | 7 +++++++ 2 files changed, 8 insertions(+) create mode 100644 changelog.d/5276.feature diff --git a/changelog.d/5276.feature b/changelog.d/5276.feature new file mode 100644 index 0000000000..403dee0862 --- /dev/null +++ b/changelog.d/5276.feature @@ -0,0 +1 @@ +Allow configuring a range for the account validity startup job. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index f658ec8ecd..8ff53d5cb4 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -755,11 +755,18 @@ uploads_path: "DATADIR/uploads" # after that the validity period changes and Synapse is restarted, the users' # expiration dates won't be updated unless their account is manually renewed. # +# If set, the ``startup_job_max_delta`` optional setting will make the startup job +# described above set a random expiration date between t + period and +# t + period + startup_job_max_delta, t being the date and time at which the job +# sets the expiration date for a given user. This is useful for server admins that +# want to avoid Synapse sending a lot of renewal emails at once. +# #account_validity: # enabled: True # period: 6w # renew_at: 1w # renew_email_subject: "Renew your %(app)s account" +# startup_job_max_delta: 2d # The user must provide all of the below types of 3PID when registering. # From 7e1c7cc2742f5eb9d6d37205a0c457b8a7bd015f Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 28 May 2019 17:13:26 +0100 Subject: [PATCH 16/92] Typo --- tests/rest/client/v2_alpha/test_register.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 7603440fd8..68654e25ab 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -483,5 +483,5 @@ class AccountValidityBackgroundJobTestCase(unittest.HomeserverTestCase): res = self.get_success(self.store.get_expiration_ts_for_user(user_id)) - self.assertLessEqual(res, now_ms + self.validity_period + self.delta) + self.assertLessEqual(res, now_ms + self.validity_period + self.max_delta) self.assertGreaterEqual(res, now_ms + self.validity_period) From 5726378eced1d032552318cb5fd603da8f364db2 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 28 May 2019 21:20:11 +0100 Subject: [PATCH 17/92] Fix "db txn 'update_presence' from sentinel context" log messages (#5275) Fixes #4414. 
--- changelog.d/5275.bugfix | 1 + synapse/handlers/presence.py | 97 +++++++++++++++++------------------- 2 files changed, 47 insertions(+), 51 deletions(-) create mode 100644 changelog.d/5275.bugfix diff --git a/changelog.d/5275.bugfix b/changelog.d/5275.bugfix new file mode 100644 index 0000000000..45a554642a --- /dev/null +++ b/changelog.d/5275.bugfix @@ -0,0 +1 @@ +Fix "db txn 'update_presence' from sentinel context" log messages. diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 59d53f1050..6209858bbb 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -182,17 +182,27 @@ class PresenceHandler(object): # Start a LoopingCall in 30s that fires every 5s. # The initial delay is to allow disconnected clients a chance to # reconnect before we treat them as offline. + def run_timeout_handler(): + return run_as_background_process( + "handle_presence_timeouts", self._handle_timeouts + ) + self.clock.call_later( 30, self.clock.looping_call, - self._handle_timeouts, + run_timeout_handler, 5000, ) + def run_persister(): + return run_as_background_process( + "persist_presence_changes", self._persist_unpersisted_changes + ) + self.clock.call_later( 60, self.clock.looping_call, - self._persist_unpersisted_changes, + run_persister, 60 * 1000, ) @@ -229,6 +239,7 @@ class PresenceHandler(object): ) if self.unpersisted_users_changes: + yield self.store.update_presence([ self.user_to_current_state[user_id] for user_id in self.unpersisted_users_changes @@ -240,30 +251,18 @@ class PresenceHandler(object): """We periodically persist the unpersisted changes, as otherwise they may stack up and slow down shutdown times. """ - logger.info( - "Performing _persist_unpersisted_changes. Persisting %d unpersisted changes", - len(self.unpersisted_users_changes) - ) - unpersisted = self.unpersisted_users_changes self.unpersisted_users_changes = set() if unpersisted: + logger.info( + "Persisting %d upersisted presence updates", len(unpersisted) + ) yield self.store.update_presence([ self.user_to_current_state[user_id] for user_id in unpersisted ]) - logger.info("Finished _persist_unpersisted_changes") - - @defer.inlineCallbacks - def _update_states_and_catch_exception(self, new_states): - try: - res = yield self._update_states(new_states) - defer.returnValue(res) - except Exception: - logger.exception("Error updating presence") - @defer.inlineCallbacks def _update_states(self, new_states): """Updates presence of users. Sets the appropriate timeouts. Pokes @@ -338,45 +337,41 @@ class PresenceHandler(object): logger.info("Handling presence timeouts") now = self.clock.time_msec() - try: - with Measure(self.clock, "presence_handle_timeouts"): - # Fetch the list of users that *may* have timed out. Things may have - # changed since the timeout was set, so we won't necessarily have to - # take any action. - users_to_check = set(self.wheel_timer.fetch(now)) + # Fetch the list of users that *may* have timed out. Things may have + # changed since the timeout was set, so we won't necessarily have to + # take any action. + users_to_check = set(self.wheel_timer.fetch(now)) - # Check whether the lists of syncing processes from an external - # process have expired. 
- expired_process_ids = [ - process_id for process_id, last_update - in self.external_process_last_updated_ms.items() - if now - last_update > EXTERNAL_PROCESS_EXPIRY - ] - for process_id in expired_process_ids: - users_to_check.update( - self.external_process_last_updated_ms.pop(process_id, ()) - ) - self.external_process_last_update.pop(process_id) + # Check whether the lists of syncing processes from an external + # process have expired. + expired_process_ids = [ + process_id for process_id, last_update + in self.external_process_last_updated_ms.items() + if now - last_update > EXTERNAL_PROCESS_EXPIRY + ] + for process_id in expired_process_ids: + users_to_check.update( + self.external_process_last_updated_ms.pop(process_id, ()) + ) + self.external_process_last_update.pop(process_id) - states = [ - self.user_to_current_state.get( - user_id, UserPresenceState.default(user_id) - ) - for user_id in users_to_check - ] + states = [ + self.user_to_current_state.get( + user_id, UserPresenceState.default(user_id) + ) + for user_id in users_to_check + ] - timers_fired_counter.inc(len(states)) + timers_fired_counter.inc(len(states)) - changes = handle_timeouts( - states, - is_mine_fn=self.is_mine_id, - syncing_user_ids=self.get_currently_syncing_users(), - now=now, - ) + changes = handle_timeouts( + states, + is_mine_fn=self.is_mine_id, + syncing_user_ids=self.get_currently_syncing_users(), + now=now, + ) - run_in_background(self._update_states_and_catch_exception, changes) - except Exception: - logger.exception("Exception in _handle_timeouts loop") + return self._update_states(changes) @defer.inlineCallbacks def bump_presence_active_time(self, user): From 9b6f72663e2eb8c2caf834da511c2617d8061e58 Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Tue, 28 May 2019 20:53:56 -0500 Subject: [PATCH 18/92] Fix docs on resetting the user directory (#5036) Signed-off-by: Aaron Raimist --- docs/user_directory.md | 10 +++------- synapse/config/user_directory.py | 6 +++--- 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/docs/user_directory.md b/docs/user_directory.md index 4c8ee44f37..e64aa453cc 100644 --- a/docs/user_directory.md +++ b/docs/user_directory.md @@ -7,11 +7,7 @@ who are present in a publicly viewable room present on the server. The directory info is stored in various tables, which can (typically after DB corruption) get stale or out of sync. If this happens, for now the -quickest solution to fix it is: - -``` -UPDATE user_directory_stream_pos SET stream_id = NULL; -``` - -and restart the synapse, which should then start a background task to +solution to fix it is to execute the SQL here +https://github.com/matrix-org/synapse/blob/master/synapse/storage/schema/delta/53/user_dir_populate.sql +and then restart synapse. This should then start a background task to flush the current tables and regenerate the directory. diff --git a/synapse/config/user_directory.py b/synapse/config/user_directory.py index 142754a7dc..023997ccde 100644 --- a/synapse/config/user_directory.py +++ b/synapse/config/user_directory.py @@ -43,9 +43,9 @@ class UserDirectoryConfig(Config): # # 'search_all_users' defines whether to search all users visible to your HS # when searching the user directory, rather than limiting to users visible - # in public rooms. Defaults to false. If you set it True, you'll have to run - # UPDATE user_directory_stream_pos SET stream_id = NULL; - # on your database to tell it to rebuild the user_directory search indexes. + # in public rooms. Defaults to false. 
If you set it True, you'll have to + # rebuild the user_directory search indexes, see + # https://github.com/matrix-org/synapse/blob/master/docs/user_directory.md # #user_directory: # enabled: true From 878b00c39531d5200a3efab356766e4e2670e589 Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Tue, 28 May 2019 20:58:18 -0500 Subject: [PATCH 19/92] Add changelog Signed-off-by: Aaron Raimist --- changelog.d/5282.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5282.misc diff --git a/changelog.d/5282.misc b/changelog.d/5282.misc new file mode 100644 index 0000000000..350e15bc03 --- /dev/null +++ b/changelog.d/5282.misc @@ -0,0 +1 @@ +Fix docs on resetting the user directory. From f795595e956c4584ae280a59ca122057894e0c54 Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Tue, 28 May 2019 22:04:24 -0500 Subject: [PATCH 20/92] Specify the type of reCAPTCHA key to use (#5013) Signed-off-by: Aaron Raimist --- docs/CAPTCHA_SETUP.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/CAPTCHA_SETUP.rst b/docs/CAPTCHA_SETUP.rst index 19a204d9ce..0c22ee4ff6 100644 --- a/docs/CAPTCHA_SETUP.rst +++ b/docs/CAPTCHA_SETUP.rst @@ -7,6 +7,7 @@ Requires a public/private key pair from: https://developers.google.com/recaptcha/ +Must be a reCAPTCHA v2 key using the "I'm not a robot" Checkbox option Setting ReCaptcha Keys ---------------------- From 2ec28094606383a4e00fa6665b27a64bc00fd9fd Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Tue, 28 May 2019 22:05:51 -0500 Subject: [PATCH 21/92] Add changelog Signed-off-by: Aaron Raimist --- changelog.d/5283.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5283.misc diff --git a/changelog.d/5283.misc b/changelog.d/5283.misc new file mode 100644 index 0000000000..002721e566 --- /dev/null +++ b/changelog.d/5283.misc @@ -0,0 +1 @@ +Specify the type of reCAPTCHA key to use. From ecaa299cabe099449a1a05aef4ba3708c9d231cf Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 29 May 2019 16:32:30 +1000 Subject: [PATCH 22/92] Rename 5282.misc to 5282.doc --- changelog.d/{5282.misc => 5282.doc} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename changelog.d/{5282.misc => 5282.doc} (100%) diff --git a/changelog.d/5282.misc b/changelog.d/5282.doc similarity index 100% rename from changelog.d/5282.misc rename to changelog.d/5282.doc From 0729ef01f80b8d6f2fcf1ab40a22587347b2c777 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 29 May 2019 16:41:25 +1000 Subject: [PATCH 23/92] regenerate sample config --- docs/sample_config.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 421ae96f04..edfde05a23 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1103,9 +1103,9 @@ password_config: # # 'search_all_users' defines whether to search all users visible to your HS # when searching the user directory, rather than limiting to users visible -# in public rooms. Defaults to false. If you set it True, you'll have to run -# UPDATE user_directory_stream_pos SET stream_id = NULL; -# on your database to tell it to rebuild the user_directory search indexes. +# in public rooms. Defaults to false. 
If you set it True, you'll have to +# rebuild the user_directory search indexes, see +# https://github.com/matrix-org/synapse/blob/master/docs/user_directory.md # #user_directory: # enabled: true From f76d407ef3d2f6c18a568eff965e12e794105a7a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 29 May 2019 09:17:33 +0100 Subject: [PATCH 24/92] Fix dropped logcontexts during high outbound traffic. (#5277) Fixes #5271. --- changelog.d/5277.bugfix | 1 + synapse/app/_base.py | 20 +++++++++++++------- 2 files changed, 14 insertions(+), 7 deletions(-) create mode 100644 changelog.d/5277.bugfix diff --git a/changelog.d/5277.bugfix b/changelog.d/5277.bugfix new file mode 100644 index 0000000000..371aa2e7fb --- /dev/null +++ b/changelog.d/5277.bugfix @@ -0,0 +1 @@ +Fix dropped logcontexts during high outbound traffic. diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 08199a5e8d..8cc990399f 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -344,15 +344,21 @@ class _LimitedHostnameResolver(object): def resolveHostName(self, resolutionReceiver, hostName, portNumber=0, addressTypes=None, transportSemantics='TCP'): - # Note this is happening deep within the reactor, so we don't need to - # worry about log contexts. - # We need this function to return `resolutionReceiver` so we do all the # actual logic involving deferreds in a separate function. - self._resolve( - resolutionReceiver, hostName, portNumber, - addressTypes, transportSemantics, - ) + + # even though this is happening within the depths of twisted, we need to drop + # our logcontext before starting _resolve, otherwise: (a) _resolve will drop + # the logcontext if it returns an incomplete deferred; (b) _resolve will + # call the resolutionReceiver *with* a logcontext, which it won't be expecting. + with PreserveLoggingContext(): + self._resolve( + resolutionReceiver, + hostName, + portNumber, + addressTypes, + transportSemantics, + ) return resolutionReceiver From 58c8ed5b0dbfe0556f11985a61c0e13bbe61d93c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 29 May 2019 11:56:24 +0100 Subject: [PATCH 25/92] Correctly filter out extremities with soft failed prevs (#5274) When we receive a soft failed event we, correctly, *do not* update the forward extremity table with the event. However, if we later receive an event that references the soft failed event we then need to remove the soft failed events prev events from the forward extremities table, otherwise we just build up forward extremities. Fixes #5269 --- changelog.d/5274.bugfix | 1 + synapse/storage/events.py | 82 +++++++++++++++++++++++++++++++++++++-- 2 files changed, 80 insertions(+), 3 deletions(-) create mode 100644 changelog.d/5274.bugfix diff --git a/changelog.d/5274.bugfix b/changelog.d/5274.bugfix new file mode 100644 index 0000000000..9e14d20289 --- /dev/null +++ b/changelog.d/5274.bugfix @@ -0,0 +1 @@ +Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 2ffc27ff41..6e9f3d1dc0 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -554,10 +554,18 @@ class EventsStore( e_id for event in new_events for e_id in event.prev_event_ids() ) - # Finally, remove any events which are prev_events of any existing events. + # Remove any events which are prev_events of any existing events. 
existing_prevs = yield self._get_events_which_are_prevs(result) result.difference_update(existing_prevs) + # Finally handle the case where the new events have soft-failed prev + # events. If they do we need to remove them and their prev events, + # otherwise we end up with dangling extremities. + existing_prevs = yield self._get_prevs_before_rejected( + e_id for event in new_events for e_id in event.prev_event_ids() + ) + result.difference_update(existing_prevs) + defer.returnValue(result) @defer.inlineCallbacks @@ -573,7 +581,7 @@ class EventsStore( """ results = [] - def _get_events(txn, batch): + def _get_events_which_are_prevs_txn(txn, batch): sql = """ SELECT prev_event_id, internal_metadata FROM event_edges @@ -596,10 +604,78 @@ class EventsStore( ) for chunk in batch_iter(event_ids, 100): - yield self.runInteraction("_get_events_which_are_prevs", _get_events, chunk) + yield self.runInteraction( + "_get_events_which_are_prevs", + _get_events_which_are_prevs_txn, + chunk, + ) defer.returnValue(results) + @defer.inlineCallbacks + def _get_prevs_before_rejected(self, event_ids): + """Get soft-failed ancestors to remove from the extremities. + + Given a set of events, find all those that have been soft-failed or + rejected. Returns those soft failed/rejected events and their prev + events (whether soft-failed/rejected or not), and recurses up the + prev-event graph until it finds no more soft-failed/rejected events. + + This is used to find extremities that are ancestors of new events, but + are separated by soft failed events. + + Args: + event_ids (Iterable[str]): Events to find prev events for. Note + that these must have already been persisted. + + Returns: + Deferred[set[str]] + """ + + # The set of event_ids to return. This includes all soft-failed events + # and their prev events. + existing_prevs = set() + + def _get_prevs_before_rejected_txn(txn, batch): + to_recursively_check = batch + + while to_recursively_check: + sql = """ + SELECT + event_id, prev_event_id, internal_metadata, + rejections.event_id IS NOT NULL + FROM event_edges + INNER JOIN events USING (event_id) + LEFT JOIN rejections USING (event_id) + LEFT JOIN event_json USING (event_id) + WHERE + event_id IN (%s) + AND NOT events.outlier + """ % ( + ",".join("?" 
for _ in to_recursively_check), + ) + + txn.execute(sql, to_recursively_check) + to_recursively_check = [] + + for event_id, prev_event_id, metadata, rejected in txn: + if prev_event_id in existing_prevs: + continue + + soft_failed = json.loads(metadata).get("soft_failed") + if soft_failed or rejected: + to_recursively_check.append(prev_event_id) + existing_prevs.add(prev_event_id) + + for chunk in batch_iter(event_ids, 100): + yield self.runInteraction( + "_get_prevs_before_rejected", + _get_prevs_before_rejected_txn, + chunk, + ) + + defer.returnValue(existing_prevs) + @defer.inlineCallbacks def _get_new_state_after_events( self, room_id, events_context, old_latest_event_ids, new_latest_event_ids From 30858ff4617517916fc8973b16c6be6e13288bd0 Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Wed, 29 May 2019 08:27:41 -0500 Subject: [PATCH 26/92] Fix error when downloading thumbnail with width/height param missing (#5258) Fix error when downloading thumbnail with width/height param missing Fixes #2748 Signed-off-by: Aaron Raimist --- changelog.d/5258.bugfix | 1 + synapse/rest/media/v1/thumbnail_resource.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/5258.bugfix diff --git a/changelog.d/5258.bugfix b/changelog.d/5258.bugfix new file mode 100644 index 0000000000..fb5d44aedb --- /dev/null +++ b/changelog.d/5258.bugfix @@ -0,0 +1 @@ +Fix error when downloading thumbnail with missing width/height parameter. diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/v1/thumbnail_resource.py index 5305e9175f..35a750923b 100644 --- a/synapse/rest/media/v1/thumbnail_resource.py +++ b/synapse/rest/media/v1/thumbnail_resource.py @@ -56,8 +56,8 @@ class ThumbnailResource(Resource): def _async_render_GET(self, request): set_cors_headers(request) server_name, media_id, _ = parse_media_id(request) - width = parse_integer(request, "width") - height = parse_integer(request, "height") + width = parse_integer(request, "width", required=True) + height = parse_integer(request, "height", required=True) method = parse_string(request, "method", "scale") m_type = parse_string(request, "type", "image/png") From d79c9994f416ee5dab27a277fa729ffa5ee74ccc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 28 May 2019 18:52:41 +0100 Subject: [PATCH 27/92] Add DB bg update to cleanup extremities. Due to #5269 we may have extremities in our DB that we shouldn't have, so lets add a cleanup task such to remove those. 
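This cleans up after the earlier "Correctly filter out extremities with soft failed prevs" change: a soft-failed event is, correctly, never added to `event_forward_extremities`, but once an accepted event arrives that descends from it, the soft-failed event's prev events should stop being extremities. Under the old behaviour they were never removed, so the table grew. A toy model of the leak follows, not Synapse code; the background update below repairs existing rooms with a different, batched traversal, but the end state it aims for is the same:

```python
# Toy room graph: A <- B <- C, where B was soft-failed on arrival.
events = {
    "A": {"prevs": [], "soft_failed": False},
    "B": {"prevs": ["A"], "soft_failed": True},
    "C": {"prevs": ["B"], "soft_failed": False},
}

def forward_extremities(events, walk_through_soft_failed):
    """Accepted events that no considered event points back to."""
    referenced = set()
    for ev in events.values():
        if ev["soft_failed"] and not walk_through_soft_failed:
            continue
        referenced.update(ev["prevs"])
    accepted = {eid for eid, ev in events.items() if not ev["soft_failed"]}
    return accepted - referenced

# Leaky behaviour (#5269): edges of soft-failed events are ignored, so A is
# still counted as an extremity even though C descends from it via B.
print(sorted(forward_extremities(events, walk_through_soft_failed=False)))
# -> ['A', 'C']

# Desired end state: walking through soft-failed events drops A, leaving only
# the real frontier of the graph.
print(sorted(forward_extremities(events, walk_through_soft_failed=True)))
# -> ['C']
```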
--- synapse/storage/events.py | 186 ++++++++++++++++++ .../delta/54/delete_forward_extremities.sql | 19 ++ 2 files changed, 205 insertions(+) create mode 100644 synapse/storage/schema/delta/54/delete_forward_extremities.sql diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 6e9f3d1dc0..a9be143bd5 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -221,6 +221,7 @@ class EventsStore( ): EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts" EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url" + EVENT_DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities" def __init__(self, db_conn, hs): super(EventsStore, self).__init__(db_conn, hs) @@ -252,6 +253,11 @@ class EventsStore( psql_only=True, ) + self.register_background_update_handler( + self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES, + self._cleanup_extremities_bg_update, + ) + self._event_persist_queue = _EventPeristenceQueue() self._state_resolution_handler = hs.get_state_resolution_handler() @@ -2341,6 +2347,186 @@ class EventsStore( get_all_updated_current_state_deltas_txn, ) + @defer.inlineCallbacks + def _cleanup_extremities_bg_update(self, progress, batch_size): + """Background update to clean out extremities that should have been + deleted previously. + + Mainly used to deal with the aftermath of #5269. + """ + + # This works by first copying all existing forward extremities into the + # `_extremities_to_check` table at start up, and then checking each + # event in that table whether we have any descendants that are not + # soft-failed/rejected. If that is the case then we delete that event + # from the forward extremities table. + # + # For efficiency, we do this in batches by recursively pulling out all + # descendants of a batch until we find the non soft-failed/rejected + # events, i.e. the set of descendants whose chain of prev events back + # to the batch of extremities are all soft-failed or rejected. + # Typically, we won't find any such events as extremities will rarely + # have any descendants, but if they do then we should delete those + # extremities. + + def _cleanup_extremities_bg_update_txn(txn): + # The set of extremity event IDs that we're checking this round + original_set = set() + + # A dict[str, set[str]] of event ID to their prev events. + graph = {} + + # The set of descendants of the original set that are not rejected + # nor soft-failed. Ancestors of these events should be removed + # from the forward extremities table. + non_rejected_leaves = set() + + # Set of event IDs that have been soft failed, and for which we + # should check if they have descendants which haven't been soft + # failed. + soft_failed_events_to_lookup = set() + + # First, we get `batch_size` events from the table, pulling out + # their prev events, if any, and their prev events rejection status. + txn.execute( + """SELECT prev_event_id, event_id, internal_metadata, + rejections.event_id IS NOT NULL, events.outlier + FROM ( + SELECT event_id AS prev_event_id + FROM _extremities_to_check + LIMIT ? + ) AS f + LEFT JOIN event_edges USING (prev_event_id) + LEFT JOIN events USING (event_id) + LEFT JOIN event_json USING (event_id) + LEFT JOIN rejections USING (event_id) + """, (batch_size,) + ) + + for prev_event_id, event_id, metadata, rejected, outlier in txn: + original_set.add(prev_event_id) + + if not event_id or outlier: + # Common case where the forward extremity doesn't have any + # descendants. 
+ continue + + graph.setdefault(event_id, set()).add(prev_event_id) + + soft_failed = False + if metadata: + soft_failed = json.loads(metadata).get("soft_failed") + + if soft_failed or rejected: + soft_failed_events_to_lookup.add(event_id) + else: + non_rejected_leaves.add(event_id) + + # Now we recursively check all the soft-failed descendants we + # found above in the same way, until we have nothing left to + # check. + while soft_failed_events_to_lookup: + # We only want to do 100 at a time, so we split given list + # into two. + batch = list(soft_failed_events_to_lookup) + to_check, to_defer = batch[:100], batch[100:] + soft_failed_events_to_lookup = set(to_defer) + + sql = """SELECT prev_event_id, event_id, internal_metadata, + rejections.event_id IS NOT NULL + FROM event_edges + INNER JOIN events USING (event_id) + INNER JOIN event_json USING (event_id) + LEFT JOIN rejections USING (event_id) + WHERE + prev_event_id IN (%s) + AND NOT events.outlier + """ % ( + ",".join("?" for _ in to_check), + ) + txn.execute(sql, to_check) + + for prev_event_id, event_id, metadata, rejected in txn: + if event_id in graph: + # Already handled this event previously, but we still + # want to record the edge. + graph.setdefault(event_id, set()).add(prev_event_id) + logger.info("Already handled") + continue + + graph.setdefault(event_id, set()).add(prev_event_id) + + soft_failed = json.loads(metadata).get("soft_failed") + if soft_failed or rejected: + soft_failed_events_to_lookup.add(event_id) + else: + non_rejected_leaves.add(event_id) + + # We have a set of non-soft-failed descendants, so we recurse up + # the graph to find all ancestors and add them to the set of event + # IDs that we can delete from forward extremities table. + to_delete = set() + while non_rejected_leaves: + event_id = non_rejected_leaves.pop() + prev_event_ids = graph.get(event_id, set()) + non_rejected_leaves.update(prev_event_ids) + to_delete.update(prev_event_ids) + + to_delete.intersection_update(original_set) + + logger.info("Deleting up to %d forward extremities", len(to_delete)) + + self._simple_delete_many_txn( + txn=txn, + table="event_forward_extremities", + column="event_id", + iterable=to_delete, + keyvalues={}, + ) + + if to_delete: + # We now need to invalidate the caches of these rooms + rows = self._simple_select_many_txn( + txn, + table="events", + column="event_id", + iterable=to_delete, + keyvalues={}, + retcols=("room_id",) + ) + for row in rows: + txn.call_after( + self.get_latest_event_ids_in_room.invalidate, + (row["room_id"],) + ) + + self._simple_delete_many_txn( + txn=txn, + table="_extremities_to_check", + column="event_id", + iterable=original_set, + keyvalues={}, + ) + + return len(original_set) + + num_handled = yield self.runInteraction( + "_cleanup_extremities_bg_update", _cleanup_extremities_bg_update_txn, + ) + + if not num_handled: + yield self._end_background_update(self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES) + + def _drop_table_txn(txn): + txn.execute("DROP TABLE _extremities_to_check") + + yield self.runInteraction( + "_cleanup_extremities_bg_update_drop_table", + _drop_table_txn, + ) + + defer.returnValue(num_handled) + AllNewEventsResult = namedtuple( "AllNewEventsResult", diff --git a/synapse/storage/schema/delta/54/delete_forward_extremities.sql b/synapse/storage/schema/delta/54/delete_forward_extremities.sql new file mode 100644 index 0000000000..7056bd1d00 --- /dev/null +++ b/synapse/storage/schema/delta/54/delete_forward_extremities.sql @@ -0,0 +1,19 @@ +/* Copyright 2019 The Matrix.org 
Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +INSERT INTO background_updates (update_name, progress_json) VALUES + ('delete_soft_failed_extremities', '{}'); + +CREATE TABLE _extremities_to_check AS SELECT event_id FROM event_forward_extremities; From 7e8e683754cdc606a1440832d9b1eb47f930ddee Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 29 May 2019 11:58:32 +0100 Subject: [PATCH 28/92] Log actual number of entries deleted --- synapse/storage/_base.py | 12 +++++++++--- synapse/storage/events.py | 6 ++++-- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index fa6839ceca..3fe827cd43 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -1261,7 +1261,8 @@ class SQLBaseStore(object): " AND ".join("%s = ?" % (k,) for k in keyvalues), ) - return txn.execute(sql, list(keyvalues.values())) + txn.execute(sql, list(keyvalues.values())) + return txn.rowcount def _simple_delete_many(self, table, column, iterable, keyvalues, desc): return self.runInteraction( @@ -1280,9 +1281,12 @@ class SQLBaseStore(object): column : column name to test for inclusion against `iterable` iterable : list keyvalues : dict of column names and values to select the rows with + + Returns: + int: Number rows deleted """ if not iterable: - return + return 0 sql = "DELETE FROM %s" % table @@ -1297,7 +1301,9 @@ class SQLBaseStore(object): if clauses: sql = "%s WHERE %s" % (sql, " AND ".join(clauses)) - return txn.execute(sql, values) + txn.execute(sql, values) + + return txn.rowcount def _get_cache_dict( self, db_conn, table, entity_column, stream_column, max_value, limit=100000 diff --git a/synapse/storage/events.py b/synapse/storage/events.py index a9be143bd5..a9664928ca 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -2476,7 +2476,7 @@ class EventsStore( logger.info("Deleting up to %d forward extremities", len(to_delete)) - self._simple_delete_many_txn( + deleted = self._simple_delete_many_txn( txn=txn, table="event_forward_extremities", column="event_id", @@ -2484,7 +2484,9 @@ class EventsStore( keyvalues={}, ) - if to_delete: + logger.info("Deleted %d forward extremities", deleted) + + if deleted: # We now need to invalidate the caches of these rooms rows = self._simple_select_many_txn( txn, From 532b825ed9dff2faec1360fa3ee3734e0d782bd3 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 30 May 2019 00:55:18 +1000 Subject: [PATCH 29/92] Serve CAS login over r0 (#5286) --- changelog.d/5286.feature | 1 + synapse/rest/client/v1/login.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/5286.feature diff --git a/changelog.d/5286.feature b/changelog.d/5286.feature new file mode 100644 index 0000000000..81860279a3 --- /dev/null +++ b/changelog.d/5286.feature @@ -0,0 +1 @@ +CAS login will now hit the r0 API, not the deprecated v1 one. 
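As a rough standalone illustration of the _simple_delete_many_txn change above (returning txn.rowcount rather than the cursor), the sketch below uses plain sqlite3 instead of Synapse's SQLBaseStore; the helper name and table contents are invented for the example:

    import sqlite3

    def simple_delete_many(conn, table, column, iterable):
        # Illustrative helper shaped like _simple_delete_many_txn: build the
        # DELETE, run it, and report how many rows were actually removed.
        if not iterable:
            return 0
        sql = "DELETE FROM %s WHERE %s IN (%s)" % (
            table,
            column,
            ",".join("?" for _ in iterable),
        )
        cur = conn.cursor()
        cur.execute(sql, list(iterable))
        return cur.rowcount  # number of deleted rows, not the cursor object

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE event_forward_extremities (event_id TEXT)")
    conn.executemany(
        "INSERT INTO event_forward_extremities VALUES (?)",
        [("$a",), ("$b",), ("$c",)],
    )
    print(simple_delete_many(conn, "event_forward_extremities", "event_id", ["$a", "$b"]))  # prints 2

Returning the row count is what lets the "Deleted %d forward extremities" log line above report the real number of deletions rather than a cursor object.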
diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 5180e9eaf1..029039c162 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -386,7 +386,7 @@ class CasRedirectServlet(RestServlet): b"redirectUrl": args[b"redirectUrl"][0] }).encode('ascii') hs_redirect_url = (self.cas_service_url + - b"/_matrix/client/api/v1/login/cas/ticket") + b"/_matrix/client/r0/login/cas/ticket") service_param = urllib.parse.urlencode({ b"service": b"%s?%s" % (hs_redirect_url, client_redirect_url_param) }).encode('ascii') @@ -395,7 +395,7 @@ class CasRedirectServlet(RestServlet): class CasTicketServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/login/cas/ticket", releases=()) + PATTERNS = client_path_patterns("/login/cas/ticket") def __init__(self, hs): super(CasTicketServlet, self).__init__(hs) From d7add713a8351024aec9a51c1744f78ac39f552e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 29 May 2019 14:19:11 +0100 Subject: [PATCH 30/92] Add test --- tests/storage/test_cleanup_extrems.py | 248 ++++++++++++++++++++++++++ 1 file changed, 248 insertions(+) create mode 100644 tests/storage/test_cleanup_extrems.py diff --git a/tests/storage/test_cleanup_extrems.py b/tests/storage/test_cleanup_extrems.py new file mode 100644 index 0000000000..6dda66ecd3 --- /dev/null +++ b/tests/storage/test_cleanup_extrems.py @@ -0,0 +1,248 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os.path + +from synapse.api.constants import EventTypes +from synapse.storage import prepare_database +from synapse.types import Requester, UserID + +from tests.unittest import HomeserverTestCase + + +class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase): + """Test the background update to clean forward extremities table. + """ + + def prepare(self, reactor, clock, homeserver): + self.store = homeserver.get_datastore() + self.event_creator = homeserver.get_event_creation_handler() + self.room_creator = homeserver.get_room_creation_handler() + + # Create a test user and room + self.user = UserID("alice", "test") + self.requester = Requester(self.user, None, False, None, None) + info = self.get_success(self.room_creator.create_room(self.requester, {})) + self.room_id = info["room_id"] + + def create_and_send_event(self, soft_failed=False, prev_event_ids=None): + """Create and send an event. + + Args: + soft_failed (bool): Whether to create a soft failed event or not + prev_event_ids (list[str]|None): Explicitly set the prev events, + or if None just use the default + + Returns: + str: The new event's ID. 
+ """ + prev_events_and_hashes = None + if prev_event_ids: + prev_events_and_hashes = [[p, {}, 0] for p in prev_event_ids] + + event, context = self.get_success( + self.event_creator.create_event( + self.requester, + { + "type": EventTypes.Message, + "room_id": self.room_id, + "sender": self.user.to_string(), + "content": {"body": "", "msgtype": "m.text"}, + }, + prev_events_and_hashes=prev_events_and_hashes, + ) + ) + + if soft_failed: + event.internal_metadata.soft_failed = True + + self.get_success( + self.event_creator.send_nonmember_event(self.requester, event, context) + ) + + return event.event_id + + def add_extremity(self, event_id): + """Add the given event as an extremity to the room. + """ + self.get_success( + self.store._simple_insert( + table="event_forward_extremities", + values={"room_id": self.room_id, "event_id": event_id}, + desc="test_add_extremity", + ) + ) + + self.store.get_latest_event_ids_in_room.invalidate((self.room_id,)) + + def run_background_update(self): + """Re run the background update to clean up the extremities. + """ + # Make sure we don't clash with in progress updates. + self.assertTrue(self.store._all_done, "Background updates are still ongoing") + + schema_path = os.path.join( + prepare_database.dir_path, + "schema", + "delta", + "54", + "delete_forward_extremities.sql", + ) + + def run_delta_file(txn): + prepare_database.executescript(txn, schema_path) + + self.get_success( + self.store.runInteraction("test_delete_forward_extremities", run_delta_file) + ) + + # Ugh, have to reset this flag + self.store._all_done = False + + while not self.get_success(self.store.has_completed_background_updates()): + self.get_success(self.store.do_next_background_update(100), by=0.1) + + def test_soft_failed_extremities_handled_correctly(self): + """Test that extremities are correctly calculated in the presence of + soft failed events. + + Tests a graph like: + + A <- SF1 <- SF2 <- B + + Where SF* are soft failed. + """ + + # Create the room graph + event_id_1 = self.create_and_send_event() + event_id_2 = self.create_and_send_event(True, [event_id_1]) + event_id_3 = self.create_and_send_event(True, [event_id_2]) + event_id_4 = self.create_and_send_event(False, [event_id_3]) + + # Check the latest events are as expected + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + + self.assertEqual(latest_event_ids, [event_id_4]) + + def test_basic_cleanup(self): + """Test that extremities are correctly calculated in the presence of + soft failed events. + + Tests a graph like: + + A <- SF1 <- B + + Where SF* are soft failed, and with extremities of A and B + """ + # Create the room graph + event_id_a = self.create_and_send_event() + event_id_sf1 = self.create_and_send_event(True, [event_id_a]) + event_id_b = self.create_and_send_event(False, [event_id_sf1]) + + # Add the new extremity and check the latest events are as expected + self.add_extremity(event_id_a) + + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + self.assertEqual(set(latest_event_ids), set((event_id_a, event_id_b))) + + # Run the background update and check it did the right thing + self.run_background_update() + + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + self.assertEqual(latest_event_ids, [event_id_b]) + + def test_chain_of_fail_cleanup(self): + """Test that extremities are correctly calculated in the presence of + soft failed events. 
+ + Tests a graph like: + + A <- SF1 <- SF2 <- B + + Where SF* are soft failed, and with extremities of A and B + """ + # Create the room graph + event_id_a = self.create_and_send_event() + event_id_sf1 = self.create_and_send_event(True, [event_id_a]) + event_id_sf2 = self.create_and_send_event(True, [event_id_sf1]) + event_id_b = self.create_and_send_event(False, [event_id_sf2]) + + # Add the new extremity and check the latest events are as expected + self.add_extremity(event_id_a) + + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + self.assertEqual(set(latest_event_ids), set((event_id_a, event_id_b))) + + # Run the background update and check it did the right thing + self.run_background_update() + + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + self.assertEqual(latest_event_ids, [event_id_b]) + + def test_forked_graph_cleanup(self): + r"""Test that extremities are correctly calculated in the presence of + soft failed events. + + Tests a graph like, where time flows down the page: + + A B + / \ / + / \ / + SF1 SF2 + | | + SF3 | + / \ | + | \ | + C SF4 + + Where SF* are soft failed, and with them A, B and C marked as + extremities. This should resolve to B and C being marked as extremity. + """ + # Create the room graph + event_id_a = self.create_and_send_event() + event_id_b = self.create_and_send_event() + event_id_sf1 = self.create_and_send_event(True, [event_id_a]) + event_id_sf2 = self.create_and_send_event(True, [event_id_a, event_id_b]) + event_id_sf3 = self.create_and_send_event(True, [event_id_sf1]) + self.create_and_send_event(True, [event_id_sf2, event_id_sf3]) # SF4 + event_id_c = self.create_and_send_event(False, [event_id_sf3]) + + # Add the new extremity and check the latest events are as expected + self.add_extremity(event_id_a) + + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + self.assertEqual( + set(latest_event_ids), set((event_id_a, event_id_b, event_id_c)) + ) + + # Run the background update and check it did the right thing + self.run_background_update() + + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + self.assertEqual(set(latest_event_ids), set([event_id_b, event_id_c])) From 67e0631f8f8bfc2843d2c06ebf20fe2226810686 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 28 May 2019 18:56:02 +0100 Subject: [PATCH 31/92] Newsfile --- changelog.d/5278.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5278.bugfix diff --git a/changelog.d/5278.bugfix b/changelog.d/5278.bugfix new file mode 100644 index 0000000000..9e14d20289 --- /dev/null +++ b/changelog.d/5278.bugfix @@ -0,0 +1 @@ +Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. 
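The ancestor-collection step of the cleanup job above can be run in isolation. Below is a toy walk (plain Python, with event IDs invented to match the test docstrings) over the A <- SF1 <- SF2 <- B graph, where A and B start as forward extremities and SF1/SF2 are soft failed:

    # graph maps each descendant event to its prev events, mirroring the dict
    # built by _cleanup_extremities_bg_update_txn as it recurses through
    # descendants of the extremities being checked.
    graph = {"SF1": {"A"}, "SF2": {"SF1"}, "B": {"SF2"}}
    original_set = {"A", "B"}    # the forward extremities being checked this round
    non_rejected_leaves = {"B"}  # descendants that are neither rejected nor soft failed

    to_delete = set()
    while non_rejected_leaves:
        event_id = non_rejected_leaves.pop()
        prev_event_ids = graph.get(event_id, set())
        non_rejected_leaves.update(prev_event_ids)
        to_delete.update(prev_event_ids)

    to_delete.intersection_update(original_set)
    print(to_delete)  # {'A'}: A is removed from event_forward_extremities, B survives

This matches the expectation in test_chain_of_fail_cleanup above, where only B is left as an extremity once the background update has run.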
From 46c8f7a5170d04dfa6ad02c69667d4aa48635231 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 30 May 2019 01:47:16 +1000 Subject: [PATCH 32/92] Implement the SHHS complexity API (#5216) --- changelog.d/5216.misc | 1 + synapse/api/urls.py | 1 + synapse/federation/transport/server.py | 31 ++++++++- synapse/rest/admin/__init__.py | 12 +++- synapse/storage/events_worker.py | 50 +++++++++++++- tests/federation/test_complexity.py | 90 ++++++++++++++++++++++++++ 6 files changed, 180 insertions(+), 5 deletions(-) create mode 100644 changelog.d/5216.misc create mode 100644 tests/federation/test_complexity.py diff --git a/changelog.d/5216.misc b/changelog.d/5216.misc new file mode 100644 index 0000000000..dbfa29475f --- /dev/null +++ b/changelog.d/5216.misc @@ -0,0 +1 @@ +Synapse will now serve the experimental "room complexity" API endpoint. diff --git a/synapse/api/urls.py b/synapse/api/urls.py index 3c6bddff7a..e16c386a14 100644 --- a/synapse/api/urls.py +++ b/synapse/api/urls.py @@ -26,6 +26,7 @@ CLIENT_API_PREFIX = "/_matrix/client" FEDERATION_PREFIX = "/_matrix/federation" FEDERATION_V1_PREFIX = FEDERATION_PREFIX + "/v1" FEDERATION_V2_PREFIX = FEDERATION_PREFIX + "/v2" +FEDERATION_UNSTABLE_PREFIX = FEDERATION_PREFIX + "/unstable" STATIC_PREFIX = "/_matrix/static" WEB_CLIENT_PREFIX = "/_matrix/client" CONTENT_REPO_PREFIX = "/_matrix/content" diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 385eda2dca..d0efc4e0d3 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -23,7 +23,11 @@ from twisted.internet import defer import synapse from synapse.api.errors import Codes, FederationDeniedError, SynapseError from synapse.api.room_versions import RoomVersions -from synapse.api.urls import FEDERATION_V1_PREFIX, FEDERATION_V2_PREFIX +from synapse.api.urls import ( + FEDERATION_UNSTABLE_PREFIX, + FEDERATION_V1_PREFIX, + FEDERATION_V2_PREFIX, +) from synapse.http.endpoint import parse_and_validate_server_name from synapse.http.server import JsonResource from synapse.http.servlet import ( @@ -1304,6 +1308,30 @@ class FederationGroupsSettingJoinPolicyServlet(BaseFederationServlet): defer.returnValue((200, new_content)) +class RoomComplexityServlet(BaseFederationServlet): + """ + Indicates to other servers how complex (and therefore likely + resource-intensive) a public room this server knows about is. 
+ """ + PATH = "/rooms/(?P[^/]*)/complexity" + PREFIX = FEDERATION_UNSTABLE_PREFIX + + @defer.inlineCallbacks + def on_GET(self, origin, content, query, room_id): + + store = self.handler.hs.get_datastore() + + is_public = yield store.is_room_world_readable_or_publicly_joinable( + room_id + ) + + if not is_public: + raise SynapseError(404, "Room not found", errcode=Codes.INVALID_PARAM) + + complexity = yield store.get_room_complexity(room_id) + defer.returnValue((200, complexity)) + + FEDERATION_SERVLET_CLASSES = ( FederationSendServlet, FederationEventServlet, @@ -1327,6 +1355,7 @@ FEDERATION_SERVLET_CLASSES = ( FederationThirdPartyInviteExchangeServlet, On3pidBindServlet, FederationVersionServlet, + RoomComplexityServlet, ) OPENID_SERVLET_CLASSES = ( diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 744d85594f..d6c4dcdb18 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -822,10 +822,16 @@ class AdminRestResource(JsonResource): def __init__(self, hs): JsonResource.__init__(self, hs, canonical_json=False) + register_servlets(hs, self) - register_servlets_for_client_rest_resource(hs, self) - SendServerNoticeServlet(hs).register(self) - VersionServlet(hs).register(self) + +def register_servlets(hs, http_server): + """ + Register all the admin servlets. + """ + register_servlets_for_client_rest_resource(hs, http_server) + SendServerNoticeServlet(hs).register(http_server) + VersionServlet(hs).register(http_server) def register_servlets_for_client_rest_resource(hs, http_server): diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index 21b353cad3..b56c83e460 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import division + import itertools import logging from collections import namedtuple @@ -614,7 +616,7 @@ class EventsWorkerStore(SQLBaseStore): def _get_total_state_event_counts_txn(self, txn, room_id): """ - See get_state_event_counts. + See get_total_state_event_counts. """ sql = "SELECT COUNT(*) FROM state_events WHERE room_id=?" txn.execute(sql, (room_id,)) @@ -635,3 +637,49 @@ class EventsWorkerStore(SQLBaseStore): "get_total_state_event_counts", self._get_total_state_event_counts_txn, room_id ) + + def _get_current_state_event_counts_txn(self, txn, room_id): + """ + See get_current_state_event_counts. + """ + sql = "SELECT COUNT(*) FROM current_state_events WHERE room_id=?" + txn.execute(sql, (room_id,)) + row = txn.fetchone() + return row[0] if row else 0 + + def get_current_state_event_counts(self, room_id): + """ + Gets the current number of state events in a room. + + Args: + room_id (str) + + Returns: + Deferred[int] + """ + return self.runInteraction( + "get_current_state_event_counts", + self._get_current_state_event_counts_txn, room_id + ) + + @defer.inlineCallbacks + def get_room_complexity(self, room_id): + """ + Get a rough approximation of the complexity of the room. This is used by + remote servers to decide whether they wish to join the room or not. + Higher complexity value indicates that being in the room will consume + more resources. + + Args: + room_id (str) + + Returns: + Deferred[dict[str:int]] of complexity version to complexity. + """ + state_events = yield self.get_current_state_event_counts(room_id) + + # Call this one "v1", so we can introduce new ones as we want to develop + # it. 
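+        # e.g. (matching the test below) a room with 615 current state events
+        # gets a v1 complexity of round(615 / 500, 2) == 1.23.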
+ complexity_v1 = round(state_events / 500, 2) + + defer.returnValue({"v1": complexity_v1}) diff --git a/tests/federation/test_complexity.py b/tests/federation/test_complexity.py new file mode 100644 index 0000000000..1e3e5aec66 --- /dev/null +++ b/tests/federation/test_complexity.py @@ -0,0 +1,90 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 Matrix.org Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from twisted.internet import defer + +from synapse.config.ratelimiting import FederationRateLimitConfig +from synapse.federation.transport import server +from synapse.rest import admin +from synapse.rest.client.v1 import login, room +from synapse.util.ratelimitutils import FederationRateLimiter + +from tests import unittest + + +class RoomComplexityTests(unittest.HomeserverTestCase): + + servlets = [ + admin.register_servlets, + room.register_servlets, + login.register_servlets, + ] + + def default_config(self, name='test'): + config = super(RoomComplexityTests, self).default_config(name=name) + config["limit_large_remote_room_joins"] = True + config["limit_large_remote_room_complexity"] = 0.05 + return config + + def prepare(self, reactor, clock, homeserver): + class Authenticator(object): + def authenticate_request(self, request, content): + return defer.succeed("otherserver.nottld") + + ratelimiter = FederationRateLimiter( + clock, + FederationRateLimitConfig( + window_size=1, + sleep_limit=1, + sleep_msec=1, + reject_limit=1000, + concurrent_requests=1000, + ), + ) + server.register_servlets( + homeserver, self.resource, Authenticator(), ratelimiter + ) + + def test_complexity_simple(self): + + u1 = self.register_user("u1", "pass") + u1_token = self.login("u1", "pass") + + room_1 = self.helper.create_room_as(u1, tok=u1_token) + self.helper.send_state( + room_1, event_type="m.room.topic", body={"topic": "foo"}, tok=u1_token + ) + + # Get the room complexity + request, channel = self.make_request( + "GET", "/_matrix/federation/unstable/rooms/%s/complexity" % (room_1,) + ) + self.render(request) + self.assertEquals(200, channel.code) + complexity = channel.json_body["v1"] + self.assertTrue(complexity > 0, complexity) + + # Artificially raise the complexity + store = self.hs.get_datastore() + store.get_current_state_event_counts = lambda x: defer.succeed(500 * 1.23) + + # Get the room complexity again -- make sure it's our artificial value + request, channel = self.make_request( + "GET", "/_matrix/federation/unstable/rooms/%s/complexity" % (room_1,) + ) + self.render(request) + self.assertEquals(200, channel.code) + complexity = channel.json_body["v1"] + self.assertEqual(complexity, 1.23) From 3dcf2feba8ee38c43c63c0f321fd379f843a5929 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 29 May 2019 19:27:50 +0100 Subject: [PATCH 33/92] Improve logging for logcontext leaks. 
(#5288) --- changelog.d/5288.misc | 1 + synapse/util/logcontext.py | 22 +++++++++++++--------- 2 files changed, 14 insertions(+), 9 deletions(-) create mode 100644 changelog.d/5288.misc diff --git a/changelog.d/5288.misc b/changelog.d/5288.misc new file mode 100644 index 0000000000..fbf049ba6a --- /dev/null +++ b/changelog.d/5288.misc @@ -0,0 +1 @@ +Improve logging for logcontext leaks. diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index 311b49e18a..fe412355d8 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -226,6 +226,8 @@ class LoggingContext(object): self.request = request def __str__(self): + if self.request: + return str(self.request) return "%s@%x" % (self.name, id(self)) @classmethod @@ -274,12 +276,10 @@ class LoggingContext(object): current = self.set_current_context(self.previous_context) if current is not self: if current is self.sentinel: - logger.warn("Expected logging context %s has been lost", self) + logger.warning("Expected logging context %s was lost", self) else: - logger.warn( - "Current logging context %s is not expected context %s", - current, - self + logger.warning( + "Expected logging context %s but found %s", self, current ) self.previous_context = None self.alive = False @@ -433,10 +433,14 @@ class PreserveLoggingContext(object): context = LoggingContext.set_current_context(self.current_context) if context != self.new_context: - logger.warn( - "Unexpected logging context: %s is not %s", - context, self.new_context, - ) + if context is LoggingContext.sentinel: + logger.warning("Expected logging context %s was lost", self.new_context) + else: + logger.warning( + "Expected logging context %s but found %s", + self.new_context, + context, + ) if self.current_context is not LoggingContext.sentinel: if not self.current_context.alive: From 8d92329214f92b0e4e4f2d6fa21e1197a691ba5b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 29 May 2019 19:31:52 +0100 Subject: [PATCH 34/92] Remove spurious debug from MatrixFederationHttpClient.get_json (#5287) This is just unhelpful spam --- changelog.d/5287.misc | 1 + synapse/http/matrixfederationclient.py | 4 ---- 2 files changed, 1 insertion(+), 4 deletions(-) create mode 100644 changelog.d/5287.misc diff --git a/changelog.d/5287.misc b/changelog.d/5287.misc new file mode 100644 index 0000000000..1286f1dd08 --- /dev/null +++ b/changelog.d/5287.misc @@ -0,0 +1 @@ +Remove spurious debug from MatrixFederationHttpClient.get_json. diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 7eefc7b1fc..8197619a78 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -711,10 +711,6 @@ class MatrixFederationHttpClient(object): RequestSendFailed: If there were problems connecting to the remote, due to e.g. DNS failures, connection timeouts etc. 
""" - logger.debug("get_json args: %s", args) - - logger.debug("Query bytes: %s Retry DNS: %s", args, retry_on_dns_fail) - request = MatrixFederationRequest( method="GET", destination=destination, From 123918b73938bdba89e6e0ce66482444590f2b4e Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Wed, 29 May 2019 14:44:25 -0500 Subject: [PATCH 35/92] Lint Signed-off-by: Aaron Raimist --- synapse/rest/client/v1/logout.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/synapse/rest/client/v1/logout.py b/synapse/rest/client/v1/logout.py index 2cf373e83c..ba20e75033 100644 --- a/synapse/rest/client/v1/logout.py +++ b/synapse/rest/client/v1/logout.py @@ -17,8 +17,6 @@ import logging from twisted.internet import defer -from synapse.api.errors import AuthError - from .base import ClientV1RestServlet, client_path_patterns logger = logging.getLogger(__name__) From 640fcbb07f8dc7d89465734f009d8e0a458c2b17 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 10:55:55 +0100 Subject: [PATCH 36/92] Fixup comments and logging --- synapse/storage/events.py | 21 +++++++++++-------- .../delta/54/delete_forward_extremities.sql | 3 +++ 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index a9664928ca..418d88b8dc 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -2387,7 +2387,8 @@ class EventsStore( soft_failed_events_to_lookup = set() # First, we get `batch_size` events from the table, pulling out - # their prev events, if any, and their prev events rejection status. + # their successor events, if any, and their successor events + # rejection status. txn.execute( """SELECT prev_event_id, event_id, internal_metadata, rejections.event_id IS NOT NULL, events.outlier @@ -2450,11 +2451,10 @@ class EventsStore( if event_id in graph: # Already handled this event previously, but we still # want to record the edge. - graph.setdefault(event_id, set()).add(prev_event_id) - logger.info("Already handled") + graph[event_id].add(prev_event_id) continue - graph.setdefault(event_id, set()).add(prev_event_id) + graph[event_id] = {prev_event_id} soft_failed = json.loads(metadata).get("soft_failed") if soft_failed or rejected: @@ -2474,8 +2474,6 @@ class EventsStore( to_delete.intersection_update(original_set) - logger.info("Deleting up to %d forward extremities", len(to_delete)) - deleted = self._simple_delete_many_txn( txn=txn, table="event_forward_extremities", @@ -2484,7 +2482,11 @@ class EventsStore( keyvalues={}, ) - logger.info("Deleted %d forward extremities", deleted) + logger.info( + "Deleted %d forward extremities of %d checked, to clean up #5269", + deleted, + len(original_set), + ) if deleted: # We now need to invalidate the caches of these rooms @@ -2496,10 +2498,11 @@ class EventsStore( keyvalues={}, retcols=("room_id",) ) - for row in rows: + room_ids = set(row["room_id"] for row in rows) + for room_id in room_ids: txn.call_after( self.get_latest_event_ids_in_room.invalidate, - (row["room_id"],) + (room_id,) ) self._simple_delete_many_txn( diff --git a/synapse/storage/schema/delta/54/delete_forward_extremities.sql b/synapse/storage/schema/delta/54/delete_forward_extremities.sql index 7056bd1d00..aa40f13da7 100644 --- a/synapse/storage/schema/delta/54/delete_forward_extremities.sql +++ b/synapse/storage/schema/delta/54/delete_forward_extremities.sql @@ -13,7 +13,10 @@ * limitations under the License. */ +-- Start a background job to cleanup extremities that were incorrectly added +-- by bug #5269. 
INSERT INTO background_updates (update_name, progress_json) VALUES ('delete_soft_failed_extremities', '{}'); +DROP TABLE IF EXISTS _extremities_to_check; -- To make this delta schema file idempotent. CREATE TABLE _extremities_to_check AS SELECT event_id FROM event_forward_extremities; From 5c1ece0ffcd803eb4bf8e5748d3e2633426e00a0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 11:22:59 +0100 Subject: [PATCH 37/92] Move event background updates to a separate file --- synapse/storage/__init__.py | 2 + synapse/storage/events.py | 371 +------------------------ synapse/storage/events_bg_updates.py | 401 +++++++++++++++++++++++++++ 3 files changed, 405 insertions(+), 369 deletions(-) create mode 100644 synapse/storage/events_bg_updates.py diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 66675d08ae..71316f7d09 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -36,6 +36,7 @@ from .engines import PostgresEngine from .event_federation import EventFederationStore from .event_push_actions import EventPushActionsStore from .events import EventsStore +from .events_bg_updates import EventsBackgroundUpdatesStore from .filtering import FilteringStore from .group_server import GroupServerStore from .keys import KeyStore @@ -66,6 +67,7 @@ logger = logging.getLogger(__name__) class DataStore( + EventsBackgroundUpdatesStore, RoomMemberStore, RoomStore, RegistrationStore, diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 418d88b8dc..f9162be9b9 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd -# Copyright 2018 New Vector Ltd +# Copyright 2018-2019 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -219,47 +220,11 @@ class EventsStore( EventsWorkerStore, BackgroundUpdateStore, ): - EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts" - EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url" - EVENT_DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities" def __init__(self, db_conn, hs): super(EventsStore, self).__init__(db_conn, hs) - self.register_background_update_handler( - self.EVENT_ORIGIN_SERVER_TS_NAME, self._background_reindex_origin_server_ts - ) - self.register_background_update_handler( - self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, - self._background_reindex_fields_sender, - ) - - self.register_background_index_update( - "event_contains_url_index", - index_name="event_contains_url_index", - table="events", - columns=["room_id", "topological_ordering", "stream_ordering"], - where_clause="contains_url = true AND outlier = false", - ) - - # an event_id index on event_search is useful for the purge_history - # api. 
Plus it means we get to enforce some integrity with a UNIQUE - # clause - self.register_background_index_update( - "event_search_event_id_idx", - index_name="event_search_event_id_idx", - table="event_search", - columns=["event_id"], - unique=True, - psql_only=True, - ) - - self.register_background_update_handler( - self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES, - self._cleanup_extremities_bg_update, - ) self._event_persist_queue = _EventPeristenceQueue() - self._state_resolution_handler = hs.get_state_resolution_handler() @defer.inlineCallbacks @@ -1585,153 +1550,6 @@ class EventsStore( ret = yield self.runInteraction("count_daily_active_rooms", _count) defer.returnValue(ret) - @defer.inlineCallbacks - def _background_reindex_fields_sender(self, progress, batch_size): - target_min_stream_id = progress["target_min_stream_id_inclusive"] - max_stream_id = progress["max_stream_id_exclusive"] - rows_inserted = progress.get("rows_inserted", 0) - - INSERT_CLUMP_SIZE = 1000 - - def reindex_txn(txn): - sql = ( - "SELECT stream_ordering, event_id, json FROM events" - " INNER JOIN event_json USING (event_id)" - " WHERE ? <= stream_ordering AND stream_ordering < ?" - " ORDER BY stream_ordering DESC" - " LIMIT ?" - ) - - txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size)) - - rows = txn.fetchall() - if not rows: - return 0 - - min_stream_id = rows[-1][0] - - update_rows = [] - for row in rows: - try: - event_id = row[1] - event_json = json.loads(row[2]) - sender = event_json["sender"] - content = event_json["content"] - - contains_url = "url" in content - if contains_url: - contains_url &= isinstance(content["url"], text_type) - except (KeyError, AttributeError): - # If the event is missing a necessary field then - # skip over it. - continue - - update_rows.append((sender, contains_url, event_id)) - - sql = "UPDATE events SET sender = ?, contains_url = ? WHERE event_id = ?" - - for index in range(0, len(update_rows), INSERT_CLUMP_SIZE): - clump = update_rows[index : index + INSERT_CLUMP_SIZE] - txn.executemany(sql, clump) - - progress = { - "target_min_stream_id_inclusive": target_min_stream_id, - "max_stream_id_exclusive": min_stream_id, - "rows_inserted": rows_inserted + len(rows), - } - - self._background_update_progress_txn( - txn, self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, progress - ) - - return len(rows) - - result = yield self.runInteraction( - self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, reindex_txn - ) - - if not result: - yield self._end_background_update(self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME) - - defer.returnValue(result) - - @defer.inlineCallbacks - def _background_reindex_origin_server_ts(self, progress, batch_size): - target_min_stream_id = progress["target_min_stream_id_inclusive"] - max_stream_id = progress["max_stream_id_exclusive"] - rows_inserted = progress.get("rows_inserted", 0) - - INSERT_CLUMP_SIZE = 1000 - - def reindex_search_txn(txn): - sql = ( - "SELECT stream_ordering, event_id FROM events" - " WHERE ? <= stream_ordering AND stream_ordering < ?" - " ORDER BY stream_ordering DESC" - " LIMIT ?" 
- ) - - txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size)) - - rows = txn.fetchall() - if not rows: - return 0 - - min_stream_id = rows[-1][0] - event_ids = [row[1] for row in rows] - - rows_to_update = [] - - chunks = [event_ids[i : i + 100] for i in range(0, len(event_ids), 100)] - for chunk in chunks: - ev_rows = self._simple_select_many_txn( - txn, - table="event_json", - column="event_id", - iterable=chunk, - retcols=["event_id", "json"], - keyvalues={}, - ) - - for row in ev_rows: - event_id = row["event_id"] - event_json = json.loads(row["json"]) - try: - origin_server_ts = event_json["origin_server_ts"] - except (KeyError, AttributeError): - # If the event is missing a necessary field then - # skip over it. - continue - - rows_to_update.append((origin_server_ts, event_id)) - - sql = "UPDATE events SET origin_server_ts = ? WHERE event_id = ?" - - for index in range(0, len(rows_to_update), INSERT_CLUMP_SIZE): - clump = rows_to_update[index : index + INSERT_CLUMP_SIZE] - txn.executemany(sql, clump) - - progress = { - "target_min_stream_id_inclusive": target_min_stream_id, - "max_stream_id_exclusive": min_stream_id, - "rows_inserted": rows_inserted + len(rows_to_update), - } - - self._background_update_progress_txn( - txn, self.EVENT_ORIGIN_SERVER_TS_NAME, progress - ) - - return len(rows_to_update) - - result = yield self.runInteraction( - self.EVENT_ORIGIN_SERVER_TS_NAME, reindex_search_txn - ) - - if not result: - yield self._end_background_update(self.EVENT_ORIGIN_SERVER_TS_NAME) - - defer.returnValue(result) - def get_current_backfill_token(self): """The current minimum token that backfilled events have reached""" return -self._backfill_id_gen.get_current_token() @@ -2347,191 +2165,6 @@ class EventsStore( get_all_updated_current_state_deltas_txn, ) - @defer.inlineCallbacks - def _cleanup_extremities_bg_update(self, progress, batch_size): - """Background update to clean out extremities that should have been - deleted previously. - - Mainly used to deal with the aftermath of #5269. - """ - - # This works by first copying all existing forward extremities into the - # `_extremities_to_check` table at start up, and then checking each - # event in that table whether we have any descendants that are not - # soft-failed/rejected. If that is the case then we delete that event - # from the forward extremities table. - # - # For efficiency, we do this in batches by recursively pulling out all - # descendants of a batch until we find the non soft-failed/rejected - # events, i.e. the set of descendants whose chain of prev events back - # to the batch of extremities are all soft-failed or rejected. - # Typically, we won't find any such events as extremities will rarely - # have any descendants, but if they do then we should delete those - # extremities. - - def _cleanup_extremities_bg_update_txn(txn): - # The set of extremity event IDs that we're checking this round - original_set = set() - - # A dict[str, set[str]] of event ID to their prev events. - graph = {} - - # The set of descendants of the original set that are not rejected - # nor soft-failed. Ancestors of these events should be removed - # from the forward extremities table. - non_rejected_leaves = set() - - # Set of event IDs that have been soft failed, and for which we - # should check if they have descendants which haven't been soft - # failed. 
- soft_failed_events_to_lookup = set() - - # First, we get `batch_size` events from the table, pulling out - # their successor events, if any, and their successor events - # rejection status. - txn.execute( - """SELECT prev_event_id, event_id, internal_metadata, - rejections.event_id IS NOT NULL, events.outlier - FROM ( - SELECT event_id AS prev_event_id - FROM _extremities_to_check - LIMIT ? - ) AS f - LEFT JOIN event_edges USING (prev_event_id) - LEFT JOIN events USING (event_id) - LEFT JOIN event_json USING (event_id) - LEFT JOIN rejections USING (event_id) - """, (batch_size,) - ) - - for prev_event_id, event_id, metadata, rejected, outlier in txn: - original_set.add(prev_event_id) - - if not event_id or outlier: - # Common case where the forward extremity doesn't have any - # descendants. - continue - - graph.setdefault(event_id, set()).add(prev_event_id) - - soft_failed = False - if metadata: - soft_failed = json.loads(metadata).get("soft_failed") - - if soft_failed or rejected: - soft_failed_events_to_lookup.add(event_id) - else: - non_rejected_leaves.add(event_id) - - # Now we recursively check all the soft-failed descendants we - # found above in the same way, until we have nothing left to - # check. - while soft_failed_events_to_lookup: - # We only want to do 100 at a time, so we split given list - # into two. - batch = list(soft_failed_events_to_lookup) - to_check, to_defer = batch[:100], batch[100:] - soft_failed_events_to_lookup = set(to_defer) - - sql = """SELECT prev_event_id, event_id, internal_metadata, - rejections.event_id IS NOT NULL - FROM event_edges - INNER JOIN events USING (event_id) - INNER JOIN event_json USING (event_id) - LEFT JOIN rejections USING (event_id) - WHERE - prev_event_id IN (%s) - AND NOT events.outlier - """ % ( - ",".join("?" for _ in to_check), - ) - txn.execute(sql, to_check) - - for prev_event_id, event_id, metadata, rejected in txn: - if event_id in graph: - # Already handled this event previously, but we still - # want to record the edge. - graph[event_id].add(prev_event_id) - continue - - graph[event_id] = {prev_event_id} - - soft_failed = json.loads(metadata).get("soft_failed") - if soft_failed or rejected: - soft_failed_events_to_lookup.add(event_id) - else: - non_rejected_leaves.add(event_id) - - # We have a set of non-soft-failed descendants, so we recurse up - # the graph to find all ancestors and add them to the set of event - # IDs that we can delete from forward extremities table. 
- to_delete = set() - while non_rejected_leaves: - event_id = non_rejected_leaves.pop() - prev_event_ids = graph.get(event_id, set()) - non_rejected_leaves.update(prev_event_ids) - to_delete.update(prev_event_ids) - - to_delete.intersection_update(original_set) - - deleted = self._simple_delete_many_txn( - txn=txn, - table="event_forward_extremities", - column="event_id", - iterable=to_delete, - keyvalues={}, - ) - - logger.info( - "Deleted %d forward extremities of %d checked, to clean up #5269", - deleted, - len(original_set), - ) - - if deleted: - # We now need to invalidate the caches of these rooms - rows = self._simple_select_many_txn( - txn, - table="events", - column="event_id", - iterable=to_delete, - keyvalues={}, - retcols=("room_id",) - ) - room_ids = set(row["room_id"] for row in rows) - for room_id in room_ids: - txn.call_after( - self.get_latest_event_ids_in_room.invalidate, - (room_id,) - ) - - self._simple_delete_many_txn( - txn=txn, - table="_extremities_to_check", - column="event_id", - iterable=original_set, - keyvalues={}, - ) - - return len(original_set) - - num_handled = yield self.runInteraction( - "_cleanup_extremities_bg_update", _cleanup_extremities_bg_update_txn, - ) - - if not num_handled: - yield self._end_background_update(self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES) - - def _drop_table_txn(txn): - txn.execute("DROP TABLE _extremities_to_check") - - yield self.runInteraction( - "_cleanup_extremities_bg_update_drop_table", - _drop_table_txn, - ) - - defer.returnValue(num_handled) - AllNewEventsResult = namedtuple( "AllNewEventsResult", diff --git a/synapse/storage/events_bg_updates.py b/synapse/storage/events_bg_updates.py new file mode 100644 index 0000000000..2eba106abf --- /dev/null +++ b/synapse/storage/events_bg_updates.py @@ -0,0 +1,401 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging + +from six import text_type + +from canonicaljson import json + +from twisted.internet import defer + +from synapse.storage.background_updates import BackgroundUpdateStore + +logger = logging.getLogger(__name__) + + +class EventsBackgroundUpdatesStore(BackgroundUpdateStore): + + EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts" + EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url" + EVENT_DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities" + + def __init__(self, db_conn, hs): + super(EventsBackgroundUpdatesStore, self).__init__(db_conn, hs) + + self.register_background_update_handler( + self.EVENT_ORIGIN_SERVER_TS_NAME, self._background_reindex_origin_server_ts + ) + self.register_background_update_handler( + self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, + self._background_reindex_fields_sender, + ) + + self.register_background_index_update( + "event_contains_url_index", + index_name="event_contains_url_index", + table="events", + columns=["room_id", "topological_ordering", "stream_ordering"], + where_clause="contains_url = true AND outlier = false", + ) + + # an event_id index on event_search is useful for the purge_history + # api. Plus it means we get to enforce some integrity with a UNIQUE + # clause + self.register_background_index_update( + "event_search_event_id_idx", + index_name="event_search_event_id_idx", + table="event_search", + columns=["event_id"], + unique=True, + psql_only=True, + ) + + self.register_background_update_handler( + self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES, + self._cleanup_extremities_bg_update, + ) + + @defer.inlineCallbacks + def _background_reindex_fields_sender(self, progress, batch_size): + target_min_stream_id = progress["target_min_stream_id_inclusive"] + max_stream_id = progress["max_stream_id_exclusive"] + rows_inserted = progress.get("rows_inserted", 0) + + INSERT_CLUMP_SIZE = 1000 + + def reindex_txn(txn): + sql = ( + "SELECT stream_ordering, event_id, json FROM events" + " INNER JOIN event_json USING (event_id)" + " WHERE ? <= stream_ordering AND stream_ordering < ?" + " ORDER BY stream_ordering DESC" + " LIMIT ?" + ) + + txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size)) + + rows = txn.fetchall() + if not rows: + return 0 + + min_stream_id = rows[-1][0] + + update_rows = [] + for row in rows: + try: + event_id = row[1] + event_json = json.loads(row[2]) + sender = event_json["sender"] + content = event_json["content"] + + contains_url = "url" in content + if contains_url: + contains_url &= isinstance(content["url"], text_type) + except (KeyError, AttributeError): + # If the event is missing a necessary field then + # skip over it. + continue + + update_rows.append((sender, contains_url, event_id)) + + sql = "UPDATE events SET sender = ?, contains_url = ? WHERE event_id = ?" 
+ + for index in range(0, len(update_rows), INSERT_CLUMP_SIZE): + clump = update_rows[index : index + INSERT_CLUMP_SIZE] + txn.executemany(sql, clump) + + progress = { + "target_min_stream_id_inclusive": target_min_stream_id, + "max_stream_id_exclusive": min_stream_id, + "rows_inserted": rows_inserted + len(rows), + } + + self._background_update_progress_txn( + txn, self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, progress + ) + + return len(rows) + + result = yield self.runInteraction( + self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, reindex_txn + ) + + if not result: + yield self._end_background_update(self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME) + + defer.returnValue(result) + + @defer.inlineCallbacks + def _background_reindex_origin_server_ts(self, progress, batch_size): + target_min_stream_id = progress["target_min_stream_id_inclusive"] + max_stream_id = progress["max_stream_id_exclusive"] + rows_inserted = progress.get("rows_inserted", 0) + + INSERT_CLUMP_SIZE = 1000 + + def reindex_search_txn(txn): + sql = ( + "SELECT stream_ordering, event_id FROM events" + " WHERE ? <= stream_ordering AND stream_ordering < ?" + " ORDER BY stream_ordering DESC" + " LIMIT ?" + ) + + txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size)) + + rows = txn.fetchall() + if not rows: + return 0 + + min_stream_id = rows[-1][0] + event_ids = [row[1] for row in rows] + + rows_to_update = [] + + chunks = [event_ids[i : i + 100] for i in range(0, len(event_ids), 100)] + for chunk in chunks: + ev_rows = self._simple_select_many_txn( + txn, + table="event_json", + column="event_id", + iterable=chunk, + retcols=["event_id", "json"], + keyvalues={}, + ) + + for row in ev_rows: + event_id = row["event_id"] + event_json = json.loads(row["json"]) + try: + origin_server_ts = event_json["origin_server_ts"] + except (KeyError, AttributeError): + # If the event is missing a necessary field then + # skip over it. + continue + + rows_to_update.append((origin_server_ts, event_id)) + + sql = "UPDATE events SET origin_server_ts = ? WHERE event_id = ?" + + for index in range(0, len(rows_to_update), INSERT_CLUMP_SIZE): + clump = rows_to_update[index : index + INSERT_CLUMP_SIZE] + txn.executemany(sql, clump) + + progress = { + "target_min_stream_id_inclusive": target_min_stream_id, + "max_stream_id_exclusive": min_stream_id, + "rows_inserted": rows_inserted + len(rows_to_update), + } + + self._background_update_progress_txn( + txn, self.EVENT_ORIGIN_SERVER_TS_NAME, progress + ) + + return len(rows_to_update) + + result = yield self.runInteraction( + self.EVENT_ORIGIN_SERVER_TS_NAME, reindex_search_txn + ) + + if not result: + yield self._end_background_update(self.EVENT_ORIGIN_SERVER_TS_NAME) + + defer.returnValue(result) + + @defer.inlineCallbacks + def _cleanup_extremities_bg_update(self, progress, batch_size): + """Background update to clean out extremities that should have been + deleted previously. + + Mainly used to deal with the aftermath of #5269. + """ + + # This works by first copying all existing forward extremities into the + # `_extremities_to_check` table at start up, and then checking each + # event in that table whether we have any descendants that are not + # soft-failed/rejected. If that is the case then we delete that event + # from the forward extremities table. + # + # For efficiency, we do this in batches by recursively pulling out all + # descendants of a batch until we find the non soft-failed/rejected + # events, i.e. 
the set of descendants whose chain of prev events back + # to the batch of extremities are all soft-failed or rejected. + # Typically, we won't find any such events as extremities will rarely + # have any descendants, but if they do then we should delete those + # extremities. + + def _cleanup_extremities_bg_update_txn(txn): + # The set of extremity event IDs that we're checking this round + original_set = set() + + # A dict[str, set[str]] of event ID to their prev events. + graph = {} + + # The set of descendants of the original set that are not rejected + # nor soft-failed. Ancestors of these events should be removed + # from the forward extremities table. + non_rejected_leaves = set() + + # Set of event IDs that have been soft failed, and for which we + # should check if they have descendants which haven't been soft + # failed. + soft_failed_events_to_lookup = set() + + # First, we get `batch_size` events from the table, pulling out + # their successor events, if any, and their successor events + # rejection status. + txn.execute( + """SELECT prev_event_id, event_id, internal_metadata, + rejections.event_id IS NOT NULL, events.outlier + FROM ( + SELECT event_id AS prev_event_id + FROM _extremities_to_check + LIMIT ? + ) AS f + LEFT JOIN event_edges USING (prev_event_id) + LEFT JOIN events USING (event_id) + LEFT JOIN event_json USING (event_id) + LEFT JOIN rejections USING (event_id) + """, (batch_size,) + ) + + for prev_event_id, event_id, metadata, rejected, outlier in txn: + original_set.add(prev_event_id) + + if not event_id or outlier: + # Common case where the forward extremity doesn't have any + # descendants. + continue + + graph.setdefault(event_id, set()).add(prev_event_id) + + soft_failed = False + if metadata: + soft_failed = json.loads(metadata).get("soft_failed") + + if soft_failed or rejected: + soft_failed_events_to_lookup.add(event_id) + else: + non_rejected_leaves.add(event_id) + + # Now we recursively check all the soft-failed descendants we + # found above in the same way, until we have nothing left to + # check. + while soft_failed_events_to_lookup: + # We only want to do 100 at a time, so we split given list + # into two. + batch = list(soft_failed_events_to_lookup) + to_check, to_defer = batch[:100], batch[100:] + soft_failed_events_to_lookup = set(to_defer) + + sql = """SELECT prev_event_id, event_id, internal_metadata, + rejections.event_id IS NOT NULL + FROM event_edges + INNER JOIN events USING (event_id) + INNER JOIN event_json USING (event_id) + LEFT JOIN rejections USING (event_id) + WHERE + prev_event_id IN (%s) + AND NOT events.outlier + """ % ( + ",".join("?" for _ in to_check), + ) + txn.execute(sql, to_check) + + for prev_event_id, event_id, metadata, rejected in txn: + if event_id in graph: + # Already handled this event previously, but we still + # want to record the edge. + graph[event_id].add(prev_event_id) + continue + + graph[event_id] = {prev_event_id} + + soft_failed = json.loads(metadata).get("soft_failed") + if soft_failed or rejected: + soft_failed_events_to_lookup.add(event_id) + else: + non_rejected_leaves.add(event_id) + + # We have a set of non-soft-failed descendants, so we recurse up + # the graph to find all ancestors and add them to the set of event + # IDs that we can delete from forward extremities table. 
+ to_delete = set() + while non_rejected_leaves: + event_id = non_rejected_leaves.pop() + prev_event_ids = graph.get(event_id, set()) + non_rejected_leaves.update(prev_event_ids) + to_delete.update(prev_event_ids) + + to_delete.intersection_update(original_set) + + deleted = self._simple_delete_many_txn( + txn=txn, + table="event_forward_extremities", + column="event_id", + iterable=to_delete, + keyvalues={}, + ) + + logger.info( + "Deleted %d forward extremities of %d checked, to clean up #5269", + deleted, + len(original_set), + ) + + if deleted: + # We now need to invalidate the caches of these rooms + rows = self._simple_select_many_txn( + txn, + table="events", + column="event_id", + iterable=to_delete, + keyvalues={}, + retcols=("room_id",) + ) + room_ids = set(row["room_id"] for row in rows) + for room_id in room_ids: + txn.call_after( + self.get_latest_event_ids_in_room.invalidate, + (room_id,) + ) + + self._simple_delete_many_txn( + txn=txn, + table="_extremities_to_check", + column="event_id", + iterable=original_set, + keyvalues={}, + ) + + return len(original_set) + + num_handled = yield self.runInteraction( + "_cleanup_extremities_bg_update", _cleanup_extremities_bg_update_txn, + ) + + if not num_handled: + yield self._end_background_update(self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES) + + def _drop_table_txn(txn): + txn.execute("DROP TABLE _extremities_to_check") + + yield self.runInteraction( + "_cleanup_extremities_bg_update_drop_table", + _drop_table_txn, + ) + + defer.returnValue(num_handled) From 468bd090ff354f27597e08b54a969f22afbfad43 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 11:24:42 +0100 Subject: [PATCH 38/92] Rename constant --- synapse/storage/events_bg_updates.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/storage/events_bg_updates.py b/synapse/storage/events_bg_updates.py index 2eba106abf..22aac1393d 100644 --- a/synapse/storage/events_bg_updates.py +++ b/synapse/storage/events_bg_updates.py @@ -30,7 +30,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts" EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url" - EVENT_DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities" + DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities" def __init__(self, db_conn, hs): super(EventsBackgroundUpdatesStore, self).__init__(db_conn, hs) @@ -64,7 +64,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): ) self.register_background_update_handler( - self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES, + self.DELETE_SOFT_FAILED_EXTREMITIES, self._cleanup_extremities_bg_update, ) @@ -388,7 +388,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): ) if not num_handled: - yield self._end_background_update(self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES) + yield self._end_background_update(self.DELETE_SOFT_FAILED_EXTREMITIES) def _drop_table_txn(txn): txn.execute("DROP TABLE _extremities_to_check") From cb967e2346096d7e647c757e3b57093549746f14 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 14:06:42 +0100 Subject: [PATCH 39/92] Update synapse/storage/events_bg_updates.py Co-Authored-By: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- synapse/storage/events_bg_updates.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/events_bg_updates.py b/synapse/storage/events_bg_updates.py index 22aac1393d..75c1935bf3 100644 --- a/synapse/storage/events_bg_updates.py 
+++ b/synapse/storage/events_bg_updates.py @@ -255,7 +255,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): soft_failed_events_to_lookup = set() # First, we get `batch_size` events from the table, pulling out - # their successor events, if any, and their successor events + # their successor events, if any, and the successor events' # rejection status. txn.execute( """SELECT prev_event_id, event_id, internal_metadata, From 6cdfb0207e2de72286a7a8d3b3c417c2808e90dc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 14:54:56 +0100 Subject: [PATCH 40/92] Add index to temp table --- synapse/storage/schema/delta/54/delete_forward_extremities.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/storage/schema/delta/54/delete_forward_extremities.sql b/synapse/storage/schema/delta/54/delete_forward_extremities.sql index aa40f13da7..b062ec840c 100644 --- a/synapse/storage/schema/delta/54/delete_forward_extremities.sql +++ b/synapse/storage/schema/delta/54/delete_forward_extremities.sql @@ -20,3 +20,4 @@ INSERT INTO background_updates (update_name, progress_json) VALUES DROP TABLE IF EXISTS _extremities_to_check; -- To make this delta schema file idempotent. CREATE TABLE _extremities_to_check AS SELECT event_id FROM event_forward_extremities; +CREATE INDEX _extremities_to_check_id ON _extremities_to_check(event_id); From 06675db684f06b5a369846aac896216bf4cc74ed Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 15:05:26 +0100 Subject: [PATCH 41/92] Newsfile --- changelog.d/5291.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5291.bugfix diff --git a/changelog.d/5291.bugfix b/changelog.d/5291.bugfix new file mode 100644 index 0000000000..9e14d20289 --- /dev/null +++ b/changelog.d/5291.bugfix @@ -0,0 +1 @@ +Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. 
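The index added to the scratch table above is presumably there so the per-batch DELETE FROM _extremities_to_check WHERE event_id IN (...) does not have to scan the whole table. A quick standalone way to see the planner pick it up (sqlite3 here; a production deployment may run PostgreSQL, where the plan output looks different):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.executescript("""
        CREATE TABLE event_forward_extremities (event_id TEXT, room_id TEXT);
        CREATE TABLE _extremities_to_check AS
            SELECT event_id FROM event_forward_extremities;
        CREATE INDEX _extremities_to_check_id ON _extremities_to_check(event_id);
    """)
    plan = conn.execute(
        "EXPLAIN QUERY PLAN DELETE FROM _extremities_to_check WHERE event_id IN (?, ?)",
        ("$a", "$b"),
    ).fetchall()
    print(plan)  # expect a SEARCH ... USING COVERING INDEX _extremities_to_check_id entry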
From 54d50fbfdf8c39d92c36291a572419cee6b9b916 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 15:15:13 +0100 Subject: [PATCH 42/92] Get events all at once --- synapse/storage/stats.py | 57 ++++++++++++++++++---------------------- 1 file changed, 25 insertions(+), 32 deletions(-) diff --git a/synapse/storage/stats.py b/synapse/storage/stats.py index eb0ced5b5e..99b4af5555 100644 --- a/synapse/storage/stats.py +++ b/synapse/storage/stats.py @@ -179,46 +179,39 @@ class StatsStore(StateDeltasStore): current_state_ids = yield self.get_current_state_ids(room_id) - join_rules = yield self.get_event( - current_state_ids.get((EventTypes.JoinRules, "")), allow_none=True - ) - history_visibility = yield self.get_event( - current_state_ids.get((EventTypes.RoomHistoryVisibility, "")), - allow_none=True, - ) - encryption = yield self.get_event( - current_state_ids.get((EventTypes.RoomEncryption, "")), allow_none=True - ) - name = yield self.get_event( - current_state_ids.get((EventTypes.Name, "")), allow_none=True - ) - topic = yield self.get_event( - current_state_ids.get((EventTypes.Topic, "")), allow_none=True - ) - avatar = yield self.get_event( - current_state_ids.get((EventTypes.RoomAvatar, "")), allow_none=True - ) - canonical_alias = yield self.get_event( - current_state_ids.get((EventTypes.CanonicalAlias, "")), allow_none=True + join_rules_id = current_state_ids.get((EventTypes.JoinRules, "")) + history_visibility_id = current_state_ids.get( + (EventTypes.RoomHistoryVisibility, "") ) + encryption_id = current_state_ids.get((EventTypes.RoomEncryption, "")) + name_id = current_state_ids.get((EventTypes.Name, "")) + topic_id = current_state_ids.get((EventTypes.Topic, "")) + avatar_id = current_state_ids.get((EventTypes.RoomAvatar, "")) + canonical_alias_id = current_state_ids.get((EventTypes.CanonicalAlias, "")) - def _or_none(x, arg): - if x: - return x.content.get(arg) + state_events = yield self.get_events([ + join_rules_id, history_visibility_id, encryption_id, name_id, + topic_id, avatar_id, canonical_alias_id, + ]) + + def _get_or_none(event_id, arg): + event = state_events.get(event_id) + if event: + return event.content.get(arg) return None yield self.update_room_state( room_id, { - "join_rules": _or_none(join_rules, "join_rule"), - "history_visibility": _or_none( - history_visibility, "history_visibility" + "join_rules": _get_or_none(join_rules_id, "join_rule"), + "history_visibility": _get_or_none( + history_visibility_id, "history_visibility" ), - "encryption": _or_none(encryption, "algorithm"), - "name": _or_none(name, "name"), - "topic": _or_none(topic, "topic"), - "avatar": _or_none(avatar, "url"), - "canonical_alias": _or_none(canonical_alias, "alias"), + "encryption": _get_or_none(encryption_id, "algorithm"), + "name": _get_or_none(name_id, "name"), + "topic": _get_or_none(topic_id, "topic"), + "avatar": _get_or_none(avatar_id, "url"), + "canonical_alias": _get_or_none(canonical_alias_id, "alias"), }, ) From 04710cc2d71127b1f416e87f7a4aea3ce6d93410 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 15:22:32 +0100 Subject: [PATCH 43/92] Fetch membership counts all at once --- synapse/storage/roommember.py | 33 +++++++++++---------------------- synapse/storage/stats.py | 23 +++++++---------------- 2 files changed, 18 insertions(+), 38 deletions(-) diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 4bd1669458..7617913326 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -142,26 +142,9 @@ 
class RoomMemberWorkerStore(EventsWorkerStore): return self.runInteraction("get_room_summary", _get_room_summary_txn) - def _get_user_count_in_room_txn(self, txn, room_id, membership): + def _get_user_counts_in_room_txn(self, txn, room_id): """ - See get_user_count_in_room. - """ - sql = ( - "SELECT count(*) FROM room_memberships as m" - " INNER JOIN current_state_events as c" - " ON m.event_id = c.event_id " - " AND m.room_id = c.room_id " - " AND m.user_id = c.state_key" - " WHERE c.type = 'm.room.member' AND c.room_id = ? AND m.membership = ?" - ) - - txn.execute(sql, (room_id, membership)) - row = txn.fetchone() - return row[0] - - def get_user_count_in_room(self, room_id, membership): - """ - Get the user count in a room with a particular membership. + Get the user count in a room by membership. Args: room_id (str) @@ -170,9 +153,15 @@ class RoomMemberWorkerStore(EventsWorkerStore): Returns: Deferred[int] """ - return self.runInteraction( - "get_users_in_room", self._get_user_count_in_room_txn, room_id, membership - ) + sql = """ + SELECT m.membership, count(*) FROM room_memberships as m + INNER JOIN current_state_events as c USING(event_id) + WHERE c.type = 'm.room.member' AND c.room_id = ? + GROUP BY m.membership + """ + + txn.execute(sql, (room_id,)) + return {row[0]: row[1] for row in txn} @cached() def get_invited_rooms_for_user(self, user_id): diff --git a/synapse/storage/stats.py b/synapse/storage/stats.py index 99b4af5555..727f60b3bd 100644 --- a/synapse/storage/stats.py +++ b/synapse/storage/stats.py @@ -226,18 +226,9 @@ class StatsStore(StateDeltasStore): current_token = self._get_max_stream_id_in_current_state_deltas_txn(txn) current_state_events = len(current_state_ids) - joined_members = self._get_user_count_in_room_txn( - txn, room_id, Membership.JOIN - ) - invited_members = self._get_user_count_in_room_txn( - txn, room_id, Membership.INVITE - ) - left_members = self._get_user_count_in_room_txn( - txn, room_id, Membership.LEAVE - ) - banned_members = self._get_user_count_in_room_txn( - txn, room_id, Membership.BAN - ) + + membership_counts = self._get_user_counts_in_room_txn(txn, room_id) + total_state_events = self._get_total_state_event_counts_txn( txn, room_id ) @@ -250,10 +241,10 @@ class StatsStore(StateDeltasStore): { "bucket_size": self.stats_bucket_size, "current_state_events": current_state_events, - "joined_members": joined_members, - "invited_members": invited_members, - "left_members": left_members, - "banned_members": banned_members, + "joined_members": membership_counts.get(Membership.JOIN, 0), + "invited_members": membership_counts.get(Membership.INVITE, 0), + "left_members": membership_counts.get(Membership.LEAVE, 0), + "banned_members": membership_counts.get(Membership.BAN, 0), "state_events": total_state_events, }, ) From e2c46ed851599dc08cc8a822e07c0d4f9a050ee2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 15:26:38 +0100 Subject: [PATCH 44/92] Move deletion from table inside txn --- synapse/storage/stats.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/synapse/storage/stats.py b/synapse/storage/stats.py index 727f60b3bd..a99637d4b4 100644 --- a/synapse/storage/stats.py +++ b/synapse/storage/stats.py @@ -254,10 +254,13 @@ class StatsStore(StateDeltasStore): {"room_id": room_id, "token": current_token}, ) + # We've finished a room. Delete it from the table. 
+ self._simple_delete_one_txn( + txn, TEMP_TABLE + "_rooms", {"room_id": room_id}, + ) + yield self.runInteraction("update_room_stats", _fetch_data) - # We've finished a room. Delete it from the table. - yield self._simple_delete_one(TEMP_TABLE + "_rooms", {"room_id": room_id}) # Update the remaining counter. progress["remaining"] -= 1 yield self.runInteraction( From 5ac75fc9a2d80ddf2974d281c381f82515606403 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 15:26:55 +0100 Subject: [PATCH 45/92] Join against events to use its room_id index --- synapse/storage/events_worker.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index b56c83e460..1782428048 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -618,7 +618,12 @@ class EventsWorkerStore(SQLBaseStore): """ See get_total_state_event_counts. """ - sql = "SELECT COUNT(*) FROM state_events WHERE room_id=?" + # We join against the events table as that has an index on room_id + sql = """ + SELECT COUNT(*) FROM state_events + INNER JOIN events USING (room_id, event_id) + WHERE room_id=? + """ txn.execute(sql, (room_id,)) row = txn.fetchone() return row[0] if row else 0 From 8824325b829baa5262242a50d0ea2c9b738feb78 Mon Sep 17 00:00:00 2001 From: Eisha Chen-yen-su Date: Thu, 30 May 2019 16:58:53 +0200 Subject: [PATCH 46/92] Fix ignored filter field in `/messages` endpoint This fixes a bug which were causing the "event_format" field to be ignored in the filter of requests to the `/messages` endpoint of the CS API. Signed-off-by: Eisha Chen-yen-su --- synapse/rest/client/v1/room.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 255a85c588..b92c6a9a9c 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -475,6 +475,8 @@ class RoomMessageListRestServlet(ClientV1RestServlet): if filter_bytes: filter_json = urlparse.unquote(filter_bytes.decode("UTF-8")) event_filter = Filter(json.loads(filter_json)) + if event_filter.filter_json.get("event_format", "client") == "federation": + as_client_event = False else: event_filter = None msgs = yield self.pagination_handler.get_messages( From 0b6bc36402b747a6c1bad119aaffdcd326990346 Mon Sep 17 00:00:00 2001 From: Eisha Chen-yen-su Date: Thu, 30 May 2019 17:07:21 +0200 Subject: [PATCH 47/92] Add changelog --- changelog.d/5293.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5293.bugfix diff --git a/changelog.d/5293.bugfix b/changelog.d/5293.bugfix new file mode 100644 index 0000000000..aa519a8433 --- /dev/null +++ b/changelog.d/5293.bugfix @@ -0,0 +1 @@ +Fix a bug where it is not possible to get events in the federation format with the request `GET /_matrix/client/r0/rooms/{roomId}/messages`. From df9d9005448d837c5e1a2b75edb5730e2062b0f2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 29 May 2019 11:56:24 +0100 Subject: [PATCH 48/92] Correctly filter out extremities with soft failed prevs (#5274) When we receive a soft failed event we, correctly, *do not* update the forward extremity table with the event. However, if we later receive an event that references the soft failed event we then need to remove the soft failed events prev events from the forward extremities table, otherwise we just build up forward extremities. 
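As a rough, in-memory illustration of the pruning this adds (the real code has to do
this in SQL batches; the helper names below are invented): starting from the new
events' prev events, walk up through any that were soft failed or rejected, collecting
their prev events as we go, and drop that collected set from the candidate forward
extremities.

    def prevs_before_rejected(prev_ids, prev_events_of, is_soft_failed_or_rejected):
        # prev_events_of and is_soft_failed_or_rejected stand in for DB lookups.
        existing_prevs = set()
        to_check = list(prev_ids)
        while to_check:
            event_id = to_check.pop()
            if not is_soft_failed_or_rejected(event_id):
                continue
            for prev_id in prev_events_of(event_id):
                if prev_id not in existing_prevs:
                    existing_prevs.add(prev_id)
                    to_check.append(prev_id)
        return existing_prevs

    # Extremity A, soft-failed SF1 with prev A, new event B with prev SF1:
    # the walk returns {"A"}, which is then removed from the forward extremities.
    prevs = {"SF1": {"A"}, "B": {"SF1"}, "A": set()}
    print(prevs_before_rejected(
        {"SF1"},                        # prev events of the new event B
        lambda e: prevs.get(e, set()),  # prev_events_of
        lambda e: e.startswith("SF"),   # is_soft_failed_or_rejected (toy rule)
    ))
    # -> {'A'}
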
Fixes #5269 --- changelog.d/5274.bugfix | 1 + synapse/storage/events.py | 82 +++++++++++++++++++++++++++++++++++++-- 2 files changed, 80 insertions(+), 3 deletions(-) create mode 100644 changelog.d/5274.bugfix diff --git a/changelog.d/5274.bugfix b/changelog.d/5274.bugfix new file mode 100644 index 0000000000..9e14d20289 --- /dev/null +++ b/changelog.d/5274.bugfix @@ -0,0 +1 @@ +Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 2ffc27ff41..6e9f3d1dc0 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -554,10 +554,18 @@ class EventsStore( e_id for event in new_events for e_id in event.prev_event_ids() ) - # Finally, remove any events which are prev_events of any existing events. + # Remove any events which are prev_events of any existing events. existing_prevs = yield self._get_events_which_are_prevs(result) result.difference_update(existing_prevs) + # Finally handle the case where the new events have soft-failed prev + # events. If they do we need to remove them and their prev events, + # otherwise we end up with dangling extremities. + existing_prevs = yield self._get_prevs_before_rejected( + e_id for event in new_events for e_id in event.prev_event_ids() + ) + result.difference_update(existing_prevs) + defer.returnValue(result) @defer.inlineCallbacks @@ -573,7 +581,7 @@ class EventsStore( """ results = [] - def _get_events(txn, batch): + def _get_events_which_are_prevs_txn(txn, batch): sql = """ SELECT prev_event_id, internal_metadata FROM event_edges @@ -596,10 +604,78 @@ class EventsStore( ) for chunk in batch_iter(event_ids, 100): - yield self.runInteraction("_get_events_which_are_prevs", _get_events, chunk) + yield self.runInteraction( + "_get_events_which_are_prevs", + _get_events_which_are_prevs_txn, + chunk, + ) defer.returnValue(results) + @defer.inlineCallbacks + def _get_prevs_before_rejected(self, event_ids): + """Get soft-failed ancestors to remove from the extremities. + + Given a set of events, find all those that have been soft-failed or + rejected. Returns those soft failed/rejected events and their prev + events (whether soft-failed/rejected or not), and recurses up the + prev-event graph until it finds no more soft-failed/rejected events. + + This is used to find extremities that are ancestors of new events, but + are separated by soft failed events. + + Args: + event_ids (Iterable[str]): Events to find prev events for. Note + that these must have already been persisted. + + Returns: + Deferred[set[str]] + """ + + # The set of event_ids to return. This includes all soft-failed events + # and their prev events. + existing_prevs = set() + + def _get_prevs_before_rejected_txn(txn, batch): + to_recursively_check = batch + + while to_recursively_check: + sql = """ + SELECT + event_id, prev_event_id, internal_metadata, + rejections.event_id IS NOT NULL + FROM event_edges + INNER JOIN events USING (event_id) + LEFT JOIN rejections USING (event_id) + LEFT JOIN event_json USING (event_id) + WHERE + event_id IN (%s) + AND NOT events.outlier + """ % ( + ",".join("?" 
for _ in to_recursively_check), + ) + + txn.execute(sql, to_recursively_check) + to_recursively_check = [] + + for event_id, prev_event_id, metadata, rejected in txn: + if prev_event_id in existing_prevs: + continue + + soft_failed = json.loads(metadata).get("soft_failed") + if soft_failed or rejected: + to_recursively_check.append(prev_event_id) + existing_prevs.add(prev_event_id) + + for chunk in batch_iter(event_ids, 100): + yield self.runInteraction( + "_get_prevs_before_rejected", + _get_prevs_before_rejected_txn, + chunk, + ) + + defer.returnValue(existing_prevs) + @defer.inlineCallbacks def _get_new_state_after_events( self, room_id, events_context, old_latest_event_ids, new_latest_event_ids From 6ebc08c09d4ced251750cb087aa4689f90cdd4b6 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 28 May 2019 18:52:41 +0100 Subject: [PATCH 49/92] Add DB bg update to cleanup extremities. Due to #5269 we may have extremities in our DB that we shouldn't have, so lets add a cleanup task such to remove those. --- synapse/storage/events.py | 186 ++++++++++++++++++ .../delta/54/delete_forward_extremities.sql | 19 ++ 2 files changed, 205 insertions(+) create mode 100644 synapse/storage/schema/delta/54/delete_forward_extremities.sql diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 6e9f3d1dc0..a9be143bd5 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -221,6 +221,7 @@ class EventsStore( ): EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts" EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url" + EVENT_DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities" def __init__(self, db_conn, hs): super(EventsStore, self).__init__(db_conn, hs) @@ -252,6 +253,11 @@ class EventsStore( psql_only=True, ) + self.register_background_update_handler( + self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES, + self._cleanup_extremities_bg_update, + ) + self._event_persist_queue = _EventPeristenceQueue() self._state_resolution_handler = hs.get_state_resolution_handler() @@ -2341,6 +2347,186 @@ class EventsStore( get_all_updated_current_state_deltas_txn, ) + @defer.inlineCallbacks + def _cleanup_extremities_bg_update(self, progress, batch_size): + """Background update to clean out extremities that should have been + deleted previously. + + Mainly used to deal with the aftermath of #5269. + """ + + # This works by first copying all existing forward extremities into the + # `_extremities_to_check` table at start up, and then checking each + # event in that table whether we have any descendants that are not + # soft-failed/rejected. If that is the case then we delete that event + # from the forward extremities table. + # + # For efficiency, we do this in batches by recursively pulling out all + # descendants of a batch until we find the non soft-failed/rejected + # events, i.e. the set of descendants whose chain of prev events back + # to the batch of extremities are all soft-failed or rejected. + # Typically, we won't find any such events as extremities will rarely + # have any descendants, but if they do then we should delete those + # extremities. + + def _cleanup_extremities_bg_update_txn(txn): + # The set of extremity event IDs that we're checking this round + original_set = set() + + # A dict[str, set[str]] of event ID to their prev events. + graph = {} + + # The set of descendants of the original set that are not rejected + # nor soft-failed. Ancestors of these events should be removed + # from the forward extremities table. 
+ non_rejected_leaves = set() + + # Set of event IDs that have been soft failed, and for which we + # should check if they have descendants which haven't been soft + # failed. + soft_failed_events_to_lookup = set() + + # First, we get `batch_size` events from the table, pulling out + # their prev events, if any, and their prev events rejection status. + txn.execute( + """SELECT prev_event_id, event_id, internal_metadata, + rejections.event_id IS NOT NULL, events.outlier + FROM ( + SELECT event_id AS prev_event_id + FROM _extremities_to_check + LIMIT ? + ) AS f + LEFT JOIN event_edges USING (prev_event_id) + LEFT JOIN events USING (event_id) + LEFT JOIN event_json USING (event_id) + LEFT JOIN rejections USING (event_id) + """, (batch_size,) + ) + + for prev_event_id, event_id, metadata, rejected, outlier in txn: + original_set.add(prev_event_id) + + if not event_id or outlier: + # Common case where the forward extremity doesn't have any + # descendants. + continue + + graph.setdefault(event_id, set()).add(prev_event_id) + + soft_failed = False + if metadata: + soft_failed = json.loads(metadata).get("soft_failed") + + if soft_failed or rejected: + soft_failed_events_to_lookup.add(event_id) + else: + non_rejected_leaves.add(event_id) + + # Now we recursively check all the soft-failed descendants we + # found above in the same way, until we have nothing left to + # check. + while soft_failed_events_to_lookup: + # We only want to do 100 at a time, so we split given list + # into two. + batch = list(soft_failed_events_to_lookup) + to_check, to_defer = batch[:100], batch[100:] + soft_failed_events_to_lookup = set(to_defer) + + sql = """SELECT prev_event_id, event_id, internal_metadata, + rejections.event_id IS NOT NULL + FROM event_edges + INNER JOIN events USING (event_id) + INNER JOIN event_json USING (event_id) + LEFT JOIN rejections USING (event_id) + WHERE + prev_event_id IN (%s) + AND NOT events.outlier + """ % ( + ",".join("?" for _ in to_check), + ) + txn.execute(sql, to_check) + + for prev_event_id, event_id, metadata, rejected in txn: + if event_id in graph: + # Already handled this event previously, but we still + # want to record the edge. + graph.setdefault(event_id, set()).add(prev_event_id) + logger.info("Already handled") + continue + + graph.setdefault(event_id, set()).add(prev_event_id) + + soft_failed = json.loads(metadata).get("soft_failed") + if soft_failed or rejected: + soft_failed_events_to_lookup.add(event_id) + else: + non_rejected_leaves.add(event_id) + + # We have a set of non-soft-failed descendants, so we recurse up + # the graph to find all ancestors and add them to the set of event + # IDs that we can delete from forward extremities table. 
+ to_delete = set() + while non_rejected_leaves: + event_id = non_rejected_leaves.pop() + prev_event_ids = graph.get(event_id, set()) + non_rejected_leaves.update(prev_event_ids) + to_delete.update(prev_event_ids) + + to_delete.intersection_update(original_set) + + logger.info("Deleting up to %d forward extremities", len(to_delete)) + + self._simple_delete_many_txn( + txn=txn, + table="event_forward_extremities", + column="event_id", + iterable=to_delete, + keyvalues={}, + ) + + if to_delete: + # We now need to invalidate the caches of these rooms + rows = self._simple_select_many_txn( + txn, + table="events", + column="event_id", + iterable=to_delete, + keyvalues={}, + retcols=("room_id",) + ) + for row in rows: + txn.call_after( + self.get_latest_event_ids_in_room.invalidate, + (row["room_id"],) + ) + + self._simple_delete_many_txn( + txn=txn, + table="_extremities_to_check", + column="event_id", + iterable=original_set, + keyvalues={}, + ) + + return len(original_set) + + num_handled = yield self.runInteraction( + "_cleanup_extremities_bg_update", _cleanup_extremities_bg_update_txn, + ) + + if not num_handled: + yield self._end_background_update(self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES) + + def _drop_table_txn(txn): + txn.execute("DROP TABLE _extremities_to_check") + + yield self.runInteraction( + "_cleanup_extremities_bg_update_drop_table", + _drop_table_txn, + ) + + defer.returnValue(num_handled) + AllNewEventsResult = namedtuple( "AllNewEventsResult", diff --git a/synapse/storage/schema/delta/54/delete_forward_extremities.sql b/synapse/storage/schema/delta/54/delete_forward_extremities.sql new file mode 100644 index 0000000000..7056bd1d00 --- /dev/null +++ b/synapse/storage/schema/delta/54/delete_forward_extremities.sql @@ -0,0 +1,19 @@ +/* Copyright 2019 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +INSERT INTO background_updates (update_name, progress_json) VALUES + ('delete_soft_failed_extremities', '{}'); + +CREATE TABLE _extremities_to_check AS SELECT event_id FROM event_forward_extremities; From 1d818fde14595299de9e13008c67239ca677014f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 29 May 2019 11:58:32 +0100 Subject: [PATCH 50/92] Log actual number of entries deleted --- synapse/storage/_base.py | 12 +++++++++--- synapse/storage/events.py | 6 ++++-- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index fa6839ceca..3fe827cd43 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -1261,7 +1261,8 @@ class SQLBaseStore(object): " AND ".join("%s = ?" 
% (k,) for k in keyvalues), ) - return txn.execute(sql, list(keyvalues.values())) + txn.execute(sql, list(keyvalues.values())) + return txn.rowcount def _simple_delete_many(self, table, column, iterable, keyvalues, desc): return self.runInteraction( @@ -1280,9 +1281,12 @@ class SQLBaseStore(object): column : column name to test for inclusion against `iterable` iterable : list keyvalues : dict of column names and values to select the rows with + + Returns: + int: Number rows deleted """ if not iterable: - return + return 0 sql = "DELETE FROM %s" % table @@ -1297,7 +1301,9 @@ class SQLBaseStore(object): if clauses: sql = "%s WHERE %s" % (sql, " AND ".join(clauses)) - return txn.execute(sql, values) + txn.execute(sql, values) + + return txn.rowcount def _get_cache_dict( self, db_conn, table, entity_column, stream_column, max_value, limit=100000 diff --git a/synapse/storage/events.py b/synapse/storage/events.py index a9be143bd5..a9664928ca 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -2476,7 +2476,7 @@ class EventsStore( logger.info("Deleting up to %d forward extremities", len(to_delete)) - self._simple_delete_many_txn( + deleted = self._simple_delete_many_txn( txn=txn, table="event_forward_extremities", column="event_id", @@ -2484,7 +2484,9 @@ class EventsStore( keyvalues={}, ) - if to_delete: + logger.info("Deleted %d forward extremities", deleted) + + if deleted: # We now need to invalidate the caches of these rooms rows = self._simple_select_many_txn( txn, From 6574d4ad0a95e1d5a709e5d1d61555561dc180f2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 29 May 2019 14:19:11 +0100 Subject: [PATCH 51/92] Add test --- tests/storage/test_cleanup_extrems.py | 248 ++++++++++++++++++++++++++ 1 file changed, 248 insertions(+) create mode 100644 tests/storage/test_cleanup_extrems.py diff --git a/tests/storage/test_cleanup_extrems.py b/tests/storage/test_cleanup_extrems.py new file mode 100644 index 0000000000..6dda66ecd3 --- /dev/null +++ b/tests/storage/test_cleanup_extrems.py @@ -0,0 +1,248 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os.path + +from synapse.api.constants import EventTypes +from synapse.storage import prepare_database +from synapse.types import Requester, UserID + +from tests.unittest import HomeserverTestCase + + +class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase): + """Test the background update to clean forward extremities table. 
+ """ + + def prepare(self, reactor, clock, homeserver): + self.store = homeserver.get_datastore() + self.event_creator = homeserver.get_event_creation_handler() + self.room_creator = homeserver.get_room_creation_handler() + + # Create a test user and room + self.user = UserID("alice", "test") + self.requester = Requester(self.user, None, False, None, None) + info = self.get_success(self.room_creator.create_room(self.requester, {})) + self.room_id = info["room_id"] + + def create_and_send_event(self, soft_failed=False, prev_event_ids=None): + """Create and send an event. + + Args: + soft_failed (bool): Whether to create a soft failed event or not + prev_event_ids (list[str]|None): Explicitly set the prev events, + or if None just use the default + + Returns: + str: The new event's ID. + """ + prev_events_and_hashes = None + if prev_event_ids: + prev_events_and_hashes = [[p, {}, 0] for p in prev_event_ids] + + event, context = self.get_success( + self.event_creator.create_event( + self.requester, + { + "type": EventTypes.Message, + "room_id": self.room_id, + "sender": self.user.to_string(), + "content": {"body": "", "msgtype": "m.text"}, + }, + prev_events_and_hashes=prev_events_and_hashes, + ) + ) + + if soft_failed: + event.internal_metadata.soft_failed = True + + self.get_success( + self.event_creator.send_nonmember_event(self.requester, event, context) + ) + + return event.event_id + + def add_extremity(self, event_id): + """Add the given event as an extremity to the room. + """ + self.get_success( + self.store._simple_insert( + table="event_forward_extremities", + values={"room_id": self.room_id, "event_id": event_id}, + desc="test_add_extremity", + ) + ) + + self.store.get_latest_event_ids_in_room.invalidate((self.room_id,)) + + def run_background_update(self): + """Re run the background update to clean up the extremities. + """ + # Make sure we don't clash with in progress updates. + self.assertTrue(self.store._all_done, "Background updates are still ongoing") + + schema_path = os.path.join( + prepare_database.dir_path, + "schema", + "delta", + "54", + "delete_forward_extremities.sql", + ) + + def run_delta_file(txn): + prepare_database.executescript(txn, schema_path) + + self.get_success( + self.store.runInteraction("test_delete_forward_extremities", run_delta_file) + ) + + # Ugh, have to reset this flag + self.store._all_done = False + + while not self.get_success(self.store.has_completed_background_updates()): + self.get_success(self.store.do_next_background_update(100), by=0.1) + + def test_soft_failed_extremities_handled_correctly(self): + """Test that extremities are correctly calculated in the presence of + soft failed events. + + Tests a graph like: + + A <- SF1 <- SF2 <- B + + Where SF* are soft failed. + """ + + # Create the room graph + event_id_1 = self.create_and_send_event() + event_id_2 = self.create_and_send_event(True, [event_id_1]) + event_id_3 = self.create_and_send_event(True, [event_id_2]) + event_id_4 = self.create_and_send_event(False, [event_id_3]) + + # Check the latest events are as expected + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + + self.assertEqual(latest_event_ids, [event_id_4]) + + def test_basic_cleanup(self): + """Test that extremities are correctly calculated in the presence of + soft failed events. 
+ + Tests a graph like: + + A <- SF1 <- B + + Where SF* are soft failed, and with extremities of A and B + """ + # Create the room graph + event_id_a = self.create_and_send_event() + event_id_sf1 = self.create_and_send_event(True, [event_id_a]) + event_id_b = self.create_and_send_event(False, [event_id_sf1]) + + # Add the new extremity and check the latest events are as expected + self.add_extremity(event_id_a) + + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + self.assertEqual(set(latest_event_ids), set((event_id_a, event_id_b))) + + # Run the background update and check it did the right thing + self.run_background_update() + + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + self.assertEqual(latest_event_ids, [event_id_b]) + + def test_chain_of_fail_cleanup(self): + """Test that extremities are correctly calculated in the presence of + soft failed events. + + Tests a graph like: + + A <- SF1 <- SF2 <- B + + Where SF* are soft failed, and with extremities of A and B + """ + # Create the room graph + event_id_a = self.create_and_send_event() + event_id_sf1 = self.create_and_send_event(True, [event_id_a]) + event_id_sf2 = self.create_and_send_event(True, [event_id_sf1]) + event_id_b = self.create_and_send_event(False, [event_id_sf2]) + + # Add the new extremity and check the latest events are as expected + self.add_extremity(event_id_a) + + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + self.assertEqual(set(latest_event_ids), set((event_id_a, event_id_b))) + + # Run the background update and check it did the right thing + self.run_background_update() + + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + self.assertEqual(latest_event_ids, [event_id_b]) + + def test_forked_graph_cleanup(self): + r"""Test that extremities are correctly calculated in the presence of + soft failed events. + + Tests a graph like, where time flows down the page: + + A B + / \ / + / \ / + SF1 SF2 + | | + SF3 | + / \ | + | \ | + C SF4 + + Where SF* are soft failed, and with them A, B and C marked as + extremities. This should resolve to B and C being marked as extremity. 
+ """ + # Create the room graph + event_id_a = self.create_and_send_event() + event_id_b = self.create_and_send_event() + event_id_sf1 = self.create_and_send_event(True, [event_id_a]) + event_id_sf2 = self.create_and_send_event(True, [event_id_a, event_id_b]) + event_id_sf3 = self.create_and_send_event(True, [event_id_sf1]) + self.create_and_send_event(True, [event_id_sf2, event_id_sf3]) # SF4 + event_id_c = self.create_and_send_event(False, [event_id_sf3]) + + # Add the new extremity and check the latest events are as expected + self.add_extremity(event_id_a) + + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + self.assertEqual( + set(latest_event_ids), set((event_id_a, event_id_b, event_id_c)) + ) + + # Run the background update and check it did the right thing + self.run_background_update() + + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + self.assertEqual(set(latest_event_ids), set([event_id_b, event_id_c])) From 9f5268388abb266260c30e9855da35a0b6b11bcb Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 28 May 2019 18:56:02 +0100 Subject: [PATCH 52/92] Newsfile --- changelog.d/5278.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5278.bugfix diff --git a/changelog.d/5278.bugfix b/changelog.d/5278.bugfix new file mode 100644 index 0000000000..9e14d20289 --- /dev/null +++ b/changelog.d/5278.bugfix @@ -0,0 +1 @@ +Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. From 9b8cd66524304f76209a59e12f4eca561b1a43d3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 10:55:55 +0100 Subject: [PATCH 53/92] Fixup comments and logging --- synapse/storage/events.py | 21 +++++++++++-------- .../delta/54/delete_forward_extremities.sql | 3 +++ 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index a9664928ca..418d88b8dc 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -2387,7 +2387,8 @@ class EventsStore( soft_failed_events_to_lookup = set() # First, we get `batch_size` events from the table, pulling out - # their prev events, if any, and their prev events rejection status. + # their successor events, if any, and their successor events + # rejection status. txn.execute( """SELECT prev_event_id, event_id, internal_metadata, rejections.event_id IS NOT NULL, events.outlier @@ -2450,11 +2451,10 @@ class EventsStore( if event_id in graph: # Already handled this event previously, but we still # want to record the edge. 
- graph.setdefault(event_id, set()).add(prev_event_id) - logger.info("Already handled") + graph[event_id].add(prev_event_id) continue - graph.setdefault(event_id, set()).add(prev_event_id) + graph[event_id] = {prev_event_id} soft_failed = json.loads(metadata).get("soft_failed") if soft_failed or rejected: @@ -2474,8 +2474,6 @@ class EventsStore( to_delete.intersection_update(original_set) - logger.info("Deleting up to %d forward extremities", len(to_delete)) - deleted = self._simple_delete_many_txn( txn=txn, table="event_forward_extremities", @@ -2484,7 +2482,11 @@ class EventsStore( keyvalues={}, ) - logger.info("Deleted %d forward extremities", deleted) + logger.info( + "Deleted %d forward extremities of %d checked, to clean up #5269", + deleted, + len(original_set), + ) if deleted: # We now need to invalidate the caches of these rooms @@ -2496,10 +2498,11 @@ class EventsStore( keyvalues={}, retcols=("room_id",) ) - for row in rows: + room_ids = set(row["room_id"] for row in rows) + for room_id in room_ids: txn.call_after( self.get_latest_event_ids_in_room.invalidate, - (row["room_id"],) + (room_id,) ) self._simple_delete_many_txn( diff --git a/synapse/storage/schema/delta/54/delete_forward_extremities.sql b/synapse/storage/schema/delta/54/delete_forward_extremities.sql index 7056bd1d00..aa40f13da7 100644 --- a/synapse/storage/schema/delta/54/delete_forward_extremities.sql +++ b/synapse/storage/schema/delta/54/delete_forward_extremities.sql @@ -13,7 +13,10 @@ * limitations under the License. */ +-- Start a background job to cleanup extremities that were incorrectly added +-- by bug #5269. INSERT INTO background_updates (update_name, progress_json) VALUES ('delete_soft_failed_extremities', '{}'); +DROP TABLE IF EXISTS _extremities_to_check; -- To make this delta schema file idempotent. CREATE TABLE _extremities_to_check AS SELECT event_id FROM event_forward_extremities; From 98f438b52a93b1ce9d1f3e93fa57db0f870f9101 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 11:22:59 +0100 Subject: [PATCH 54/92] Move event background updates to a separate file --- synapse/storage/__init__.py | 2 + synapse/storage/events.py | 371 +------------------------ synapse/storage/events_bg_updates.py | 401 +++++++++++++++++++++++++++ 3 files changed, 405 insertions(+), 369 deletions(-) create mode 100644 synapse/storage/events_bg_updates.py diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 7522d3fd57..56c434d4e8 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -36,6 +36,7 @@ from .engines import PostgresEngine from .event_federation import EventFederationStore from .event_push_actions import EventPushActionsStore from .events import EventsStore +from .events_bg_updates import EventsBackgroundUpdatesStore from .filtering import FilteringStore from .group_server import GroupServerStore from .keys import KeyStore @@ -65,6 +66,7 @@ logger = logging.getLogger(__name__) class DataStore( + EventsBackgroundUpdatesStore, RoomMemberStore, RoomStore, RegistrationStore, diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 418d88b8dc..f9162be9b9 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd -# Copyright 2018 New Vector Ltd +# Copyright 2018-2019 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -219,47 +220,11 @@ class EventsStore( EventsWorkerStore, BackgroundUpdateStore, ): - EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts" - EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url" - EVENT_DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities" def __init__(self, db_conn, hs): super(EventsStore, self).__init__(db_conn, hs) - self.register_background_update_handler( - self.EVENT_ORIGIN_SERVER_TS_NAME, self._background_reindex_origin_server_ts - ) - self.register_background_update_handler( - self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, - self._background_reindex_fields_sender, - ) - - self.register_background_index_update( - "event_contains_url_index", - index_name="event_contains_url_index", - table="events", - columns=["room_id", "topological_ordering", "stream_ordering"], - where_clause="contains_url = true AND outlier = false", - ) - - # an event_id index on event_search is useful for the purge_history - # api. Plus it means we get to enforce some integrity with a UNIQUE - # clause - self.register_background_index_update( - "event_search_event_id_idx", - index_name="event_search_event_id_idx", - table="event_search", - columns=["event_id"], - unique=True, - psql_only=True, - ) - - self.register_background_update_handler( - self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES, - self._cleanup_extremities_bg_update, - ) self._event_persist_queue = _EventPeristenceQueue() - self._state_resolution_handler = hs.get_state_resolution_handler() @defer.inlineCallbacks @@ -1585,153 +1550,6 @@ class EventsStore( ret = yield self.runInteraction("count_daily_active_rooms", _count) defer.returnValue(ret) - @defer.inlineCallbacks - def _background_reindex_fields_sender(self, progress, batch_size): - target_min_stream_id = progress["target_min_stream_id_inclusive"] - max_stream_id = progress["max_stream_id_exclusive"] - rows_inserted = progress.get("rows_inserted", 0) - - INSERT_CLUMP_SIZE = 1000 - - def reindex_txn(txn): - sql = ( - "SELECT stream_ordering, event_id, json FROM events" - " INNER JOIN event_json USING (event_id)" - " WHERE ? <= stream_ordering AND stream_ordering < ?" - " ORDER BY stream_ordering DESC" - " LIMIT ?" - ) - - txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size)) - - rows = txn.fetchall() - if not rows: - return 0 - - min_stream_id = rows[-1][0] - - update_rows = [] - for row in rows: - try: - event_id = row[1] - event_json = json.loads(row[2]) - sender = event_json["sender"] - content = event_json["content"] - - contains_url = "url" in content - if contains_url: - contains_url &= isinstance(content["url"], text_type) - except (KeyError, AttributeError): - # If the event is missing a necessary field then - # skip over it. - continue - - update_rows.append((sender, contains_url, event_id)) - - sql = "UPDATE events SET sender = ?, contains_url = ? WHERE event_id = ?" 
- - for index in range(0, len(update_rows), INSERT_CLUMP_SIZE): - clump = update_rows[index : index + INSERT_CLUMP_SIZE] - txn.executemany(sql, clump) - - progress = { - "target_min_stream_id_inclusive": target_min_stream_id, - "max_stream_id_exclusive": min_stream_id, - "rows_inserted": rows_inserted + len(rows), - } - - self._background_update_progress_txn( - txn, self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, progress - ) - - return len(rows) - - result = yield self.runInteraction( - self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, reindex_txn - ) - - if not result: - yield self._end_background_update(self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME) - - defer.returnValue(result) - - @defer.inlineCallbacks - def _background_reindex_origin_server_ts(self, progress, batch_size): - target_min_stream_id = progress["target_min_stream_id_inclusive"] - max_stream_id = progress["max_stream_id_exclusive"] - rows_inserted = progress.get("rows_inserted", 0) - - INSERT_CLUMP_SIZE = 1000 - - def reindex_search_txn(txn): - sql = ( - "SELECT stream_ordering, event_id FROM events" - " WHERE ? <= stream_ordering AND stream_ordering < ?" - " ORDER BY stream_ordering DESC" - " LIMIT ?" - ) - - txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size)) - - rows = txn.fetchall() - if not rows: - return 0 - - min_stream_id = rows[-1][0] - event_ids = [row[1] for row in rows] - - rows_to_update = [] - - chunks = [event_ids[i : i + 100] for i in range(0, len(event_ids), 100)] - for chunk in chunks: - ev_rows = self._simple_select_many_txn( - txn, - table="event_json", - column="event_id", - iterable=chunk, - retcols=["event_id", "json"], - keyvalues={}, - ) - - for row in ev_rows: - event_id = row["event_id"] - event_json = json.loads(row["json"]) - try: - origin_server_ts = event_json["origin_server_ts"] - except (KeyError, AttributeError): - # If the event is missing a necessary field then - # skip over it. - continue - - rows_to_update.append((origin_server_ts, event_id)) - - sql = "UPDATE events SET origin_server_ts = ? WHERE event_id = ?" - - for index in range(0, len(rows_to_update), INSERT_CLUMP_SIZE): - clump = rows_to_update[index : index + INSERT_CLUMP_SIZE] - txn.executemany(sql, clump) - - progress = { - "target_min_stream_id_inclusive": target_min_stream_id, - "max_stream_id_exclusive": min_stream_id, - "rows_inserted": rows_inserted + len(rows_to_update), - } - - self._background_update_progress_txn( - txn, self.EVENT_ORIGIN_SERVER_TS_NAME, progress - ) - - return len(rows_to_update) - - result = yield self.runInteraction( - self.EVENT_ORIGIN_SERVER_TS_NAME, reindex_search_txn - ) - - if not result: - yield self._end_background_update(self.EVENT_ORIGIN_SERVER_TS_NAME) - - defer.returnValue(result) - def get_current_backfill_token(self): """The current minimum token that backfilled events have reached""" return -self._backfill_id_gen.get_current_token() @@ -2347,191 +2165,6 @@ class EventsStore( get_all_updated_current_state_deltas_txn, ) - @defer.inlineCallbacks - def _cleanup_extremities_bg_update(self, progress, batch_size): - """Background update to clean out extremities that should have been - deleted previously. - - Mainly used to deal with the aftermath of #5269. - """ - - # This works by first copying all existing forward extremities into the - # `_extremities_to_check` table at start up, and then checking each - # event in that table whether we have any descendants that are not - # soft-failed/rejected. If that is the case then we delete that event - # from the forward extremities table. 
- # - # For efficiency, we do this in batches by recursively pulling out all - # descendants of a batch until we find the non soft-failed/rejected - # events, i.e. the set of descendants whose chain of prev events back - # to the batch of extremities are all soft-failed or rejected. - # Typically, we won't find any such events as extremities will rarely - # have any descendants, but if they do then we should delete those - # extremities. - - def _cleanup_extremities_bg_update_txn(txn): - # The set of extremity event IDs that we're checking this round - original_set = set() - - # A dict[str, set[str]] of event ID to their prev events. - graph = {} - - # The set of descendants of the original set that are not rejected - # nor soft-failed. Ancestors of these events should be removed - # from the forward extremities table. - non_rejected_leaves = set() - - # Set of event IDs that have been soft failed, and for which we - # should check if they have descendants which haven't been soft - # failed. - soft_failed_events_to_lookup = set() - - # First, we get `batch_size` events from the table, pulling out - # their successor events, if any, and their successor events - # rejection status. - txn.execute( - """SELECT prev_event_id, event_id, internal_metadata, - rejections.event_id IS NOT NULL, events.outlier - FROM ( - SELECT event_id AS prev_event_id - FROM _extremities_to_check - LIMIT ? - ) AS f - LEFT JOIN event_edges USING (prev_event_id) - LEFT JOIN events USING (event_id) - LEFT JOIN event_json USING (event_id) - LEFT JOIN rejections USING (event_id) - """, (batch_size,) - ) - - for prev_event_id, event_id, metadata, rejected, outlier in txn: - original_set.add(prev_event_id) - - if not event_id or outlier: - # Common case where the forward extremity doesn't have any - # descendants. - continue - - graph.setdefault(event_id, set()).add(prev_event_id) - - soft_failed = False - if metadata: - soft_failed = json.loads(metadata).get("soft_failed") - - if soft_failed or rejected: - soft_failed_events_to_lookup.add(event_id) - else: - non_rejected_leaves.add(event_id) - - # Now we recursively check all the soft-failed descendants we - # found above in the same way, until we have nothing left to - # check. - while soft_failed_events_to_lookup: - # We only want to do 100 at a time, so we split given list - # into two. - batch = list(soft_failed_events_to_lookup) - to_check, to_defer = batch[:100], batch[100:] - soft_failed_events_to_lookup = set(to_defer) - - sql = """SELECT prev_event_id, event_id, internal_metadata, - rejections.event_id IS NOT NULL - FROM event_edges - INNER JOIN events USING (event_id) - INNER JOIN event_json USING (event_id) - LEFT JOIN rejections USING (event_id) - WHERE - prev_event_id IN (%s) - AND NOT events.outlier - """ % ( - ",".join("?" for _ in to_check), - ) - txn.execute(sql, to_check) - - for prev_event_id, event_id, metadata, rejected in txn: - if event_id in graph: - # Already handled this event previously, but we still - # want to record the edge. - graph[event_id].add(prev_event_id) - continue - - graph[event_id] = {prev_event_id} - - soft_failed = json.loads(metadata).get("soft_failed") - if soft_failed or rejected: - soft_failed_events_to_lookup.add(event_id) - else: - non_rejected_leaves.add(event_id) - - # We have a set of non-soft-failed descendants, so we recurse up - # the graph to find all ancestors and add them to the set of event - # IDs that we can delete from forward extremities table. 
- to_delete = set() - while non_rejected_leaves: - event_id = non_rejected_leaves.pop() - prev_event_ids = graph.get(event_id, set()) - non_rejected_leaves.update(prev_event_ids) - to_delete.update(prev_event_ids) - - to_delete.intersection_update(original_set) - - deleted = self._simple_delete_many_txn( - txn=txn, - table="event_forward_extremities", - column="event_id", - iterable=to_delete, - keyvalues={}, - ) - - logger.info( - "Deleted %d forward extremities of %d checked, to clean up #5269", - deleted, - len(original_set), - ) - - if deleted: - # We now need to invalidate the caches of these rooms - rows = self._simple_select_many_txn( - txn, - table="events", - column="event_id", - iterable=to_delete, - keyvalues={}, - retcols=("room_id",) - ) - room_ids = set(row["room_id"] for row in rows) - for room_id in room_ids: - txn.call_after( - self.get_latest_event_ids_in_room.invalidate, - (room_id,) - ) - - self._simple_delete_many_txn( - txn=txn, - table="_extremities_to_check", - column="event_id", - iterable=original_set, - keyvalues={}, - ) - - return len(original_set) - - num_handled = yield self.runInteraction( - "_cleanup_extremities_bg_update", _cleanup_extremities_bg_update_txn, - ) - - if not num_handled: - yield self._end_background_update(self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES) - - def _drop_table_txn(txn): - txn.execute("DROP TABLE _extremities_to_check") - - yield self.runInteraction( - "_cleanup_extremities_bg_update_drop_table", - _drop_table_txn, - ) - - defer.returnValue(num_handled) - AllNewEventsResult = namedtuple( "AllNewEventsResult", diff --git a/synapse/storage/events_bg_updates.py b/synapse/storage/events_bg_updates.py new file mode 100644 index 0000000000..2eba106abf --- /dev/null +++ b/synapse/storage/events_bg_updates.py @@ -0,0 +1,401 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging + +from six import text_type + +from canonicaljson import json + +from twisted.internet import defer + +from synapse.storage.background_updates import BackgroundUpdateStore + +logger = logging.getLogger(__name__) + + +class EventsBackgroundUpdatesStore(BackgroundUpdateStore): + + EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts" + EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url" + EVENT_DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities" + + def __init__(self, db_conn, hs): + super(EventsBackgroundUpdatesStore, self).__init__(db_conn, hs) + + self.register_background_update_handler( + self.EVENT_ORIGIN_SERVER_TS_NAME, self._background_reindex_origin_server_ts + ) + self.register_background_update_handler( + self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, + self._background_reindex_fields_sender, + ) + + self.register_background_index_update( + "event_contains_url_index", + index_name="event_contains_url_index", + table="events", + columns=["room_id", "topological_ordering", "stream_ordering"], + where_clause="contains_url = true AND outlier = false", + ) + + # an event_id index on event_search is useful for the purge_history + # api. Plus it means we get to enforce some integrity with a UNIQUE + # clause + self.register_background_index_update( + "event_search_event_id_idx", + index_name="event_search_event_id_idx", + table="event_search", + columns=["event_id"], + unique=True, + psql_only=True, + ) + + self.register_background_update_handler( + self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES, + self._cleanup_extremities_bg_update, + ) + + @defer.inlineCallbacks + def _background_reindex_fields_sender(self, progress, batch_size): + target_min_stream_id = progress["target_min_stream_id_inclusive"] + max_stream_id = progress["max_stream_id_exclusive"] + rows_inserted = progress.get("rows_inserted", 0) + + INSERT_CLUMP_SIZE = 1000 + + def reindex_txn(txn): + sql = ( + "SELECT stream_ordering, event_id, json FROM events" + " INNER JOIN event_json USING (event_id)" + " WHERE ? <= stream_ordering AND stream_ordering < ?" + " ORDER BY stream_ordering DESC" + " LIMIT ?" + ) + + txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size)) + + rows = txn.fetchall() + if not rows: + return 0 + + min_stream_id = rows[-1][0] + + update_rows = [] + for row in rows: + try: + event_id = row[1] + event_json = json.loads(row[2]) + sender = event_json["sender"] + content = event_json["content"] + + contains_url = "url" in content + if contains_url: + contains_url &= isinstance(content["url"], text_type) + except (KeyError, AttributeError): + # If the event is missing a necessary field then + # skip over it. + continue + + update_rows.append((sender, contains_url, event_id)) + + sql = "UPDATE events SET sender = ?, contains_url = ? WHERE event_id = ?" 
+ + for index in range(0, len(update_rows), INSERT_CLUMP_SIZE): + clump = update_rows[index : index + INSERT_CLUMP_SIZE] + txn.executemany(sql, clump) + + progress = { + "target_min_stream_id_inclusive": target_min_stream_id, + "max_stream_id_exclusive": min_stream_id, + "rows_inserted": rows_inserted + len(rows), + } + + self._background_update_progress_txn( + txn, self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, progress + ) + + return len(rows) + + result = yield self.runInteraction( + self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, reindex_txn + ) + + if not result: + yield self._end_background_update(self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME) + + defer.returnValue(result) + + @defer.inlineCallbacks + def _background_reindex_origin_server_ts(self, progress, batch_size): + target_min_stream_id = progress["target_min_stream_id_inclusive"] + max_stream_id = progress["max_stream_id_exclusive"] + rows_inserted = progress.get("rows_inserted", 0) + + INSERT_CLUMP_SIZE = 1000 + + def reindex_search_txn(txn): + sql = ( + "SELECT stream_ordering, event_id FROM events" + " WHERE ? <= stream_ordering AND stream_ordering < ?" + " ORDER BY stream_ordering DESC" + " LIMIT ?" + ) + + txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size)) + + rows = txn.fetchall() + if not rows: + return 0 + + min_stream_id = rows[-1][0] + event_ids = [row[1] for row in rows] + + rows_to_update = [] + + chunks = [event_ids[i : i + 100] for i in range(0, len(event_ids), 100)] + for chunk in chunks: + ev_rows = self._simple_select_many_txn( + txn, + table="event_json", + column="event_id", + iterable=chunk, + retcols=["event_id", "json"], + keyvalues={}, + ) + + for row in ev_rows: + event_id = row["event_id"] + event_json = json.loads(row["json"]) + try: + origin_server_ts = event_json["origin_server_ts"] + except (KeyError, AttributeError): + # If the event is missing a necessary field then + # skip over it. + continue + + rows_to_update.append((origin_server_ts, event_id)) + + sql = "UPDATE events SET origin_server_ts = ? WHERE event_id = ?" + + for index in range(0, len(rows_to_update), INSERT_CLUMP_SIZE): + clump = rows_to_update[index : index + INSERT_CLUMP_SIZE] + txn.executemany(sql, clump) + + progress = { + "target_min_stream_id_inclusive": target_min_stream_id, + "max_stream_id_exclusive": min_stream_id, + "rows_inserted": rows_inserted + len(rows_to_update), + } + + self._background_update_progress_txn( + txn, self.EVENT_ORIGIN_SERVER_TS_NAME, progress + ) + + return len(rows_to_update) + + result = yield self.runInteraction( + self.EVENT_ORIGIN_SERVER_TS_NAME, reindex_search_txn + ) + + if not result: + yield self._end_background_update(self.EVENT_ORIGIN_SERVER_TS_NAME) + + defer.returnValue(result) + + @defer.inlineCallbacks + def _cleanup_extremities_bg_update(self, progress, batch_size): + """Background update to clean out extremities that should have been + deleted previously. + + Mainly used to deal with the aftermath of #5269. + """ + + # This works by first copying all existing forward extremities into the + # `_extremities_to_check` table at start up, and then checking each + # event in that table whether we have any descendants that are not + # soft-failed/rejected. If that is the case then we delete that event + # from the forward extremities table. + # + # For efficiency, we do this in batches by recursively pulling out all + # descendants of a batch until we find the non soft-failed/rejected + # events, i.e. 
the set of descendants whose chain of prev events back + # to the batch of extremities are all soft-failed or rejected. + # Typically, we won't find any such events as extremities will rarely + # have any descendants, but if they do then we should delete those + # extremities. + + def _cleanup_extremities_bg_update_txn(txn): + # The set of extremity event IDs that we're checking this round + original_set = set() + + # A dict[str, set[str]] of event ID to their prev events. + graph = {} + + # The set of descendants of the original set that are not rejected + # nor soft-failed. Ancestors of these events should be removed + # from the forward extremities table. + non_rejected_leaves = set() + + # Set of event IDs that have been soft failed, and for which we + # should check if they have descendants which haven't been soft + # failed. + soft_failed_events_to_lookup = set() + + # First, we get `batch_size` events from the table, pulling out + # their successor events, if any, and their successor events + # rejection status. + txn.execute( + """SELECT prev_event_id, event_id, internal_metadata, + rejections.event_id IS NOT NULL, events.outlier + FROM ( + SELECT event_id AS prev_event_id + FROM _extremities_to_check + LIMIT ? + ) AS f + LEFT JOIN event_edges USING (prev_event_id) + LEFT JOIN events USING (event_id) + LEFT JOIN event_json USING (event_id) + LEFT JOIN rejections USING (event_id) + """, (batch_size,) + ) + + for prev_event_id, event_id, metadata, rejected, outlier in txn: + original_set.add(prev_event_id) + + if not event_id or outlier: + # Common case where the forward extremity doesn't have any + # descendants. + continue + + graph.setdefault(event_id, set()).add(prev_event_id) + + soft_failed = False + if metadata: + soft_failed = json.loads(metadata).get("soft_failed") + + if soft_failed or rejected: + soft_failed_events_to_lookup.add(event_id) + else: + non_rejected_leaves.add(event_id) + + # Now we recursively check all the soft-failed descendants we + # found above in the same way, until we have nothing left to + # check. + while soft_failed_events_to_lookup: + # We only want to do 100 at a time, so we split given list + # into two. + batch = list(soft_failed_events_to_lookup) + to_check, to_defer = batch[:100], batch[100:] + soft_failed_events_to_lookup = set(to_defer) + + sql = """SELECT prev_event_id, event_id, internal_metadata, + rejections.event_id IS NOT NULL + FROM event_edges + INNER JOIN events USING (event_id) + INNER JOIN event_json USING (event_id) + LEFT JOIN rejections USING (event_id) + WHERE + prev_event_id IN (%s) + AND NOT events.outlier + """ % ( + ",".join("?" for _ in to_check), + ) + txn.execute(sql, to_check) + + for prev_event_id, event_id, metadata, rejected in txn: + if event_id in graph: + # Already handled this event previously, but we still + # want to record the edge. + graph[event_id].add(prev_event_id) + continue + + graph[event_id] = {prev_event_id} + + soft_failed = json.loads(metadata).get("soft_failed") + if soft_failed or rejected: + soft_failed_events_to_lookup.add(event_id) + else: + non_rejected_leaves.add(event_id) + + # We have a set of non-soft-failed descendants, so we recurse up + # the graph to find all ancestors and add them to the set of event + # IDs that we can delete from forward extremities table. 
+ to_delete = set() + while non_rejected_leaves: + event_id = non_rejected_leaves.pop() + prev_event_ids = graph.get(event_id, set()) + non_rejected_leaves.update(prev_event_ids) + to_delete.update(prev_event_ids) + + to_delete.intersection_update(original_set) + + deleted = self._simple_delete_many_txn( + txn=txn, + table="event_forward_extremities", + column="event_id", + iterable=to_delete, + keyvalues={}, + ) + + logger.info( + "Deleted %d forward extremities of %d checked, to clean up #5269", + deleted, + len(original_set), + ) + + if deleted: + # We now need to invalidate the caches of these rooms + rows = self._simple_select_many_txn( + txn, + table="events", + column="event_id", + iterable=to_delete, + keyvalues={}, + retcols=("room_id",) + ) + room_ids = set(row["room_id"] for row in rows) + for room_id in room_ids: + txn.call_after( + self.get_latest_event_ids_in_room.invalidate, + (room_id,) + ) + + self._simple_delete_many_txn( + txn=txn, + table="_extremities_to_check", + column="event_id", + iterable=original_set, + keyvalues={}, + ) + + return len(original_set) + + num_handled = yield self.runInteraction( + "_cleanup_extremities_bg_update", _cleanup_extremities_bg_update_txn, + ) + + if not num_handled: + yield self._end_background_update(self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES) + + def _drop_table_txn(txn): + txn.execute("DROP TABLE _extremities_to_check") + + yield self.runInteraction( + "_cleanup_extremities_bg_update_drop_table", + _drop_table_txn, + ) + + defer.returnValue(num_handled) From 7386c35f58f360269df1410b2d1ec6d179081b32 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 11:24:42 +0100 Subject: [PATCH 55/92] Rename constant --- synapse/storage/events_bg_updates.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/storage/events_bg_updates.py b/synapse/storage/events_bg_updates.py index 2eba106abf..22aac1393d 100644 --- a/synapse/storage/events_bg_updates.py +++ b/synapse/storage/events_bg_updates.py @@ -30,7 +30,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts" EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url" - EVENT_DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities" + DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities" def __init__(self, db_conn, hs): super(EventsBackgroundUpdatesStore, self).__init__(db_conn, hs) @@ -64,7 +64,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): ) self.register_background_update_handler( - self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES, + self.DELETE_SOFT_FAILED_EXTREMITIES, self._cleanup_extremities_bg_update, ) @@ -388,7 +388,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): ) if not num_handled: - yield self._end_background_update(self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES) + yield self._end_background_update(self.DELETE_SOFT_FAILED_EXTREMITIES) def _drop_table_txn(txn): txn.execute("DROP TABLE _extremities_to_check") From 06eb408da5c4ab52a3072dc6d76fe5ac3b9b1e83 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 14:06:42 +0100 Subject: [PATCH 56/92] Update synapse/storage/events_bg_updates.py Co-Authored-By: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- synapse/storage/events_bg_updates.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/events_bg_updates.py b/synapse/storage/events_bg_updates.py index 22aac1393d..75c1935bf3 100644 --- a/synapse/storage/events_bg_updates.py 
+++ b/synapse/storage/events_bg_updates.py @@ -255,7 +255,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): soft_failed_events_to_lookup = set() # First, we get `batch_size` events from the table, pulling out - # their successor events, if any, and their successor events + # their successor events, if any, and the successor events' # rejection status. txn.execute( """SELECT prev_event_id, event_id, internal_metadata, From e2c3660a0ffb13d4198893e91a90ae1abcad8915 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 14:54:56 +0100 Subject: [PATCH 57/92] Add index to temp table --- synapse/storage/schema/delta/54/delete_forward_extremities.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/storage/schema/delta/54/delete_forward_extremities.sql b/synapse/storage/schema/delta/54/delete_forward_extremities.sql index aa40f13da7..b062ec840c 100644 --- a/synapse/storage/schema/delta/54/delete_forward_extremities.sql +++ b/synapse/storage/schema/delta/54/delete_forward_extremities.sql @@ -20,3 +20,4 @@ INSERT INTO background_updates (update_name, progress_json) VALUES DROP TABLE IF EXISTS _extremities_to_check; -- To make this delta schema file idempotent. CREATE TABLE _extremities_to_check AS SELECT event_id FROM event_forward_extremities; +CREATE INDEX _extremities_to_check_id ON _extremities_to_check(event_id); From f5c7f90d7278f199523457ade9587d284b3ce39b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 15:05:26 +0100 Subject: [PATCH 58/92] Newsfile --- changelog.d/5291.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5291.bugfix diff --git a/changelog.d/5291.bugfix b/changelog.d/5291.bugfix new file mode 100644 index 0000000000..9e14d20289 --- /dev/null +++ b/changelog.d/5291.bugfix @@ -0,0 +1 @@ +Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. From 9315802221ede86def56986d5c5303f649cd2fa9 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 30 May 2019 16:28:02 +0100 Subject: [PATCH 59/92] fix changelog for 0.99.5.1 (#5270) --- CHANGES.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 6bdfdd6d70..350151b62c 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,8 +1,7 @@ Synapse 0.99.5.1 (2019-05-22) ============================= -No significant changes. - +0.99.5.1 supersedes 0.99.5 due to malformed debian changelog - no functional changes. Synapse 0.99.5 (2019-05-22) =========================== From c831748f4d243d74e9a3fd2042bc2b35cc30f961 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 16:28:52 +0100 Subject: [PATCH 60/92] 0.99.5.2 --- CHANGES.md | 9 +++++++++ changelog.d/5274.bugfix | 1 - changelog.d/5278.bugfix | 1 - changelog.d/5291.bugfix | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 6 files changed, 16 insertions(+), 4 deletions(-) delete mode 100644 changelog.d/5274.bugfix delete mode 100644 changelog.d/5278.bugfix delete mode 100644 changelog.d/5291.bugfix diff --git a/CHANGES.md b/CHANGES.md index 350151b62c..0ffdf1aaef 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 0.99.5.2 (2019-05-30) +============================= + +Bugfixes +-------- + +- Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. 
([\#5274](https://github.com/matrix-org/synapse/issues/5274), [\#5278](https://github.com/matrix-org/synapse/issues/5278), [\#5291](https://github.com/matrix-org/synapse/issues/5291)) + + Synapse 0.99.5.1 (2019-05-22) ============================= diff --git a/changelog.d/5274.bugfix b/changelog.d/5274.bugfix deleted file mode 100644 index 9e14d20289..0000000000 --- a/changelog.d/5274.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. diff --git a/changelog.d/5278.bugfix b/changelog.d/5278.bugfix deleted file mode 100644 index 9e14d20289..0000000000 --- a/changelog.d/5278.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. diff --git a/changelog.d/5291.bugfix b/changelog.d/5291.bugfix deleted file mode 100644 index 9e14d20289..0000000000 --- a/changelog.d/5291.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. diff --git a/debian/changelog b/debian/changelog index 90c6b86c5b..6a1a72c0e3 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (0.99.5.2) stable; urgency=medium + + * New synapse release 0.99.5.2. + + -- Synapse Packaging team Thu, 30 May 2019 16:28:07 +0100 + matrix-synapse-py3 (0.99.5.1) stable; urgency=medium * New synapse release 0.99.5.1. diff --git a/synapse/__init__.py b/synapse/__init__.py index 4f95778eea..d0e8d7c21b 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "0.99.5.1" +__version__ = "0.99.5.2" From 099829d5a95b913c47634d13391d6c9f200f0bde Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 1 Apr 2019 12:28:40 +0100 Subject: [PATCH 61/92] use attr.s for VerifyKeyRequest because namedtuple is awful --- changelog.d/5296.misc | 1 + synapse/crypto/keyring.py | 36 ++++++++++++++++++++---------------- 2 files changed, 21 insertions(+), 16 deletions(-) create mode 100644 changelog.d/5296.misc diff --git a/changelog.d/5296.misc b/changelog.d/5296.misc new file mode 100644 index 0000000000..a038a6f7f6 --- /dev/null +++ b/changelog.d/5296.misc @@ -0,0 +1 @@ +Refactor keyring.VerifyKeyRequest to use attr.s. diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index c63f106cf3..e1e026214f 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -15,12 +15,12 @@ # limitations under the License. import logging -from collections import namedtuple import six from six import raise_from from six.moves import urllib +import attr from signedjson.key import ( decode_verify_key_bytes, encode_verify_key_base64, @@ -57,22 +57,26 @@ from synapse.util.retryutils import NotRetryingDestination logger = logging.getLogger(__name__) -VerifyKeyRequest = namedtuple( - "VerifyRequest", ("server_name", "key_ids", "json_object", "deferred") -) -""" -A request for a verify key to verify a JSON object. +@attr.s(slots=True, cmp=False) +class VerifyKeyRequest(object): + """ + A request for a verify key to verify a JSON object. -Attributes: - server_name(str): The name of the server to verify against. - key_ids(set(str)): The set of key_ids to that could be used to verify the - JSON object - json_object(dict): The JSON object to verify. - deferred(Deferred[str, str, nacl.signing.VerifyKey]): - A deferred (server_name, key_id, verify_key) tuple that resolves when - a verify key has been fetched. 
The deferreds' callbacks are run with no - logcontext. -""" + Attributes: + server_name(str): The name of the server to verify against. + key_ids(set[str]): The set of key_ids to that could be used to verify the + JSON object + json_object(dict): The JSON object to verify. + deferred(Deferred[str, str, nacl.signing.VerifyKey]): + A deferred (server_name, key_id, verify_key) tuple that resolves when + a verify key has been fetched. The deferreds' callbacks are run with no + logcontext. + """ + + server_name = attr.ib() + key_ids = attr.ib() + json_object = attr.ib() + deferred = attr.ib() class KeyLookupError(ValueError): From a82c96b87fd6fb8b8c71cc34e6a712a12ff4222f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 9 Apr 2019 18:30:13 +0100 Subject: [PATCH 62/92] Rewrite get_server_verify_keys, again. Attempt to simplify the logic in get_server_verify_keys by splitting it into two methods. --- changelog.d/5299.misc | 1 + synapse/crypto/keyring.py | 101 ++++++++++++++++++++------------------ 2 files changed, 54 insertions(+), 48 deletions(-) create mode 100644 changelog.d/5299.misc diff --git a/changelog.d/5299.misc b/changelog.d/5299.misc new file mode 100644 index 0000000000..53297c768b --- /dev/null +++ b/changelog.d/5299.misc @@ -0,0 +1 @@ +Rewrite get_server_verify_keys, again. diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index c63f106cf3..194867db03 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -270,59 +270,21 @@ class Keyring(object): verify_requests (list[VerifyKeyRequest]): list of verify requests """ + remaining_requests = set( + (rq for rq in verify_requests if not rq.deferred.called) + ) + @defer.inlineCallbacks def do_iterations(): with Measure(self.clock, "get_server_verify_keys"): - # dict[str, set(str)]: keys to fetch for each server - missing_keys = {} - for verify_request in verify_requests: - missing_keys.setdefault(verify_request.server_name, set()).update( - verify_request.key_ids - ) - for f in self._key_fetchers: - results = yield f.get_keys(missing_keys.items()) - - # We now need to figure out which verify requests we have keys - # for and which we don't - missing_keys = {} - requests_missing_keys = [] - for verify_request in verify_requests: - if verify_request.deferred.called: - # We've already called this deferred, which probably - # means that we've already found a key for it. - continue - - server_name = verify_request.server_name - - # see if any of the keys we got this time are sufficient to - # complete this VerifyKeyRequest. - result_keys = results.get(server_name, {}) - for key_id in verify_request.key_ids: - fetch_key_result = result_keys.get(key_id) - if fetch_key_result: - with PreserveLoggingContext(): - verify_request.deferred.callback( - ( - server_name, - key_id, - fetch_key_result.verify_key, - ) - ) - break - else: - # The else block is only reached if the loop above - # doesn't break. 
- missing_keys.setdefault(server_name, set()).update( - verify_request.key_ids - ) - requests_missing_keys.append(verify_request) - - if not missing_keys: - break + if not remaining_requests: + return + yield self._attempt_key_fetches_with_fetcher(f, remaining_requests) + # look for any requests which weren't satisfied with PreserveLoggingContext(): - for verify_request in requests_missing_keys: + for verify_request in remaining_requests: verify_request.deferred.errback( SynapseError( 401, @@ -333,13 +295,56 @@ class Keyring(object): ) def on_err(err): + # we don't really expect to get here, because any errors should already + # have been caught and logged. But if we do, let's log the error and make + # sure that all of the deferreds are resolved. + logger.error("Unexpected error in _get_server_verify_keys: %s", err) with PreserveLoggingContext(): - for verify_request in verify_requests: + for verify_request in remaining_requests: if not verify_request.deferred.called: verify_request.deferred.errback(err) run_in_background(do_iterations).addErrback(on_err) + @defer.inlineCallbacks + def _attempt_key_fetches_with_fetcher(self, fetcher, remaining_requests): + """Use a key fetcher to attempt to satisfy some key requests + + Args: + fetcher (KeyFetcher): fetcher to use to fetch the keys + remaining_requests (set[VerifyKeyRequest]): outstanding key requests. + Any successfully-completed requests will be reomved from the list. + """ + # dict[str, set(str)]: keys to fetch for each server + missing_keys = {} + for verify_request in remaining_requests: + # any completed requests should already have been removed + assert not verify_request.deferred.called + missing_keys.setdefault(verify_request.server_name, set()).update( + verify_request.key_ids + ) + + results = yield fetcher.get_keys(missing_keys.items()) + + completed = list() + for verify_request in remaining_requests: + server_name = verify_request.server_name + + # see if any of the keys we got this time are sufficient to + # complete this VerifyKeyRequest. + result_keys = results.get(server_name, {}) + for key_id in verify_request.key_ids: + key = result_keys.get(key_id) + if key: + with PreserveLoggingContext(): + verify_request.deferred.callback( + (server_name, key_id, key.verify_key) + ) + completed.append(verify_request) + break + + remaining_requests.difference_update(completed) + class KeyFetcher(object): def get_keys(self, server_name_and_key_ids): From 8ea2f756a947d668afc9a6b22707c12a29af6be4 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 29 May 2019 17:21:39 +0100 Subject: [PATCH 63/92] Remove some pointless exception handling The verify_request deferred already returns a suitable SynapseError, so I don't really know what we expect to achieve by doing more wrapping, other than log spam. Fixes #4278. --- changelog.d/5300.bugfix | 1 + synapse/crypto/keyring.py | 33 ++++++++------------------------- 2 files changed, 9 insertions(+), 25 deletions(-) create mode 100644 changelog.d/5300.bugfix diff --git a/changelog.d/5300.bugfix b/changelog.d/5300.bugfix new file mode 100644 index 0000000000..049e93cd5a --- /dev/null +++ b/changelog.d/5300.bugfix @@ -0,0 +1 @@ +Fix noisy 'no key for server' logs. diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index e1e026214f..5756478ad7 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -64,13 +64,19 @@ class VerifyKeyRequest(object): Attributes: server_name(str): The name of the server to verify against. 
+ key_ids(set[str]): The set of key_ids to that could be used to verify the JSON object + json_object(dict): The JSON object to verify. + deferred(Deferred[str, str, nacl.signing.VerifyKey]): A deferred (server_name, key_id, verify_key) tuple that resolves when a verify key has been fetched. The deferreds' callbacks are run with no logcontext. + + If we are unable to find a key which satisfies the request, the deferred + errbacks with an M_UNAUTHORIZED SynapseError. """ server_name = attr.ib() @@ -771,31 +777,8 @@ def _handle_key_deferred(verify_request): SynapseError if there was a problem performing the verification """ server_name = verify_request.server_name - try: - with PreserveLoggingContext(): - _, key_id, verify_key = yield verify_request.deferred - except KeyLookupError as e: - logger.warn( - "Failed to download keys for %s: %s %s", - server_name, - type(e).__name__, - str(e), - ) - raise SynapseError( - 502, "Error downloading keys for %s" % (server_name,), Codes.UNAUTHORIZED - ) - except Exception as e: - logger.exception( - "Got Exception when downloading keys for %s: %s %s", - server_name, - type(e).__name__, - str(e), - ) - raise SynapseError( - 401, - "No key for %s with id %s" % (server_name, verify_request.key_ids), - Codes.UNAUTHORIZED, - ) + with PreserveLoggingContext(): + _, key_id, verify_key = yield verify_request.deferred json_object = verify_request.json_object From 3e1af5109cb91b8e22f0e14aee875f86bd9fcd92 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Fri, 31 May 2019 02:45:46 -0600 Subject: [PATCH 64/92] Clarify that the admin change password endpoint logs them out (#5303) --- changelog.d/5303.misc | 1 + docs/admin_api/user_admin_api.rst | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/5303.misc diff --git a/changelog.d/5303.misc b/changelog.d/5303.misc new file mode 100644 index 0000000000..f6a7f1f8e3 --- /dev/null +++ b/changelog.d/5303.misc @@ -0,0 +1 @@ +Clarify that the admin change password API logs the user out. diff --git a/docs/admin_api/user_admin_api.rst b/docs/admin_api/user_admin_api.rst index 8aca4f158d..213359d0c0 100644 --- a/docs/admin_api/user_admin_api.rst +++ b/docs/admin_api/user_admin_api.rst @@ -69,7 +69,7 @@ An empty body may be passed for backwards compatibility. Reset password ============== -Changes the password of another user. +Changes the password of another user. This will automatically log the user out of all their devices. The api is:: From 847b9dcd1c9d7d7a43333e85f69dc78471095475 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 31 May 2019 09:54:46 +0100 Subject: [PATCH 65/92] Make max_delta equal to period * 10% --- synapse/config/registration.py | 15 ++++----------- synapse/storage/_base.py | 7 +++---- tests/rest/client/v2_alpha/test_register.py | 18 +----------------- 3 files changed, 8 insertions(+), 32 deletions(-) diff --git a/synapse/config/registration.py b/synapse/config/registration.py index b4fd4af368..1835b4b1f3 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -39,9 +39,7 @@ class AccountValidityConfig(Config): else: self.renew_email_subject = "Renew your %(app)s account" - self.startup_job_max_delta = self.parse_duration( - config.get("startup_job_max_delta", 0), - ) + self.startup_job_max_delta = self.period * 10. / 100. 
if self.renew_by_email_enabled and "public_baseurl" not in synapse_config: raise ConfigError("Can't send renewal emails without 'public_baseurl'") @@ -133,20 +131,15 @@ class RegistrationConfig(Config): # This means that, if a validity period is set, and Synapse is restarted (it will # then derive an expiration date from the current validity period), and some time # after that the validity period changes and Synapse is restarted, the users' - # expiration dates won't be updated unless their account is manually renewed. - # - # If set, the ``startup_job_max_delta`` optional setting will make the startup job - # described above set a random expiration date between t + period and - # t + period + startup_job_max_delta, t being the date and time at which the job - # sets the expiration date for a given user. This is useful for server admins that - # want to avoid Synapse sending a lot of renewal emails at once. + # expiration dates won't be updated unless their account is manually renewed. This + # date will be randomly selected within a range [now + period ; now + period + d], + # where d is equal to 10% of the validity period. # #account_validity: # enabled: True # period: 6w # renew_at: 1w # renew_email_subject: "Renew your %%(app)s account" - # startup_job_max_delta: 2d # The user must provide all of the below types of 3PID when registering. # diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 40802fd3dc..7f944ec717 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -329,14 +329,13 @@ class SQLBaseStore(object): user_id (str): User ID to set an expiration date for. use_delta (bool): If set to False, the expiration date for the user will be now + validity period. If set to True, this expiration date will be a - random value in the [now + period; now + period + max_delta] range, - max_delta being the configured value for the size of the range, unless - delta is 0, in which case it sets it to now + period. + random value in the [now + period; now + period + d] range, d being a + delta equal to 10% of the validity period. """ now_ms = self._clock.time_msec() expiration_ts = now_ms + self._account_validity.period - if use_delta and self._account_validity.startup_job_max_delta: + if use_delta: expiration_ts = self.rand.randrange( expiration_ts, expiration_ts + self._account_validity.startup_job_max_delta, diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 68654e25ab..711628ded1 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -436,7 +436,7 @@ class AccountValidityBackgroundJobTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): self.validity_period = 10 - self.max_delta = 10 + self.max_delta = self.validity_period * 10. / 100. config = self.default_config() @@ -453,22 +453,6 @@ class AccountValidityBackgroundJobTestCase(unittest.HomeserverTestCase): return self.hs def test_background_job(self): - """ - Tests whether the account validity startup background job does the right thing, - which is sticking an expiration date to every account that doesn't already have - one. 
- """ - user_id = self.register_user("kermit", "user") - - self.hs.config.account_validity.startup_job_max_delta = 0 - - now_ms = self.hs.clock.time_msec() - self.get_success(self.store._set_expiration_date_when_missing()) - - res = self.get_success(self.store.get_expiration_ts_for_user(user_id)) - self.assertEqual(res, now_ms + self.validity_period) - - def test_background_job_with_max_delta(self): """ Tests the same thing as test_background_job, except that it sets the startup_job_max_delta parameter and checks that the expiration date is within the From 0c2362861e3fad44ede5e9c23dbef8e1a9113f36 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 31 May 2019 09:56:52 +0100 Subject: [PATCH 66/92] Gah python --- synapse/config/registration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 1835b4b1f3..4af825a2ab 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -133,7 +133,7 @@ class RegistrationConfig(Config): # after that the validity period changes and Synapse is restarted, the users' # expiration dates won't be updated unless their account is manually renewed. This # date will be randomly selected within a range [now + period ; now + period + d], - # where d is equal to 10% of the validity period. + # where d is equal to 10%% of the validity period. # #account_validity: # enabled: True From 6bfc5ad3a189acd993a1ef9db36d28b963be345d Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 31 May 2019 09:56:57 +0100 Subject: [PATCH 67/92] Sample config --- docs/sample_config.yaml | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 8ff53d5cb4..13c0ddc7c5 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -753,20 +753,15 @@ uploads_path: "DATADIR/uploads" # This means that, if a validity period is set, and Synapse is restarted (it will # then derive an expiration date from the current validity period), and some time # after that the validity period changes and Synapse is restarted, the users' -# expiration dates won't be updated unless their account is manually renewed. -# -# If set, the ``startup_job_max_delta`` optional setting will make the startup job -# described above set a random expiration date between t + period and -# t + period + startup_job_max_delta, t being the date and time at which the job -# sets the expiration date for a given user. This is useful for server admins that -# want to avoid Synapse sending a lot of renewal emails at once. +# expiration dates won't be updated unless their account is manually renewed. This +# date will be randomly selected within a range [now + period ; now + period + d], +# where d is equal to 10% of the validity period. # #account_validity: # enabled: True # period: 6w # renew_at: 1w # renew_email_subject: "Renew your %(app)s account" -# startup_job_max_delta: 2d # The user must provide all of the below types of 3PID when registering. # From 5037326d6624d1d1780a0536d19ff79e275f8735 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 15:37:57 +0100 Subject: [PATCH 68/92] Add indices. Remove room_ids accidentally added We have to do this by re-inserting a background update and recreating tables, as the tables only get created during a background update and will later be deleted. 
We also make sure that we remove any entries that should have been removed but weren't due to a race that has been fixed in a previous commit. --- synapse/storage/schema/delta/54/stats2.sql | 28 +++++++++++++++ synapse/storage/stats.py | 41 +++++++++++++++------- 2 files changed, 56 insertions(+), 13 deletions(-) create mode 100644 synapse/storage/schema/delta/54/stats2.sql diff --git a/synapse/storage/schema/delta/54/stats2.sql b/synapse/storage/schema/delta/54/stats2.sql new file mode 100644 index 0000000000..3b2d48447f --- /dev/null +++ b/synapse/storage/schema/delta/54/stats2.sql @@ -0,0 +1,28 @@ +/* Copyright 2019 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- This delta file gets run after `54/stats.sql` delta. + +-- We want to add some indices to the temporary stats table, so we re-insert +-- 'populate_stats_createtables' if we are still processing the rooms update. +INSERT INTO background_updates (update_name, progress_json) + SELECT 'populate_stats_createtables', '{}' + WHERE + 'populate_stats_process_rooms' IN ( + SELECT update_name FROM background_updates + ) + AND 'populate_stats_createtables' NOT IN ( -- don't insert if already exists + SELECT update_name FROM background_updates + ); diff --git a/synapse/storage/stats.py b/synapse/storage/stats.py index a99637d4b4..1c0b183a56 100644 --- a/synapse/storage/stats.py +++ b/synapse/storage/stats.py @@ -18,6 +18,7 @@ import logging from twisted.internet import defer from synapse.api.constants import EventTypes, Membership +from synapse.storage.prepare_database import get_statements from synapse.storage.state_deltas import StateDeltasStore from synapse.util.caches.descriptors import cached @@ -69,12 +70,25 @@ class StatsStore(StateDeltasStore): # Get all the rooms that we want to process. def _make_staging_area(txn): - sql = ( - "CREATE TABLE IF NOT EXISTS " - + TEMP_TABLE - + "_rooms(room_id TEXT NOT NULL, events BIGINT NOT NULL)" - ) - txn.execute(sql) + # Create the temporary tables + stmts = get_statements(""" + -- We just recreate the table, we'll be reinserting the + -- correct entries again later anyway. + DROP TABLE IF EXISTS {temp}_rooms; + + CREATE TABLE IF NOT EXISTS {temp}_rooms( + room_id TEXT NOT NULL, + events BIGINT NOT NULL + ); + + CREATE INDEX {temp}_rooms_events + ON {temp}_rooms(events); + CREATE INDEX {temp}_rooms_id + ON {temp}_rooms(room_id); + """.format(temp=TEMP_TABLE).splitlines()) + + for statement in stmts: + txn.execute(statement) sql = ( "CREATE TABLE IF NOT EXISTS " @@ -83,15 +97,16 @@ class StatsStore(StateDeltasStore): ) txn.execute(sql) - # Get rooms we want to process from the database + # Get rooms we want to process from the database, only adding + # those that we haven't (i.e. 
those not in room_stats_earliest_token) sql = """ - SELECT room_id, count(*) FROM current_state_events - GROUP BY room_id - """ + INSERT INTO %s_rooms (room_id, events) + SELECT c.room_id, count(*) FROM current_state_events AS c + LEFT JOIN room_stats_earliest_token AS t USING (room_id) + WHERE t.room_id IS NULL + GROUP BY c.room_id + """ % (TEMP_TABLE,) txn.execute(sql) - rooms = [{"room_id": x[0], "events": x[1]} for x in txn.fetchall()] - self._simple_insert_many_txn(txn, TEMP_TABLE + "_rooms", rooms) - del rooms new_pos = yield self.get_max_stream_id_in_current_state_deltas() yield self.runInteraction("populate_stats_temp_build", _make_staging_area) From 39bbf6a4a5b954de56865a2aa0877587acbd9552 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 16:07:23 +0100 Subject: [PATCH 69/92] Newsfile --- changelog.d/5294.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5294.bugfix diff --git a/changelog.d/5294.bugfix b/changelog.d/5294.bugfix new file mode 100644 index 0000000000..5924bda319 --- /dev/null +++ b/changelog.d/5294.bugfix @@ -0,0 +1 @@ +Fix performance problems with the rooms stats background update. From 4d794dae210ce30e87d8a6b9ee2f9b481cadf539 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 31 May 2019 11:09:34 +0100 Subject: [PATCH 70/92] Move delta from +10% to -10% --- synapse/config/registration.py | 2 +- synapse/storage/_base.py | 4 ++-- tests/rest/client/v2_alpha/test_register.py | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 4af825a2ab..aad3400819 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -132,7 +132,7 @@ class RegistrationConfig(Config): # then derive an expiration date from the current validity period), and some time # after that the validity period changes and Synapse is restarted, the users' # expiration dates won't be updated unless their account is manually renewed. This - # date will be randomly selected within a range [now + period ; now + period + d], + # date will be randomly selected within a range [now + period - d ; now + period], # where d is equal to 10%% of the validity period. # #account_validity: diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 7f944ec717..086318a530 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -329,7 +329,7 @@ class SQLBaseStore(object): user_id (str): User ID to set an expiration date for. use_delta (bool): If set to False, the expiration date for the user will be now + validity period. If set to True, this expiration date will be a - random value in the [now + period; now + period + d] range, d being a + random value in the [now + period - d ; now + period] range, d being a delta equal to 10% of the validity period. 
""" now_ms = self._clock.time_msec() @@ -337,8 +337,8 @@ class SQLBaseStore(object): if use_delta: expiration_ts = self.rand.randrange( + expiration_ts - self._account_validity.startup_job_max_delta, expiration_ts, - expiration_ts + self._account_validity.startup_job_max_delta, ) self._simple_insert_txn( diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 711628ded1..0cb6a363d6 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -467,5 +467,5 @@ class AccountValidityBackgroundJobTestCase(unittest.HomeserverTestCase): res = self.get_success(self.store.get_expiration_ts_for_user(user_id)) - self.assertLessEqual(res, now_ms + self.validity_period + self.max_delta) - self.assertGreaterEqual(res, now_ms + self.validity_period) + self.assertGreaterEqual(res, now_ms + self.validity_period - self.max_delta) + self.assertLessEqual(res, now_ms + self.validity_period) From e975b15101c08299218bd15963a9dc5ea6f990ff Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 31 May 2019 11:14:21 +0100 Subject: [PATCH 71/92] Sample config --- docs/sample_config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 13c0ddc7c5..9536681068 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -754,7 +754,7 @@ uploads_path: "DATADIR/uploads" # then derive an expiration date from the current validity period), and some time # after that the validity period changes and Synapse is restarted, the users' # expiration dates won't be updated unless their account is manually renewed. This -# date will be randomly selected within a range [now + period ; now + period + d], +# date will be randomly selected within a range [now + period - d ; now + period], # where d is equal to 10% of the validity period. # #account_validity: From 3600f5568b5f8c6902a0dbeeb349c1891f8114b9 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 31 May 2019 15:48:36 +0100 Subject: [PATCH 72/92] Stop overwriting server keys with other keys Fix a bug where we would discard a key result which the origin server is no longer returning. Fixes #5305. --- changelog.d/5307.bugfix | 1 + synapse/crypto/keyring.py | 14 ++------------ 2 files changed, 3 insertions(+), 12 deletions(-) create mode 100644 changelog.d/5307.bugfix diff --git a/changelog.d/5307.bugfix b/changelog.d/5307.bugfix new file mode 100644 index 0000000000..6b152f4854 --- /dev/null +++ b/changelog.d/5307.bugfix @@ -0,0 +1 @@ +Fix bug where a notary server would sometimes forget old keys. diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 5756478ad7..8f47469a1c 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -394,7 +394,7 @@ class BaseV2KeyFetcher(object): @defer.inlineCallbacks def process_v2_response( - self, from_server, response_json, time_added_ms, requested_ids=[] + self, from_server, response_json, time_added_ms ): """Parse a 'Server Keys' structure from the result of a /key request @@ -417,10 +417,6 @@ class BaseV2KeyFetcher(object): time_added_ms (int): the timestamp to record in server_keys_json - requested_ids (iterable[str]): a list of the key IDs that were requested. 
- We will store the json for these key ids as well as any that are - actually in the response - Returns: Deferred[dict[str, FetchKeyResult]]: map from key_id to result object """ @@ -476,11 +472,6 @@ class BaseV2KeyFetcher(object): signed_key_json_bytes = encode_canonical_json(signed_key_json) - # for reasons I don't quite understand, we store this json for the key ids we - # requested, as well as those we got. - updated_key_ids = set(requested_ids) - updated_key_ids.update(verify_keys) - yield logcontext.make_deferred_yieldable( defer.gatherResults( [ @@ -493,7 +484,7 @@ class BaseV2KeyFetcher(object): ts_expires_ms=ts_valid_until_ms, key_json_bytes=signed_key_json_bytes, ) - for key_id in updated_key_ids + for key_id in verify_keys ], consumeErrors=True, ).addErrback(unwrapFirstError) @@ -749,7 +740,6 @@ class ServerKeyFetcher(BaseV2KeyFetcher): response_keys = yield self.process_v2_response( from_server=server_name, - requested_ids=[requested_key_id], response_json=response, time_added_ms=time_now_ms, ) From d16c6375fe39deaafd70b151e496f5e15fd7b29c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Sat, 1 Jun 2019 10:42:33 +0100 Subject: [PATCH 73/92] Limit displaynames and avatar URLs These end up in join events everywhere, so let's limit them. Fixes #5079 --- changelog.d/5309.bugfix | 1 + synapse/handlers/profile.py | 13 +++++++++++++ synapse/handlers/register.py | 2 ++ 3 files changed, 16 insertions(+) create mode 100644 changelog.d/5309.bugfix diff --git a/changelog.d/5309.bugfix b/changelog.d/5309.bugfix new file mode 100644 index 0000000000..97b3527266 --- /dev/null +++ b/changelog.d/5309.bugfix @@ -0,0 +1 @@ +Prevent users from setting huge displaynames and avatar URLs. diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 91fc718ff8..a5fc6c5dbf 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -31,6 +31,9 @@ from ._base import BaseHandler logger = logging.getLogger(__name__) +MAX_DISPLAYNAME_LEN = 100 +MAX_AVATAR_URL_LEN = 1000 + class BaseProfileHandler(BaseHandler): """Handles fetching and updating user profile information. @@ -162,6 +165,11 @@ class BaseProfileHandler(BaseHandler): if not by_admin and target_user != requester.user: raise AuthError(400, "Cannot set another user's displayname") + if len(new_displayname) > MAX_DISPLAYNAME_LEN: + raise SynapseError( + 400, "Displayname is too long (max %i)" % (MAX_DISPLAYNAME_LEN, ), + ) + if new_displayname == '': new_displayname = None @@ -217,6 +225,11 @@ class BaseProfileHandler(BaseHandler): if not by_admin and target_user != requester.user: raise AuthError(400, "Cannot set another user's avatar_url") + if len(new_avatar_url) > MAX_AVATAR_URL_LEN: + raise SynapseError( + 400, "Avatar URL is too long (max %i)" % (MAX_AVATAR_URL_LEN, ), + ) + yield self.store.set_profile_avatar_url( target_user.localpart, new_avatar_url ) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index e83ee24f10..9a388ea013 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -531,6 +531,8 @@ class RegistrationHandler(BaseHandler): A tuple of (user_id, access_token). Raises: RegistrationError if there was a problem registering. + + NB this is only used in tests. TODO: move it to the test package! 
""" if localpart is None: raise SynapseError(400, "Request must include user id") From 93003aa1720af846f238bd0c6fd2f2a0df3c20ef Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Sat, 1 Jun 2019 11:13:49 +0100 Subject: [PATCH 74/92] add some tests --- tests/rest/client/v1/test_profile.py | 62 +++++++++++++++++++++++++++- 1 file changed, 60 insertions(+), 2 deletions(-) diff --git a/tests/rest/client/v1/test_profile.py b/tests/rest/client/v1/test_profile.py index 769c37ce52..f4d0d48dad 100644 --- a/tests/rest/client/v1/test_profile.py +++ b/tests/rest/client/v1/test_profile.py @@ -14,6 +14,8 @@ # limitations under the License. """Tests REST events for /profile paths.""" +import json + from mock import Mock from twisted.internet import defer @@ -31,8 +33,11 @@ myid = "@1234ABCD:test" PATH_PREFIX = "/_matrix/client/api/v1" -class ProfileTestCase(unittest.TestCase): - """ Tests profile management. """ +class MockHandlerProfileTestCase(unittest.TestCase): + """ Tests rest layer of profile management. + + Todo: move these into ProfileTestCase + """ @defer.inlineCallbacks def setUp(self): @@ -159,6 +164,59 @@ class ProfileTestCase(unittest.TestCase): self.assertEquals(mocked_set.call_args[0][2], "http://my.server/pic.gif") +class ProfileTestCase(unittest.HomeserverTestCase): + + servlets = [ + admin.register_servlets_for_client_rest_resource, + login.register_servlets, + profile.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + self.hs = self.setup_test_homeserver() + return self.hs + + def prepare(self, reactor, clock, hs): + self.owner = self.register_user("owner", "pass") + self.owner_tok = self.login("owner", "pass") + + def test_set_displayname(self): + request, channel = self.make_request( + "PUT", + "/profile/%s/displayname" % (self.owner, ), + content=json.dumps({"displayname": "test"}), + access_token=self.owner_tok, + ) + self.render(request) + self.assertEqual(channel.code, 200, channel.result) + + res = self.get_displayname() + self.assertEqual(res, "test") + + def test_set_displayname_too_long(self): + """Attempts to set a stupid displayname should get a 400""" + request, channel = self.make_request( + "PUT", + "/profile/%s/displayname" % (self.owner, ), + content=json.dumps({"displayname": "test" * 100}), + access_token=self.owner_tok, + ) + self.render(request) + self.assertEqual(channel.code, 400, channel.result) + + res = self.get_displayname() + self.assertEqual(res, "owner") + + def get_displayname(self): + request, channel = self.make_request( + "GET", + "/profile/%s/displayname" % (self.owner, ), + ) + self.render(request) + self.assertEqual(channel.code, 200, channel.result) + return channel.json_body["displayname"] + + class ProfilesRestrictedTestCase(unittest.HomeserverTestCase): servlets = [ From 2889b055543c8db6bf93eaad7035d0eca1ec2874 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 3 Jun 2019 21:28:59 +1000 Subject: [PATCH 75/92] Unify v1 and v2 REST client APIs (#5226) --- changelog.d/5226.misc | 1 + scripts-dev/list_url_patterns.py | 4 +- synapse/app/frontend_proxy.py | 11 +- synapse/rest/client/v1/base.py | 65 --------- synapse/rest/client/v1/directory.py | 28 ++-- synapse/rest/client/v1/events.py | 17 ++- synapse/rest/client/v1/initial_sync.py | 12 +- synapse/rest/client/v1/login.py | 22 +-- synapse/rest/client/v1/logout.py | 19 +-- synapse/rest/client/v1/presence.py | 13 +- synapse/rest/client/v1/profile.py | 29 ++-- synapse/rest/client/v1/push_rule.py | 12 +- synapse/rest/client/v1/pusher.py | 21 +-- 
synapse/rest/client/v1/room.py | 137 +++++++++++------- synapse/rest/client/v1/voip.py | 11 +- synapse/rest/client/v2_alpha/_base.py | 6 +- synapse/rest/client/v2_alpha/account.py | 20 +-- synapse/rest/client/v2_alpha/account_data.py | 6 +- .../rest/client/v2_alpha/account_validity.py | 6 +- synapse/rest/client/v2_alpha/auth.py | 4 +- synapse/rest/client/v2_alpha/capabilities.py | 4 +- synapse/rest/client/v2_alpha/devices.py | 8 +- synapse/rest/client/v2_alpha/filter.py | 6 +- synapse/rest/client/v2_alpha/groups.py | 50 +++---- synapse/rest/client/v2_alpha/keys.py | 10 +- synapse/rest/client/v2_alpha/notifications.py | 4 +- synapse/rest/client/v2_alpha/openid.py | 4 +- synapse/rest/client/v2_alpha/read_marker.py | 4 +- synapse/rest/client/v2_alpha/receipts.py | 4 +- synapse/rest/client/v2_alpha/register.py | 10 +- synapse/rest/client/v2_alpha/relations.py | 12 +- synapse/rest/client/v2_alpha/report_event.py | 4 +- synapse/rest/client/v2_alpha/room_keys.py | 8 +- .../v2_alpha/room_upgrade_rest_servlet.py | 4 +- synapse/rest/client/v2_alpha/sendtodevice.py | 4 +- synapse/rest/client/v2_alpha/sync.py | 4 +- synapse/rest/client/v2_alpha/tags.py | 6 +- synapse/rest/client/v2_alpha/thirdparty.py | 10 +- synapse/rest/client/v2_alpha/tokenrefresh.py | 4 +- .../rest/client/v2_alpha/user_directory.py | 4 +- tests/__init__.py | 2 +- tests/rest/admin/test_admin.py | 1 - tests/rest/client/v1/test_profile.py | 2 +- 43 files changed, 296 insertions(+), 317 deletions(-) create mode 100644 changelog.d/5226.misc delete mode 100644 synapse/rest/client/v1/base.py diff --git a/changelog.d/5226.misc b/changelog.d/5226.misc new file mode 100644 index 0000000000..e1b9dc58a3 --- /dev/null +++ b/changelog.d/5226.misc @@ -0,0 +1 @@ +The base classes for the v1 and v2_alpha REST APIs have been unified. 
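A note on the new helper: every servlet touched by this patch switches from `client_path_patterns(...)` / `client_v2_patterns(...)` to a single `client_patterns(...)` defined in `synapse/rest/client/v2_alpha/_base.py`, whose new body is not included in this excerpt. The following is a minimal sketch of such a unified helper, modelled on the `client_path_patterns` implementation deleted from `synapse/rest/client/v1/base.py` further down; the `CLIENT_API_PREFIX` value and the exact keyword arguments are illustrative assumptions, not taken from this patch.

    import re

    # Assumed value of synapse.api.urls.CLIENT_API_PREFIX.
    CLIENT_API_PREFIX = "/_matrix/client"


    def client_patterns(path_regex, releases=(0,), unstable=True, v1=False):
        """Compile a client path regex against each supported URL prefix.

        Passing v1=True additionally keeps the legacy /api/v1 prefix for
        endpoints that predate the r0 API, which is how the v1 servlets in
        this patch are registered.
        """
        patterns = []
        if unstable:
            patterns.append(re.compile("^" + CLIENT_API_PREFIX + "/unstable" + path_regex))
        if v1:
            patterns.append(re.compile("^" + CLIENT_API_PREFIX + "/api/v1" + path_regex))
        for release in releases:
            patterns.append(re.compile("^" + CLIENT_API_PREFIX + "/r%d" % (release,) + path_regex))
        return patterns

    # Example: client_patterns("/events$", v1=True) matches
    # /_matrix/client/api/v1/events, /_matrix/client/r0/events and
    # /_matrix/client/unstable/events.
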
diff --git a/scripts-dev/list_url_patterns.py b/scripts-dev/list_url_patterns.py index da027be26e..62e5a07472 100755 --- a/scripts-dev/list_url_patterns.py +++ b/scripts-dev/list_url_patterns.py @@ -20,9 +20,7 @@ class CallVisitor(ast.NodeVisitor): else: return - if name == "client_path_patterns": - PATTERNS_V1.append(node.args[0].s) - elif name == "client_v2_patterns": + if name == "client_patterns": PATTERNS_V2.append(node.args[0].s) diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index 8479fee738..6504da5278 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -37,8 +37,7 @@ from synapse.replication.slave.storage.client_ips import SlavedClientIpStore from synapse.replication.slave.storage.devices import SlavedDeviceStore from synapse.replication.slave.storage.registration import SlavedRegistrationStore from synapse.replication.tcp.client import ReplicationClientHandler -from synapse.rest.client.v1.base import ClientV1RestServlet, client_path_patterns -from synapse.rest.client.v2_alpha._base import client_v2_patterns +from synapse.rest.client.v2_alpha._base import client_patterns from synapse.server import HomeServer from synapse.storage.engines import create_engine from synapse.util.httpresourcetree import create_resource_tree @@ -49,11 +48,11 @@ from synapse.util.versionstring import get_version_string logger = logging.getLogger("synapse.app.frontend_proxy") -class PresenceStatusStubServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/presence/(?P[^/]*)/status") +class PresenceStatusStubServlet(RestServlet): + PATTERNS = client_patterns("/presence/(?P[^/]*)/status") def __init__(self, hs): - super(PresenceStatusStubServlet, self).__init__(hs) + super(PresenceStatusStubServlet, self).__init__() self.http_client = hs.get_simple_http_client() self.auth = hs.get_auth() self.main_uri = hs.config.worker_main_http_uri @@ -84,7 +83,7 @@ class PresenceStatusStubServlet(ClientV1RestServlet): class KeyUploadServlet(RestServlet): - PATTERNS = client_v2_patterns("/keys/upload(/(?P[^/]+))?$") + PATTERNS = client_patterns("/keys/upload(/(?P[^/]+))?$") def __init__(self, hs): """ diff --git a/synapse/rest/client/v1/base.py b/synapse/rest/client/v1/base.py deleted file mode 100644 index dc63b661c0..0000000000 --- a/synapse/rest/client/v1/base.py +++ /dev/null @@ -1,65 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2014-2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""This module contains base REST classes for constructing client v1 servlets. -""" - -import logging -import re - -from synapse.api.urls import CLIENT_API_PREFIX -from synapse.http.servlet import RestServlet -from synapse.rest.client.transactions import HttpTransactionCache - -logger = logging.getLogger(__name__) - - -def client_path_patterns(path_regex, releases=(0,), include_in_unstable=True): - """Creates a regex compiled client path with the correct client path - prefix. - - Args: - path_regex (str): The regex string to match. 
This should NOT have a ^ - as this will be prefixed. - Returns: - SRE_Pattern - """ - patterns = [re.compile("^" + CLIENT_API_PREFIX + "/api/v1" + path_regex)] - if include_in_unstable: - unstable_prefix = CLIENT_API_PREFIX + "/unstable" - patterns.append(re.compile("^" + unstable_prefix + path_regex)) - for release in releases: - new_prefix = CLIENT_API_PREFIX + "/r%d" % (release,) - patterns.append(re.compile("^" + new_prefix + path_regex)) - return patterns - - -class ClientV1RestServlet(RestServlet): - """A base Synapse REST Servlet for the client version 1 API. - """ - - # This subclass was presumably created to allow the auth for the v1 - # protocol version to be different, however this behaviour was removed. - # it may no longer be necessary - - def __init__(self, hs): - """ - Args: - hs (synapse.server.HomeServer): - """ - self.hs = hs - self.builder_factory = hs.get_event_builder_factory() - self.auth = hs.get_auth() - self.txns = HttpTransactionCache(hs) diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py index 0220acf644..0035182bb9 100644 --- a/synapse/rest/client/v1/directory.py +++ b/synapse/rest/client/v1/directory.py @@ -19,11 +19,10 @@ import logging from twisted.internet import defer from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError -from synapse.http.servlet import parse_json_object_from_request +from synapse.http.servlet import RestServlet, parse_json_object_from_request +from synapse.rest.client.v2_alpha._base import client_patterns from synapse.types import RoomAlias -from .base import ClientV1RestServlet, client_path_patterns - logger = logging.getLogger(__name__) @@ -33,13 +32,14 @@ def register_servlets(hs, http_server): ClientAppserviceDirectoryListServer(hs).register(http_server) -class ClientDirectoryServer(ClientV1RestServlet): - PATTERNS = client_path_patterns("/directory/room/(?P[^/]*)$") +class ClientDirectoryServer(RestServlet): + PATTERNS = client_patterns("/directory/room/(?P[^/]*)$", v1=True) def __init__(self, hs): - super(ClientDirectoryServer, self).__init__(hs) + super(ClientDirectoryServer, self).__init__() self.store = hs.get_datastore() self.handlers = hs.get_handlers() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, room_alias): @@ -120,13 +120,14 @@ class ClientDirectoryServer(ClientV1RestServlet): defer.returnValue((200, {})) -class ClientDirectoryListServer(ClientV1RestServlet): - PATTERNS = client_path_patterns("/directory/list/room/(?P[^/]*)$") +class ClientDirectoryListServer(RestServlet): + PATTERNS = client_patterns("/directory/list/room/(?P[^/]*)$", v1=True) def __init__(self, hs): - super(ClientDirectoryListServer, self).__init__(hs) + super(ClientDirectoryListServer, self).__init__() self.store = hs.get_datastore() self.handlers = hs.get_handlers() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, room_id): @@ -162,15 +163,16 @@ class ClientDirectoryListServer(ClientV1RestServlet): defer.returnValue((200, {})) -class ClientAppserviceDirectoryListServer(ClientV1RestServlet): - PATTERNS = client_path_patterns( - "/directory/list/appservice/(?P[^/]*)/(?P[^/]*)$" +class ClientAppserviceDirectoryListServer(RestServlet): + PATTERNS = client_patterns( + "/directory/list/appservice/(?P[^/]*)/(?P[^/]*)$", v1=True ) def __init__(self, hs): - super(ClientAppserviceDirectoryListServer, self).__init__(hs) + super(ClientAppserviceDirectoryListServer, self).__init__() self.store = hs.get_datastore() self.handlers = hs.get_handlers() + 
self.auth = hs.get_auth() def on_PUT(self, request, network_id, room_id): content = parse_json_object_from_request(request) diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py index c3b0a39ab7..84ca36270b 100644 --- a/synapse/rest/client/v1/events.py +++ b/synapse/rest/client/v1/events.py @@ -19,21 +19,22 @@ import logging from twisted.internet import defer from synapse.api.errors import SynapseError +from synapse.http.servlet import RestServlet +from synapse.rest.client.v2_alpha._base import client_patterns from synapse.streams.config import PaginationConfig -from .base import ClientV1RestServlet, client_path_patterns - logger = logging.getLogger(__name__) -class EventStreamRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/events$") +class EventStreamRestServlet(RestServlet): + PATTERNS = client_patterns("/events$", v1=True) DEFAULT_LONGPOLL_TIME_MS = 30000 def __init__(self, hs): - super(EventStreamRestServlet, self).__init__(hs) + super(EventStreamRestServlet, self).__init__() self.event_stream_handler = hs.get_event_stream_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request): @@ -76,11 +77,11 @@ class EventStreamRestServlet(ClientV1RestServlet): # TODO: Unit test gets, with and without auth, with different kinds of events. -class EventRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/events/(?P[^/]*)$") +class EventRestServlet(RestServlet): + PATTERNS = client_patterns("/events/(?P[^/]*)$", v1=True) def __init__(self, hs): - super(EventRestServlet, self).__init__(hs) + super(EventRestServlet, self).__init__() self.clock = hs.get_clock() self.event_handler = hs.get_event_handler() self._event_serializer = hs.get_event_client_serializer() diff --git a/synapse/rest/client/v1/initial_sync.py b/synapse/rest/client/v1/initial_sync.py index 3ead75cb77..0fe5f2d79b 100644 --- a/synapse/rest/client/v1/initial_sync.py +++ b/synapse/rest/client/v1/initial_sync.py @@ -15,19 +15,19 @@ from twisted.internet import defer -from synapse.http.servlet import parse_boolean +from synapse.http.servlet import RestServlet, parse_boolean +from synapse.rest.client.v2_alpha._base import client_patterns from synapse.streams.config import PaginationConfig -from .base import ClientV1RestServlet, client_path_patterns - # TODO: Needs unit testing -class InitialSyncRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/initialSync$") +class InitialSyncRestServlet(RestServlet): + PATTERNS = client_patterns("/initialSync$", v1=True) def __init__(self, hs): - super(InitialSyncRestServlet, self).__init__(hs) + super(InitialSyncRestServlet, self).__init__() self.initial_sync_handler = hs.get_initial_sync_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request): diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 029039c162..3b60728628 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -29,12 +29,11 @@ from synapse.http.servlet import ( parse_json_object_from_request, parse_string, ) +from synapse.rest.client.v2_alpha._base import client_patterns from synapse.rest.well_known import WellKnownBuilder from synapse.types import UserID, map_username_to_mxid_localpart from synapse.util.msisdn import phone_number_to_msisdn -from .base import ClientV1RestServlet, client_path_patterns - logger = logging.getLogger(__name__) @@ -81,15 +80,16 @@ def login_id_thirdparty_from_phone(identifier): } -class 
LoginRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/login$") +class LoginRestServlet(RestServlet): + PATTERNS = client_patterns("/login$", v1=True) CAS_TYPE = "m.login.cas" SSO_TYPE = "m.login.sso" TOKEN_TYPE = "m.login.token" JWT_TYPE = "m.login.jwt" def __init__(self, hs): - super(LoginRestServlet, self).__init__(hs) + super(LoginRestServlet, self).__init__() + self.hs = hs self.jwt_enabled = hs.config.jwt_enabled self.jwt_secret = hs.config.jwt_secret self.jwt_algorithm = hs.config.jwt_algorithm @@ -371,7 +371,7 @@ class LoginRestServlet(ClientV1RestServlet): class CasRedirectServlet(RestServlet): - PATTERNS = client_path_patterns("/login/(cas|sso)/redirect") + PATTERNS = client_patterns("/login/(cas|sso)/redirect", v1=True) def __init__(self, hs): super(CasRedirectServlet, self).__init__() @@ -394,27 +394,27 @@ class CasRedirectServlet(RestServlet): finish_request(request) -class CasTicketServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/login/cas/ticket") +class CasTicketServlet(RestServlet): + PATTERNS = client_patterns("/login/cas/ticket", v1=True) def __init__(self, hs): - super(CasTicketServlet, self).__init__(hs) + super(CasTicketServlet, self).__init__() self.cas_server_url = hs.config.cas_server_url self.cas_service_url = hs.config.cas_service_url self.cas_required_attributes = hs.config.cas_required_attributes self._sso_auth_handler = SSOAuthHandler(hs) + self._http_client = hs.get_simple_http_client() @defer.inlineCallbacks def on_GET(self, request): client_redirect_url = parse_string(request, "redirectUrl", required=True) - http_client = self.hs.get_simple_http_client() uri = self.cas_server_url + "/proxyValidate" args = { "ticket": parse_string(request, "ticket", required=True), "service": self.cas_service_url } try: - body = yield http_client.get_raw(uri, args) + body = yield self._http_client.get_raw(uri, args) except PartialDownloadError as pde: # Twisted raises this error if the connection is closed, # even if that's being used old-http style to signal end-of-data diff --git a/synapse/rest/client/v1/logout.py b/synapse/rest/client/v1/logout.py index ba20e75033..b8064f261e 100644 --- a/synapse/rest/client/v1/logout.py +++ b/synapse/rest/client/v1/logout.py @@ -17,17 +17,18 @@ import logging from twisted.internet import defer -from .base import ClientV1RestServlet, client_path_patterns +from synapse.http.servlet import RestServlet +from synapse.rest.client.v2_alpha._base import client_patterns logger = logging.getLogger(__name__) -class LogoutRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/logout$") +class LogoutRestServlet(RestServlet): + PATTERNS = client_patterns("/logout$", v1=True) def __init__(self, hs): - super(LogoutRestServlet, self).__init__(hs) - self._auth = hs.get_auth() + super(LogoutRestServlet, self).__init__() + self.auth = hs.get_auth() self._auth_handler = hs.get_auth_handler() self._device_handler = hs.get_device_handler() @@ -41,7 +42,7 @@ class LogoutRestServlet(ClientV1RestServlet): if requester.device_id is None: # the acccess token wasn't associated with a device. 
# Just delete the access token - access_token = self._auth.get_access_token_from_request(request) + access_token = self.auth.get_access_token_from_request(request) yield self._auth_handler.delete_access_token(access_token) else: yield self._device_handler.delete_device( @@ -50,11 +51,11 @@ class LogoutRestServlet(ClientV1RestServlet): defer.returnValue((200, {})) -class LogoutAllRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/logout/all$") +class LogoutAllRestServlet(RestServlet): + PATTERNS = client_patterns("/logout/all$", v1=True) def __init__(self, hs): - super(LogoutAllRestServlet, self).__init__(hs) + super(LogoutAllRestServlet, self).__init__() self.auth = hs.get_auth() self._auth_handler = hs.get_auth_handler() self._device_handler = hs.get_device_handler() diff --git a/synapse/rest/client/v1/presence.py b/synapse/rest/client/v1/presence.py index 045d5a20ac..e263da3cb7 100644 --- a/synapse/rest/client/v1/presence.py +++ b/synapse/rest/client/v1/presence.py @@ -23,21 +23,22 @@ from twisted.internet import defer from synapse.api.errors import AuthError, SynapseError from synapse.handlers.presence import format_user_presence_state -from synapse.http.servlet import parse_json_object_from_request +from synapse.http.servlet import RestServlet, parse_json_object_from_request +from synapse.rest.client.v2_alpha._base import client_patterns from synapse.types import UserID -from .base import ClientV1RestServlet, client_path_patterns - logger = logging.getLogger(__name__) -class PresenceStatusRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/presence/(?P[^/]*)/status") +class PresenceStatusRestServlet(RestServlet): + PATTERNS = client_patterns("/presence/(?P[^/]*)/status", v1=True) def __init__(self, hs): - super(PresenceStatusRestServlet, self).__init__(hs) + super(PresenceStatusRestServlet, self).__init__() + self.hs = hs self.presence_handler = hs.get_presence_handler() self.clock = hs.get_clock() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, user_id): diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py index eac1966c5e..e15d9d82a6 100644 --- a/synapse/rest/client/v1/profile.py +++ b/synapse/rest/client/v1/profile.py @@ -16,18 +16,19 @@ """ This module contains REST servlets to do with profile: /profile/ """ from twisted.internet import defer -from synapse.http.servlet import parse_json_object_from_request +from synapse.http.servlet import RestServlet, parse_json_object_from_request +from synapse.rest.client.v2_alpha._base import client_patterns from synapse.types import UserID -from .base import ClientV1RestServlet, client_path_patterns - -class ProfileDisplaynameRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/profile/(?P[^/]*)/displayname") +class ProfileDisplaynameRestServlet(RestServlet): + PATTERNS = client_patterns("/profile/(?P[^/]*)/displayname", v1=True) def __init__(self, hs): - super(ProfileDisplaynameRestServlet, self).__init__(hs) + super(ProfileDisplaynameRestServlet, self).__init__() + self.hs = hs self.profile_handler = hs.get_profile_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, user_id): @@ -71,12 +72,14 @@ class ProfileDisplaynameRestServlet(ClientV1RestServlet): return (200, {}) -class ProfileAvatarURLRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/profile/(?P[^/]*)/avatar_url") +class ProfileAvatarURLRestServlet(RestServlet): + PATTERNS = 
client_patterns("/profile/(?P[^/]*)/avatar_url", v1=True) def __init__(self, hs): - super(ProfileAvatarURLRestServlet, self).__init__(hs) + super(ProfileAvatarURLRestServlet, self).__init__() + self.hs = hs self.profile_handler = hs.get_profile_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, user_id): @@ -119,12 +122,14 @@ class ProfileAvatarURLRestServlet(ClientV1RestServlet): return (200, {}) -class ProfileRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/profile/(?P[^/]*)") +class ProfileRestServlet(RestServlet): + PATTERNS = client_patterns("/profile/(?P[^/]*)", v1=True) def __init__(self, hs): - super(ProfileRestServlet, self).__init__(hs) + super(ProfileRestServlet, self).__init__() + self.hs = hs self.profile_handler = hs.get_profile_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, user_id): diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index 506ec95ddd..3d6326fe2f 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -21,22 +21,22 @@ from synapse.api.errors import ( SynapseError, UnrecognizedRequestError, ) -from synapse.http.servlet import parse_json_value_from_request, parse_string +from synapse.http.servlet import RestServlet, parse_json_value_from_request, parse_string from synapse.push.baserules import BASE_RULE_IDS from synapse.push.clientformat import format_push_rules_for_user from synapse.push.rulekinds import PRIORITY_CLASS_MAP +from synapse.rest.client.v2_alpha._base import client_patterns from synapse.storage.push_rule import InconsistentRuleException, RuleNotFoundException -from .base import ClientV1RestServlet, client_path_patterns - -class PushRuleRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/(?Ppushrules/.*)$") +class PushRuleRestServlet(RestServlet): + PATTERNS = client_patterns("/(?Ppushrules/.*)$", v1=True) SLIGHTLY_PEDANTIC_TRAILING_SLASH_ERROR = ( "Unrecognised request: You probably wanted a trailing slash") def __init__(self, hs): - super(PushRuleRestServlet, self).__init__(hs) + super(PushRuleRestServlet, self).__init__() + self.auth = hs.get_auth() self.store = hs.get_datastore() self.notifier = hs.get_notifier() self._is_worker = hs.config.worker_app is not None diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py index 4c07ae7f45..15d860db37 100644 --- a/synapse/rest/client/v1/pusher.py +++ b/synapse/rest/client/v1/pusher.py @@ -26,17 +26,18 @@ from synapse.http.servlet import ( parse_string, ) from synapse.push import PusherConfigException - -from .base import ClientV1RestServlet, client_path_patterns +from synapse.rest.client.v2_alpha._base import client_patterns logger = logging.getLogger(__name__) -class PushersRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/pushers$") +class PushersRestServlet(RestServlet): + PATTERNS = client_patterns("/pushers$", v1=True) def __init__(self, hs): - super(PushersRestServlet, self).__init__(hs) + super(PushersRestServlet, self).__init__() + self.hs = hs + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request): @@ -69,11 +70,13 @@ class PushersRestServlet(ClientV1RestServlet): return 200, {} -class PushersSetRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/pushers/set$") +class PushersSetRestServlet(RestServlet): + PATTERNS = client_patterns("/pushers/set$", v1=True) def __init__(self, hs): - super(PushersSetRestServlet, 
self).__init__(hs) + super(PushersSetRestServlet, self).__init__() + self.hs = hs + self.auth = hs.get_auth() self.notifier = hs.get_notifier() self.pusher_pool = self.hs.get_pusherpool() @@ -141,7 +144,7 @@ class PushersRemoveRestServlet(RestServlet): """ To allow pusher to be delete by clicking a link (ie. GET request) """ - PATTERNS = client_path_patterns("/pushers/remove$") + PATTERNS = client_patterns("/pushers/remove$", v1=True) SUCCESS_HTML = b"You have been unsubscribed" def __init__(self, hs): diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index b92c6a9a9c..e8f672c4ba 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -28,37 +28,45 @@ from synapse.api.errors import AuthError, Codes, SynapseError from synapse.api.filtering import Filter from synapse.events.utils import format_event_for_client_v2 from synapse.http.servlet import ( + RestServlet, assert_params_in_dict, parse_integer, parse_json_object_from_request, parse_string, ) +from synapse.rest.client.transactions import HttpTransactionCache +from synapse.rest.client.v2_alpha._base import client_patterns from synapse.storage.state import StateFilter from synapse.streams.config import PaginationConfig from synapse.types import RoomAlias, RoomID, StreamToken, ThirdPartyInstanceID, UserID -from .base import ClientV1RestServlet, client_path_patterns - logger = logging.getLogger(__name__) -class RoomCreateRestServlet(ClientV1RestServlet): +class TransactionRestServlet(RestServlet): + def __init__(self, hs): + super(TransactionRestServlet, self).__init__() + self.txns = HttpTransactionCache(hs) + + +class RoomCreateRestServlet(TransactionRestServlet): # No PATTERN; we have custom dispatch rules here def __init__(self, hs): super(RoomCreateRestServlet, self).__init__(hs) self._room_creation_handler = hs.get_room_creation_handler() + self.auth = hs.get_auth() def register(self, http_server): PATTERNS = "/createRoom" register_txn_path(self, PATTERNS, http_server) # define CORS for all of /rooms in RoomCreateRestServlet for simplicity http_server.register_paths("OPTIONS", - client_path_patterns("/rooms(?:/.*)?$"), + client_patterns("/rooms(?:/.*)?$", v1=True), self.on_OPTIONS) # define CORS for /createRoom[/txnid] http_server.register_paths("OPTIONS", - client_path_patterns("/createRoom(?:/.*)?$"), + client_patterns("/createRoom(?:/.*)?$", v1=True), self.on_OPTIONS) def on_PUT(self, request, txn_id): @@ -85,13 +93,14 @@ class RoomCreateRestServlet(ClientV1RestServlet): # TODO: Needs unit testing for generic events -class RoomStateEventRestServlet(ClientV1RestServlet): +class RoomStateEventRestServlet(TransactionRestServlet): def __init__(self, hs): super(RoomStateEventRestServlet, self).__init__(hs) self.handlers = hs.get_handlers() self.event_creation_handler = hs.get_event_creation_handler() self.room_member_handler = hs.get_room_member_handler() self.message_handler = hs.get_message_handler() + self.auth = hs.get_auth() def register(self, http_server): # /room/$roomid/state/$eventtype @@ -102,16 +111,16 @@ class RoomStateEventRestServlet(ClientV1RestServlet): "(?P[^/]*)/(?P[^/]*)$") http_server.register_paths("GET", - client_path_patterns(state_key), + client_patterns(state_key, v1=True), self.on_GET) http_server.register_paths("PUT", - client_path_patterns(state_key), + client_patterns(state_key, v1=True), self.on_PUT) http_server.register_paths("GET", - client_path_patterns(no_state_key), + client_patterns(no_state_key, v1=True), self.on_GET_no_state_key) 
http_server.register_paths("PUT", - client_path_patterns(no_state_key), + client_patterns(no_state_key, v1=True), self.on_PUT_no_state_key) def on_GET_no_state_key(self, request, room_id, event_type): @@ -185,11 +194,12 @@ class RoomStateEventRestServlet(ClientV1RestServlet): # TODO: Needs unit testing for generic events + feedback -class RoomSendEventRestServlet(ClientV1RestServlet): +class RoomSendEventRestServlet(TransactionRestServlet): def __init__(self, hs): super(RoomSendEventRestServlet, self).__init__(hs) self.event_creation_handler = hs.get_event_creation_handler() + self.auth = hs.get_auth() def register(self, http_server): # /rooms/$roomid/send/$event_type[/$txn_id] @@ -229,10 +239,11 @@ class RoomSendEventRestServlet(ClientV1RestServlet): # TODO: Needs unit testing for room ID + alias joins -class JoinRoomAliasServlet(ClientV1RestServlet): +class JoinRoomAliasServlet(TransactionRestServlet): def __init__(self, hs): super(JoinRoomAliasServlet, self).__init__(hs) self.room_member_handler = hs.get_room_member_handler() + self.auth = hs.get_auth() def register(self, http_server): # /join/$room_identifier[/$txn_id] @@ -291,8 +302,13 @@ class JoinRoomAliasServlet(ClientV1RestServlet): # TODO: Needs unit testing -class PublicRoomListRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/publicRooms$") +class PublicRoomListRestServlet(TransactionRestServlet): + PATTERNS = client_patterns("/publicRooms$", v1=True) + + def __init__(self, hs): + super(PublicRoomListRestServlet, self).__init__(hs) + self.hs = hs + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request): @@ -382,12 +398,13 @@ class PublicRoomListRestServlet(ClientV1RestServlet): # TODO: Needs unit testing -class RoomMemberListRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/rooms/(?P[^/]*)/members$") +class RoomMemberListRestServlet(RestServlet): + PATTERNS = client_patterns("/rooms/(?P[^/]*)/members$", v1=True) def __init__(self, hs): - super(RoomMemberListRestServlet, self).__init__(hs) + super(RoomMemberListRestServlet, self).__init__() self.message_handler = hs.get_message_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, room_id): @@ -436,12 +453,13 @@ class RoomMemberListRestServlet(ClientV1RestServlet): # deprecated in favour of /members?membership=join? 
# except it does custom AS logic and has a simpler return format -class JoinedRoomMemberListRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/rooms/(?P[^/]*)/joined_members$") +class JoinedRoomMemberListRestServlet(RestServlet): + PATTERNS = client_patterns("/rooms/(?P[^/]*)/joined_members$", v1=True) def __init__(self, hs): - super(JoinedRoomMemberListRestServlet, self).__init__(hs) + super(JoinedRoomMemberListRestServlet, self).__init__() self.message_handler = hs.get_message_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, room_id): @@ -457,12 +475,13 @@ class JoinedRoomMemberListRestServlet(ClientV1RestServlet): # TODO: Needs better unit testing -class RoomMessageListRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/rooms/(?P[^/]*)/messages$") +class RoomMessageListRestServlet(RestServlet): + PATTERNS = client_patterns("/rooms/(?P[^/]*)/messages$", v1=True) def __init__(self, hs): - super(RoomMessageListRestServlet, self).__init__(hs) + super(RoomMessageListRestServlet, self).__init__() self.pagination_handler = hs.get_pagination_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, room_id): @@ -491,12 +510,13 @@ class RoomMessageListRestServlet(ClientV1RestServlet): # TODO: Needs unit testing -class RoomStateRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/rooms/(?P[^/]*)/state$") +class RoomStateRestServlet(RestServlet): + PATTERNS = client_patterns("/rooms/(?P[^/]*)/state$", v1=True) def __init__(self, hs): - super(RoomStateRestServlet, self).__init__(hs) + super(RoomStateRestServlet, self).__init__() self.message_handler = hs.get_message_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, room_id): @@ -511,12 +531,13 @@ class RoomStateRestServlet(ClientV1RestServlet): # TODO: Needs unit testing -class RoomInitialSyncRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/rooms/(?P[^/]*)/initialSync$") +class RoomInitialSyncRestServlet(RestServlet): + PATTERNS = client_patterns("/rooms/(?P[^/]*)/initialSync$", v1=True) def __init__(self, hs): - super(RoomInitialSyncRestServlet, self).__init__(hs) + super(RoomInitialSyncRestServlet, self).__init__() self.initial_sync_handler = hs.get_initial_sync_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, room_id): @@ -530,16 +551,17 @@ class RoomInitialSyncRestServlet(ClientV1RestServlet): defer.returnValue((200, content)) -class RoomEventServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns( - "/rooms/(?P[^/]*)/event/(?P[^/]*)$" +class RoomEventServlet(RestServlet): + PATTERNS = client_patterns( + "/rooms/(?P[^/]*)/event/(?P[^/]*)$", v1=True ) def __init__(self, hs): - super(RoomEventServlet, self).__init__(hs) + super(RoomEventServlet, self).__init__() self.clock = hs.get_clock() self.event_handler = hs.get_event_handler() self._event_serializer = hs.get_event_client_serializer() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, room_id, event_id): @@ -554,16 +576,17 @@ class RoomEventServlet(ClientV1RestServlet): defer.returnValue((404, "Event not found.")) -class RoomEventContextServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns( - "/rooms/(?P[^/]*)/context/(?P[^/]*)$" +class RoomEventContextServlet(RestServlet): + PATTERNS = client_patterns( + "/rooms/(?P[^/]*)/context/(?P[^/]*)$", v1=True ) def __init__(self, hs): - super(RoomEventContextServlet, self).__init__(hs) + 
super(RoomEventContextServlet, self).__init__() self.clock = hs.get_clock() self.room_context_handler = hs.get_room_context_handler() self._event_serializer = hs.get_event_client_serializer() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, room_id, event_id): @@ -609,10 +632,11 @@ class RoomEventContextServlet(ClientV1RestServlet): defer.returnValue((200, results)) -class RoomForgetRestServlet(ClientV1RestServlet): +class RoomForgetRestServlet(TransactionRestServlet): def __init__(self, hs): super(RoomForgetRestServlet, self).__init__(hs) self.room_member_handler = hs.get_room_member_handler() + self.auth = hs.get_auth() def register(self, http_server): PATTERNS = ("/rooms/(?P[^/]*)/forget") @@ -639,11 +663,12 @@ class RoomForgetRestServlet(ClientV1RestServlet): # TODO: Needs unit testing -class RoomMembershipRestServlet(ClientV1RestServlet): +class RoomMembershipRestServlet(TransactionRestServlet): def __init__(self, hs): super(RoomMembershipRestServlet, self).__init__(hs) self.room_member_handler = hs.get_room_member_handler() + self.auth = hs.get_auth() def register(self, http_server): # /rooms/$roomid/[invite|join|leave] @@ -722,11 +747,12 @@ class RoomMembershipRestServlet(ClientV1RestServlet): ) -class RoomRedactEventRestServlet(ClientV1RestServlet): +class RoomRedactEventRestServlet(TransactionRestServlet): def __init__(self, hs): super(RoomRedactEventRestServlet, self).__init__(hs) self.handlers = hs.get_handlers() self.event_creation_handler = hs.get_event_creation_handler() + self.auth = hs.get_auth() def register(self, http_server): PATTERNS = ("/rooms/(?P[^/]*)/redact/(?P[^/]*)") @@ -757,15 +783,16 @@ class RoomRedactEventRestServlet(ClientV1RestServlet): ) -class RoomTypingRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns( - "/rooms/(?P[^/]*)/typing/(?P[^/]*)$" +class RoomTypingRestServlet(RestServlet): + PATTERNS = client_patterns( + "/rooms/(?P[^/]*)/typing/(?P[^/]*)$", v1=True ) def __init__(self, hs): - super(RoomTypingRestServlet, self).__init__(hs) + super(RoomTypingRestServlet, self).__init__() self.presence_handler = hs.get_presence_handler() self.typing_handler = hs.get_typing_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_PUT(self, request, room_id, user_id): @@ -798,14 +825,13 @@ class RoomTypingRestServlet(ClientV1RestServlet): defer.returnValue((200, {})) -class SearchRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns( - "/search$" - ) +class SearchRestServlet(RestServlet): + PATTERNS = client_patterns("/search$", v1=True) def __init__(self, hs): - super(SearchRestServlet, self).__init__(hs) + super(SearchRestServlet, self).__init__() self.handlers = hs.get_handlers() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_POST(self, request): @@ -823,12 +849,13 @@ class SearchRestServlet(ClientV1RestServlet): defer.returnValue((200, results)) -class JoinedRoomsRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/joined_rooms$") +class JoinedRoomsRestServlet(RestServlet): + PATTERNS = client_patterns("/joined_rooms$", v1=True) def __init__(self, hs): - super(JoinedRoomsRestServlet, self).__init__(hs) + super(JoinedRoomsRestServlet, self).__init__() self.store = hs.get_datastore() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request): @@ -853,18 +880,18 @@ def register_txn_path(servlet, regex_string, http_server, with_get=False): """ http_server.register_paths( "POST", - client_path_patterns(regex_string + "$"), + 
client_patterns(regex_string + "$", v1=True), servlet.on_POST ) http_server.register_paths( "PUT", - client_path_patterns(regex_string + "/(?P[^/]*)$"), + client_patterns(regex_string + "/(?P[^/]*)$", v1=True), servlet.on_PUT ) if with_get: http_server.register_paths( "GET", - client_path_patterns(regex_string + "/(?P[^/]*)$"), + client_patterns(regex_string + "/(?P[^/]*)$", v1=True), servlet.on_GET ) diff --git a/synapse/rest/client/v1/voip.py b/synapse/rest/client/v1/voip.py index 53da905eea..0975df84cf 100644 --- a/synapse/rest/client/v1/voip.py +++ b/synapse/rest/client/v1/voip.py @@ -19,11 +19,16 @@ import hmac from twisted.internet import defer -from .base import ClientV1RestServlet, client_path_patterns +from synapse.http.servlet import RestServlet +from synapse.rest.client.v2_alpha._base import client_patterns -class VoipRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/voip/turnServer$") +class VoipRestServlet(RestServlet): + PATTERNS = client_patterns("/voip/turnServer$", v1=True) + + def __init__(self, hs): + super(VoipRestServlet, self).__init__() + self.hs = hs @defer.inlineCallbacks def on_GET(self, request): diff --git a/synapse/rest/client/v2_alpha/_base.py b/synapse/rest/client/v2_alpha/_base.py index 24ac26bf03..5236d5d566 100644 --- a/synapse/rest/client/v2_alpha/_base.py +++ b/synapse/rest/client/v2_alpha/_base.py @@ -26,8 +26,7 @@ from synapse.api.urls import CLIENT_API_PREFIX logger = logging.getLogger(__name__) -def client_v2_patterns(path_regex, releases=(0,), - unstable=True): +def client_patterns(path_regex, releases=(0,), unstable=True, v1=False): """Creates a regex compiled client path with the correct client path prefix. @@ -41,6 +40,9 @@ def client_v2_patterns(path_regex, releases=(0,), if unstable: unstable_prefix = CLIENT_API_PREFIX + "/unstable" patterns.append(re.compile("^" + unstable_prefix + path_regex)) + if v1: + v1_prefix = CLIENT_API_PREFIX + "/api/v1" + patterns.append(re.compile("^" + v1_prefix + path_regex)) for release in releases: new_prefix = CLIENT_API_PREFIX + "/r%d" % (release,) patterns.append(re.compile("^" + new_prefix + path_regex)) diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index ee069179f0..ca35dc3c83 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -30,13 +30,13 @@ from synapse.http.servlet import ( from synapse.util.msisdn import phone_number_to_msisdn from synapse.util.threepids import check_3pid_allowed -from ._base import client_v2_patterns, interactive_auth_handler +from ._base import client_patterns, interactive_auth_handler logger = logging.getLogger(__name__) class EmailPasswordRequestTokenRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/account/password/email/requestToken$") + PATTERNS = client_patterns("/account/password/email/requestToken$") def __init__(self, hs): super(EmailPasswordRequestTokenRestServlet, self).__init__() @@ -70,7 +70,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): class MsisdnPasswordRequestTokenRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/account/password/msisdn/requestToken$") + PATTERNS = client_patterns("/account/password/msisdn/requestToken$") def __init__(self, hs): super(MsisdnPasswordRequestTokenRestServlet, self).__init__() @@ -108,7 +108,7 @@ class MsisdnPasswordRequestTokenRestServlet(RestServlet): class PasswordRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/account/password$") + PATTERNS = 
client_patterns("/account/password$") def __init__(self, hs): super(PasswordRestServlet, self).__init__() @@ -180,7 +180,7 @@ class PasswordRestServlet(RestServlet): class DeactivateAccountRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/account/deactivate$") + PATTERNS = client_patterns("/account/deactivate$") def __init__(self, hs): super(DeactivateAccountRestServlet, self).__init__() @@ -228,7 +228,7 @@ class DeactivateAccountRestServlet(RestServlet): class EmailThreepidRequestTokenRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/account/3pid/email/requestToken$") + PATTERNS = client_patterns("/account/3pid/email/requestToken$") def __init__(self, hs): self.hs = hs @@ -263,7 +263,7 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): class MsisdnThreepidRequestTokenRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/account/3pid/msisdn/requestToken$") + PATTERNS = client_patterns("/account/3pid/msisdn/requestToken$") def __init__(self, hs): self.hs = hs @@ -300,7 +300,7 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet): class ThreepidRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/account/3pid$") + PATTERNS = client_patterns("/account/3pid$") def __init__(self, hs): super(ThreepidRestServlet, self).__init__() @@ -364,7 +364,7 @@ class ThreepidRestServlet(RestServlet): class ThreepidDeleteRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/account/3pid/delete$") + PATTERNS = client_patterns("/account/3pid/delete$") def __init__(self, hs): super(ThreepidDeleteRestServlet, self).__init__() @@ -401,7 +401,7 @@ class ThreepidDeleteRestServlet(RestServlet): class WhoamiRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/account/whoami$") + PATTERNS = client_patterns("/account/whoami$") def __init__(self, hs): super(WhoamiRestServlet, self).__init__() diff --git a/synapse/rest/client/v2_alpha/account_data.py b/synapse/rest/client/v2_alpha/account_data.py index f171b8d626..574a6298ce 100644 --- a/synapse/rest/client/v2_alpha/account_data.py +++ b/synapse/rest/client/v2_alpha/account_data.py @@ -20,7 +20,7 @@ from twisted.internet import defer from synapse.api.errors import AuthError, NotFoundError, SynapseError from synapse.http.servlet import RestServlet, parse_json_object_from_request -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) @@ -30,7 +30,7 @@ class AccountDataServlet(RestServlet): PUT /user/{user_id}/account_data/{account_dataType} HTTP/1.1 GET /user/{user_id}/account_data/{account_dataType} HTTP/1.1 """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/user/(?P[^/]*)/account_data/(?P[^/]*)" ) @@ -79,7 +79,7 @@ class RoomAccountDataServlet(RestServlet): PUT /user/{user_id}/rooms/{room_id}/account_data/{account_dataType} HTTP/1.1 GET /user/{user_id}/rooms/{room_id}/account_data/{account_dataType} HTTP/1.1 """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/user/(?P[^/]*)" "/rooms/(?P[^/]*)" "/account_data/(?P[^/]*)" diff --git a/synapse/rest/client/v2_alpha/account_validity.py b/synapse/rest/client/v2_alpha/account_validity.py index fc8dbeb617..55c4ed5660 100644 --- a/synapse/rest/client/v2_alpha/account_validity.py +++ b/synapse/rest/client/v2_alpha/account_validity.py @@ -21,13 +21,13 @@ from synapse.api.errors import AuthError, SynapseError from synapse.http.server import finish_request from synapse.http.servlet import RestServlet -from ._base import client_v2_patterns +from ._base import client_patterns logger = 
logging.getLogger(__name__) class AccountValidityRenewServlet(RestServlet): - PATTERNS = client_v2_patterns("/account_validity/renew$") + PATTERNS = client_patterns("/account_validity/renew$") SUCCESS_HTML = b"Your account has been successfully renewed." def __init__(self, hs): @@ -60,7 +60,7 @@ class AccountValidityRenewServlet(RestServlet): class AccountValiditySendMailServlet(RestServlet): - PATTERNS = client_v2_patterns("/account_validity/send_mail$") + PATTERNS = client_patterns("/account_validity/send_mail$") def __init__(self, hs): """ diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index 4c380ab84d..8dfe5cba02 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -23,7 +23,7 @@ from synapse.api.urls import CLIENT_API_PREFIX from synapse.http.server import finish_request from synapse.http.servlet import RestServlet, parse_string -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) @@ -122,7 +122,7 @@ class AuthRestServlet(RestServlet): cannot be handled in the normal flow (with requests to the same endpoint). Current use is for web fallback auth. """ - PATTERNS = client_v2_patterns(r"/auth/(?P[\w\.]*)/fallback/web") + PATTERNS = client_patterns(r"/auth/(?P[\w\.]*)/fallback/web") def __init__(self, hs): super(AuthRestServlet, self).__init__() diff --git a/synapse/rest/client/v2_alpha/capabilities.py b/synapse/rest/client/v2_alpha/capabilities.py index 2b4892330c..fc7e2f4dd5 100644 --- a/synapse/rest/client/v2_alpha/capabilities.py +++ b/synapse/rest/client/v2_alpha/capabilities.py @@ -19,7 +19,7 @@ from twisted.internet import defer from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.http.servlet import RestServlet -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) @@ -27,7 +27,7 @@ logger = logging.getLogger(__name__) class CapabilitiesRestServlet(RestServlet): """End point to expose the capabilities of the server.""" - PATTERNS = client_v2_patterns("/capabilities$") + PATTERNS = client_patterns("/capabilities$") def __init__(self, hs): """ diff --git a/synapse/rest/client/v2_alpha/devices.py b/synapse/rest/client/v2_alpha/devices.py index 5a5be7c390..78665304a5 100644 --- a/synapse/rest/client/v2_alpha/devices.py +++ b/synapse/rest/client/v2_alpha/devices.py @@ -24,13 +24,13 @@ from synapse.http.servlet import ( parse_json_object_from_request, ) -from ._base import client_v2_patterns, interactive_auth_handler +from ._base import client_patterns, interactive_auth_handler logger = logging.getLogger(__name__) class DevicesRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/devices$") + PATTERNS = client_patterns("/devices$") def __init__(self, hs): """ @@ -56,7 +56,7 @@ class DeleteDevicesRestServlet(RestServlet): API for bulk deletion of devices. Accepts a JSON object with a devices key which lists the device_ids to delete. Requires user interactive auth. 
""" - PATTERNS = client_v2_patterns("/delete_devices") + PATTERNS = client_patterns("/delete_devices") def __init__(self, hs): super(DeleteDevicesRestServlet, self).__init__() @@ -95,7 +95,7 @@ class DeleteDevicesRestServlet(RestServlet): class DeviceRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/devices/(?P[^/]*)$") + PATTERNS = client_patterns("/devices/(?P[^/]*)$") def __init__(self, hs): """ diff --git a/synapse/rest/client/v2_alpha/filter.py b/synapse/rest/client/v2_alpha/filter.py index ae86728879..65db48c3cc 100644 --- a/synapse/rest/client/v2_alpha/filter.py +++ b/synapse/rest/client/v2_alpha/filter.py @@ -21,13 +21,13 @@ from synapse.api.errors import AuthError, Codes, StoreError, SynapseError from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.types import UserID -from ._base import client_v2_patterns, set_timeline_upper_limit +from ._base import client_patterns, set_timeline_upper_limit logger = logging.getLogger(__name__) class GetFilterRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/user/(?P[^/]*)/filter/(?P[^/]*)") + PATTERNS = client_patterns("/user/(?P[^/]*)/filter/(?P[^/]*)") def __init__(self, hs): super(GetFilterRestServlet, self).__init__() @@ -63,7 +63,7 @@ class GetFilterRestServlet(RestServlet): class CreateFilterRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/user/(?P[^/]*)/filter") + PATTERNS = client_patterns("/user/(?P[^/]*)/filter") def __init__(self, hs): super(CreateFilterRestServlet, self).__init__() diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index 21e02c07c0..d082385ec7 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -21,7 +21,7 @@ from twisted.internet import defer from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.types import GroupID -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) @@ -29,7 +29,7 @@ logger = logging.getLogger(__name__) class GroupServlet(RestServlet): """Get the group profile """ - PATTERNS = client_v2_patterns("/groups/(?P[^/]*)/profile$") + PATTERNS = client_patterns("/groups/(?P[^/]*)/profile$") def __init__(self, hs): super(GroupServlet, self).__init__() @@ -65,7 +65,7 @@ class GroupServlet(RestServlet): class GroupSummaryServlet(RestServlet): """Get the full group summary """ - PATTERNS = client_v2_patterns("/groups/(?P[^/]*)/summary$") + PATTERNS = client_patterns("/groups/(?P[^/]*)/summary$") def __init__(self, hs): super(GroupSummaryServlet, self).__init__() @@ -93,7 +93,7 @@ class GroupSummaryRoomsCatServlet(RestServlet): - /groups/:group/summary/rooms/:room_id - /groups/:group/summary/categories/:category/rooms/:room_id """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/summary" "(/categories/(?P[^/]+))?" 
"/rooms/(?P[^/]*)$" @@ -137,7 +137,7 @@ class GroupSummaryRoomsCatServlet(RestServlet): class GroupCategoryServlet(RestServlet): """Get/add/update/delete a group category """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/categories/(?P[^/]+)$" ) @@ -189,7 +189,7 @@ class GroupCategoryServlet(RestServlet): class GroupCategoriesServlet(RestServlet): """Get all group categories """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/categories/$" ) @@ -214,7 +214,7 @@ class GroupCategoriesServlet(RestServlet): class GroupRoleServlet(RestServlet): """Get/add/update/delete a group role """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/roles/(?P[^/]+)$" ) @@ -266,7 +266,7 @@ class GroupRoleServlet(RestServlet): class GroupRolesServlet(RestServlet): """Get all group roles """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/roles/$" ) @@ -295,7 +295,7 @@ class GroupSummaryUsersRoleServlet(RestServlet): - /groups/:group/summary/users/:room_id - /groups/:group/summary/roles/:role/users/:user_id """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/summary" "(/roles/(?P[^/]+))?" "/users/(?P[^/]*)$" @@ -339,7 +339,7 @@ class GroupSummaryUsersRoleServlet(RestServlet): class GroupRoomServlet(RestServlet): """Get all rooms in a group """ - PATTERNS = client_v2_patterns("/groups/(?P[^/]*)/rooms$") + PATTERNS = client_patterns("/groups/(?P[^/]*)/rooms$") def __init__(self, hs): super(GroupRoomServlet, self).__init__() @@ -360,7 +360,7 @@ class GroupRoomServlet(RestServlet): class GroupUsersServlet(RestServlet): """Get all users in a group """ - PATTERNS = client_v2_patterns("/groups/(?P[^/]*)/users$") + PATTERNS = client_patterns("/groups/(?P[^/]*)/users$") def __init__(self, hs): super(GroupUsersServlet, self).__init__() @@ -381,7 +381,7 @@ class GroupUsersServlet(RestServlet): class GroupInvitedUsersServlet(RestServlet): """Get users invited to a group """ - PATTERNS = client_v2_patterns("/groups/(?P[^/]*)/invited_users$") + PATTERNS = client_patterns("/groups/(?P[^/]*)/invited_users$") def __init__(self, hs): super(GroupInvitedUsersServlet, self).__init__() @@ -405,7 +405,7 @@ class GroupInvitedUsersServlet(RestServlet): class GroupSettingJoinPolicyServlet(RestServlet): """Set group join policy """ - PATTERNS = client_v2_patterns("/groups/(?P[^/]*)/settings/m.join_policy$") + PATTERNS = client_patterns("/groups/(?P[^/]*)/settings/m.join_policy$") def __init__(self, hs): super(GroupSettingJoinPolicyServlet, self).__init__() @@ -431,7 +431,7 @@ class GroupSettingJoinPolicyServlet(RestServlet): class GroupCreateServlet(RestServlet): """Create a group """ - PATTERNS = client_v2_patterns("/create_group$") + PATTERNS = client_patterns("/create_group$") def __init__(self, hs): super(GroupCreateServlet, self).__init__() @@ -462,7 +462,7 @@ class GroupCreateServlet(RestServlet): class GroupAdminRoomsServlet(RestServlet): """Add a room to the group """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/admin/rooms/(?P[^/]*)$" ) @@ -499,7 +499,7 @@ class GroupAdminRoomsServlet(RestServlet): class GroupAdminRoomsConfigServlet(RestServlet): """Update the config of a room in a group """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/admin/rooms/(?P[^/]*)" "/config/(?P[^/]*)$" ) @@ -526,7 +526,7 @@ class GroupAdminRoomsConfigServlet(RestServlet): class 
GroupAdminUsersInviteServlet(RestServlet): """Invite a user to the group """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/admin/users/invite/(?P[^/]*)$" ) @@ -555,7 +555,7 @@ class GroupAdminUsersInviteServlet(RestServlet): class GroupAdminUsersKickServlet(RestServlet): """Kick a user from the group """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/admin/users/remove/(?P[^/]*)$" ) @@ -581,7 +581,7 @@ class GroupAdminUsersKickServlet(RestServlet): class GroupSelfLeaveServlet(RestServlet): """Leave a joined group """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/self/leave$" ) @@ -607,7 +607,7 @@ class GroupSelfLeaveServlet(RestServlet): class GroupSelfJoinServlet(RestServlet): """Attempt to join a group, or knock """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/self/join$" ) @@ -633,7 +633,7 @@ class GroupSelfJoinServlet(RestServlet): class GroupSelfAcceptInviteServlet(RestServlet): """Accept a group invite """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/self/accept_invite$" ) @@ -659,7 +659,7 @@ class GroupSelfAcceptInviteServlet(RestServlet): class GroupSelfUpdatePublicityServlet(RestServlet): """Update whether we publicise a users membership of a group """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/self/update_publicity$" ) @@ -686,7 +686,7 @@ class GroupSelfUpdatePublicityServlet(RestServlet): class PublicisedGroupsForUserServlet(RestServlet): """Get the list of groups a user is advertising """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/publicised_groups/(?P[^/]*)$" ) @@ -711,7 +711,7 @@ class PublicisedGroupsForUserServlet(RestServlet): class PublicisedGroupsForUsersServlet(RestServlet): """Get the list of groups a user is advertising """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/publicised_groups$" ) @@ -739,7 +739,7 @@ class PublicisedGroupsForUsersServlet(RestServlet): class GroupsForUserServlet(RestServlet): """Get all groups the logged in user is joined to """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/joined_groups$" ) diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py index 8486086b51..4cbfbf5631 100644 --- a/synapse/rest/client/v2_alpha/keys.py +++ b/synapse/rest/client/v2_alpha/keys.py @@ -26,7 +26,7 @@ from synapse.http.servlet import ( ) from synapse.types import StreamToken -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) @@ -56,7 +56,7 @@ class KeyUploadServlet(RestServlet): }, } """ - PATTERNS = client_v2_patterns("/keys/upload(/(?P[^/]+))?$") + PATTERNS = client_patterns("/keys/upload(/(?P[^/]+))?$") def __init__(self, hs): """ @@ -130,7 +130,7 @@ class KeyQueryServlet(RestServlet): } } } } } } """ - PATTERNS = client_v2_patterns("/keys/query$") + PATTERNS = client_patterns("/keys/query$") def __init__(self, hs): """ @@ -159,7 +159,7 @@ class KeyChangesServlet(RestServlet): 200 OK { "changed": ["@foo:example.com"] } """ - PATTERNS = client_v2_patterns("/keys/changes$") + PATTERNS = client_patterns("/keys/changes$") def __init__(self, hs): """ @@ -209,7 +209,7 @@ class OneTimeKeyServlet(RestServlet): } } } } """ - PATTERNS = client_v2_patterns("/keys/claim$") + PATTERNS = client_patterns("/keys/claim$") def __init__(self, hs): super(OneTimeKeyServlet, self).__init__() diff --git 
a/synapse/rest/client/v2_alpha/notifications.py b/synapse/rest/client/v2_alpha/notifications.py index 0a1eb0ae45..53e666989b 100644 --- a/synapse/rest/client/v2_alpha/notifications.py +++ b/synapse/rest/client/v2_alpha/notifications.py @@ -20,13 +20,13 @@ from twisted.internet import defer from synapse.events.utils import format_event_for_client_v2_without_room_id from synapse.http.servlet import RestServlet, parse_integer, parse_string -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) class NotificationsServlet(RestServlet): - PATTERNS = client_v2_patterns("/notifications$") + PATTERNS = client_patterns("/notifications$") def __init__(self, hs): super(NotificationsServlet, self).__init__() diff --git a/synapse/rest/client/v2_alpha/openid.py b/synapse/rest/client/v2_alpha/openid.py index 01c90aa2a3..bb927d9f9d 100644 --- a/synapse/rest/client/v2_alpha/openid.py +++ b/synapse/rest/client/v2_alpha/openid.py @@ -22,7 +22,7 @@ from synapse.api.errors import AuthError from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.util.stringutils import random_string -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) @@ -56,7 +56,7 @@ class IdTokenServlet(RestServlet): "expires_in": 3600, } """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/user/(?P[^/]*)/openid/request_token" ) diff --git a/synapse/rest/client/v2_alpha/read_marker.py b/synapse/rest/client/v2_alpha/read_marker.py index a6e582a5ae..f4bd0d077f 100644 --- a/synapse/rest/client/v2_alpha/read_marker.py +++ b/synapse/rest/client/v2_alpha/read_marker.py @@ -19,13 +19,13 @@ from twisted.internet import defer from synapse.http.servlet import RestServlet, parse_json_object_from_request -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) class ReadMarkerRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/rooms/(?P[^/]*)/read_markers$") + PATTERNS = client_patterns("/rooms/(?P[^/]*)/read_markers$") def __init__(self, hs): super(ReadMarkerRestServlet, self).__init__() diff --git a/synapse/rest/client/v2_alpha/receipts.py b/synapse/rest/client/v2_alpha/receipts.py index de370cac45..fa12ac3e4d 100644 --- a/synapse/rest/client/v2_alpha/receipts.py +++ b/synapse/rest/client/v2_alpha/receipts.py @@ -20,13 +20,13 @@ from twisted.internet import defer from synapse.api.errors import SynapseError from synapse.http.servlet import RestServlet -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) class ReceiptRestServlet(RestServlet): - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/rooms/(?P[^/]*)" "/receipt/(?P[^/]*)" "/(?P[^/]*)$" diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 042f636135..79c085408b 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -43,7 +43,7 @@ from synapse.util.msisdn import phone_number_to_msisdn from synapse.util.ratelimitutils import FederationRateLimiter from synapse.util.threepids import check_3pid_allowed -from ._base import client_v2_patterns, interactive_auth_handler +from ._base import client_patterns, interactive_auth_handler # We ought to be using hmac.compare_digest() but on older pythons it doesn't # exist. 
It's a _really minor_ security flaw to use plain string comparison @@ -60,7 +60,7 @@ logger = logging.getLogger(__name__) class EmailRegisterRequestTokenRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/register/email/requestToken$") + PATTERNS = client_patterns("/register/email/requestToken$") def __init__(self, hs): """ @@ -98,7 +98,7 @@ class EmailRegisterRequestTokenRestServlet(RestServlet): class MsisdnRegisterRequestTokenRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/register/msisdn/requestToken$") + PATTERNS = client_patterns("/register/msisdn/requestToken$") def __init__(self, hs): """ @@ -142,7 +142,7 @@ class MsisdnRegisterRequestTokenRestServlet(RestServlet): class UsernameAvailabilityRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/register/available") + PATTERNS = client_patterns("/register/available") def __init__(self, hs): """ @@ -182,7 +182,7 @@ class UsernameAvailabilityRestServlet(RestServlet): class RegisterRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/register$") + PATTERNS = client_patterns("/register$") def __init__(self, hs): """ diff --git a/synapse/rest/client/v2_alpha/relations.py b/synapse/rest/client/v2_alpha/relations.py index 41e0a44936..f8f8742bdc 100644 --- a/synapse/rest/client/v2_alpha/relations.py +++ b/synapse/rest/client/v2_alpha/relations.py @@ -34,7 +34,7 @@ from synapse.http.servlet import ( from synapse.rest.client.transactions import HttpTransactionCache from synapse.storage.relations import AggregationPaginationToken, RelationPaginationToken -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) @@ -66,12 +66,12 @@ class RelationSendServlet(RestServlet): def register(self, http_server): http_server.register_paths( "POST", - client_v2_patterns(self.PATTERN + "$", releases=()), + client_patterns(self.PATTERN + "$", releases=()), self.on_PUT_or_POST, ) http_server.register_paths( "PUT", - client_v2_patterns(self.PATTERN + "/(?P[^/]*)$", releases=()), + client_patterns(self.PATTERN + "/(?P[^/]*)$", releases=()), self.on_PUT, ) @@ -120,7 +120,7 @@ class RelationPaginationServlet(RestServlet): filtered by relation type and event type. 
""" - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/rooms/(?P[^/]*)/relations/(?P[^/]*)" "(/(?P[^/]*)(/(?P[^/]*))?)?$", releases=(), @@ -197,7 +197,7 @@ class RelationAggregationPaginationServlet(RestServlet): } """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/rooms/(?P[^/]*)/aggregations/(?P[^/]*)" "(/(?P[^/]*)(/(?P[^/]*))?)?$", releases=(), @@ -269,7 +269,7 @@ class RelationAggregationGroupPaginationServlet(RestServlet): } """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/rooms/(?P[^/]*)/aggregations/(?P[^/]*)" "/(?P[^/]*)/(?P[^/]*)/(?P[^/]*)$", releases=(), diff --git a/synapse/rest/client/v2_alpha/report_event.py b/synapse/rest/client/v2_alpha/report_event.py index 95d2a71ec2..10198662a9 100644 --- a/synapse/rest/client/v2_alpha/report_event.py +++ b/synapse/rest/client/v2_alpha/report_event.py @@ -27,13 +27,13 @@ from synapse.http.servlet import ( parse_json_object_from_request, ) -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) class ReportEventRestServlet(RestServlet): - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/rooms/(?P[^/]*)/report/(?P[^/]*)$" ) diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 220a0de30b..87779645f9 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -24,13 +24,13 @@ from synapse.http.servlet import ( parse_string, ) -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) class RoomKeysServlet(RestServlet): - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/room_keys/keys(/(?P[^/]+))?(/(?P[^/]+))?$" ) @@ -256,7 +256,7 @@ class RoomKeysServlet(RestServlet): class RoomKeysNewVersionServlet(RestServlet): - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/room_keys/version$" ) @@ -314,7 +314,7 @@ class RoomKeysNewVersionServlet(RestServlet): class RoomKeysVersionServlet(RestServlet): - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/room_keys/version(/(?P[^/]+))?$" ) diff --git a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py index 62b8de71fa..c621a90fba 100644 --- a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py +++ b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py @@ -25,7 +25,7 @@ from synapse.http.servlet import ( parse_json_object_from_request, ) -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) @@ -47,7 +47,7 @@ class RoomUpgradeRestServlet(RestServlet): Args: hs (synapse.server.HomeServer): """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( # /rooms/$roomid/upgrade "/rooms/(?P[^/]*)/upgrade$", ) diff --git a/synapse/rest/client/v2_alpha/sendtodevice.py b/synapse/rest/client/v2_alpha/sendtodevice.py index 21e9cef2d0..120a713361 100644 --- a/synapse/rest/client/v2_alpha/sendtodevice.py +++ b/synapse/rest/client/v2_alpha/sendtodevice.py @@ -21,13 +21,13 @@ from synapse.http import servlet from synapse.http.servlet import parse_json_object_from_request from synapse.rest.client.transactions import HttpTransactionCache -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) class SendToDeviceRestServlet(servlet.RestServlet): - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( 
"/sendToDevice/(?P[^/]*)/(?P[^/]*)$", ) diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index d3025025e3..148fc6c985 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -32,7 +32,7 @@ from synapse.handlers.sync import SyncConfig from synapse.http.servlet import RestServlet, parse_boolean, parse_integer, parse_string from synapse.types import StreamToken -from ._base import client_v2_patterns, set_timeline_upper_limit +from ._base import client_patterns, set_timeline_upper_limit logger = logging.getLogger(__name__) @@ -73,7 +73,7 @@ class SyncRestServlet(RestServlet): } """ - PATTERNS = client_v2_patterns("/sync$") + PATTERNS = client_patterns("/sync$") ALLOWED_PRESENCE = set(["online", "offline", "unavailable"]) def __init__(self, hs): diff --git a/synapse/rest/client/v2_alpha/tags.py b/synapse/rest/client/v2_alpha/tags.py index 4fea614e95..ebff7cff45 100644 --- a/synapse/rest/client/v2_alpha/tags.py +++ b/synapse/rest/client/v2_alpha/tags.py @@ -20,7 +20,7 @@ from twisted.internet import defer from synapse.api.errors import AuthError from synapse.http.servlet import RestServlet, parse_json_object_from_request -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) @@ -29,7 +29,7 @@ class TagListServlet(RestServlet): """ GET /user/{user_id}/rooms/{room_id}/tags HTTP/1.1 """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/user/(?P[^/]*)/rooms/(?P[^/]*)/tags" ) @@ -54,7 +54,7 @@ class TagServlet(RestServlet): PUT /user/{user_id}/rooms/{room_id}/tags/{tag} HTTP/1.1 DELETE /user/{user_id}/rooms/{room_id}/tags/{tag} HTTP/1.1 """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/user/(?P[^/]*)/rooms/(?P[^/]*)/tags/(?P[^/]*)" ) diff --git a/synapse/rest/client/v2_alpha/thirdparty.py b/synapse/rest/client/v2_alpha/thirdparty.py index b9b5d07677..e7a987466a 100644 --- a/synapse/rest/client/v2_alpha/thirdparty.py +++ b/synapse/rest/client/v2_alpha/thirdparty.py @@ -21,13 +21,13 @@ from twisted.internet import defer from synapse.api.constants import ThirdPartyEntityKind from synapse.http.servlet import RestServlet -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) class ThirdPartyProtocolsServlet(RestServlet): - PATTERNS = client_v2_patterns("/thirdparty/protocols") + PATTERNS = client_patterns("/thirdparty/protocols") def __init__(self, hs): super(ThirdPartyProtocolsServlet, self).__init__() @@ -44,7 +44,7 @@ class ThirdPartyProtocolsServlet(RestServlet): class ThirdPartyProtocolServlet(RestServlet): - PATTERNS = client_v2_patterns("/thirdparty/protocol/(?P[^/]+)$") + PATTERNS = client_patterns("/thirdparty/protocol/(?P[^/]+)$") def __init__(self, hs): super(ThirdPartyProtocolServlet, self).__init__() @@ -66,7 +66,7 @@ class ThirdPartyProtocolServlet(RestServlet): class ThirdPartyUserServlet(RestServlet): - PATTERNS = client_v2_patterns("/thirdparty/user(/(?P[^/]+))?$") + PATTERNS = client_patterns("/thirdparty/user(/(?P[^/]+))?$") def __init__(self, hs): super(ThirdPartyUserServlet, self).__init__() @@ -89,7 +89,7 @@ class ThirdPartyUserServlet(RestServlet): class ThirdPartyLocationServlet(RestServlet): - PATTERNS = client_v2_patterns("/thirdparty/location(/(?P[^/]+))?$") + PATTERNS = client_patterns("/thirdparty/location(/(?P[^/]+))?$") def __init__(self, hs): super(ThirdPartyLocationServlet, self).__init__() diff --git a/synapse/rest/client/v2_alpha/tokenrefresh.py 
b/synapse/rest/client/v2_alpha/tokenrefresh.py index 6e76b9e9c2..6c366142e1 100644 --- a/synapse/rest/client/v2_alpha/tokenrefresh.py +++ b/synapse/rest/client/v2_alpha/tokenrefresh.py @@ -18,7 +18,7 @@ from twisted.internet import defer from synapse.api.errors import AuthError from synapse.http.servlet import RestServlet -from ._base import client_v2_patterns +from ._base import client_patterns class TokenRefreshRestServlet(RestServlet): @@ -26,7 +26,7 @@ class TokenRefreshRestServlet(RestServlet): Exchanges refresh tokens for a pair of an access token and a new refresh token. """ - PATTERNS = client_v2_patterns("/tokenrefresh") + PATTERNS = client_patterns("/tokenrefresh") def __init__(self, hs): super(TokenRefreshRestServlet, self).__init__() diff --git a/synapse/rest/client/v2_alpha/user_directory.py b/synapse/rest/client/v2_alpha/user_directory.py index 36b02de37f..69e4efc47a 100644 --- a/synapse/rest/client/v2_alpha/user_directory.py +++ b/synapse/rest/client/v2_alpha/user_directory.py @@ -20,13 +20,13 @@ from twisted.internet import defer from synapse.api.errors import SynapseError from synapse.http.servlet import RestServlet, parse_json_object_from_request -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) class UserDirectorySearchRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/user_directory/search$") + PATTERNS = client_patterns("/user_directory/search$") def __init__(self, hs): """ diff --git a/tests/__init__.py b/tests/__init__.py index d3181f9403..f7fc502f01 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -21,4 +21,4 @@ import tests.patch_inline_callbacks # attempt to do the patch before we load any synapse code tests.patch_inline_callbacks.do_patch() -util.DEFAULT_TIMEOUT_DURATION = 10 +util.DEFAULT_TIMEOUT_DURATION = 20 diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py index ee5f09041f..e5fc2fcd15 100644 --- a/tests/rest/admin/test_admin.py +++ b/tests/rest/admin/test_admin.py @@ -408,7 +408,6 @@ class ShutdownRoomTestCase(unittest.HomeserverTestCase): users_in_room = self.get_success(self.store.get_users_in_room(room_id)) self.assertEqual([], users_in_room) - @unittest.DEBUG def test_shutdown_room_block_peek(self): """Test that a world_readable room can no longer be peeked into after it has been shut down. diff --git a/tests/rest/client/v1/test_profile.py b/tests/rest/client/v1/test_profile.py index f4d0d48dad..72c7ed93cb 100644 --- a/tests/rest/client/v1/test_profile.py +++ b/tests/rest/client/v1/test_profile.py @@ -30,7 +30,7 @@ from tests import unittest from ....utils import MockHttpResource, setup_test_homeserver myid = "@1234ABCD:test" -PATH_PREFIX = "/_matrix/client/api/v1" +PATH_PREFIX = "/_matrix/client/r0" class MockHandlerProfileTestCase(unittest.TestCase): From fa4b54aca57bebc94e2b763abdae79343a08f969 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 3 Jun 2019 17:06:54 +0100 Subject: [PATCH 76/92] Ignore room state with null bytes in for room stats --- synapse/storage/stats.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/synapse/storage/stats.py b/synapse/storage/stats.py index 1c0b183a56..1f39ef211a 100644 --- a/synapse/storage/stats.py +++ b/synapse/storage/stats.py @@ -328,6 +328,21 @@ class StatsStore(StateDeltasStore): room_id (str) fields (dict[str:Any]) """ + + # For whatever reason some of the fields may contain null bytes, which + # postgres isn't a fan of, so we replace those fields with null. 
+ for col in ( + "join_rules", + "history_visibility", + "encryption", + "name", + "topic", + "avatar", + "canonical_alias" + ): + if "\0" in fields.get(col, ""): + fields[col] = None + return self._simple_upsert( table="room_state", keyvalues={"room_id": room_id}, From 4bd67db100efacc3d31a2f8187b7bdd4479d9bc3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 3 Jun 2019 17:08:33 +0100 Subject: [PATCH 77/92] Newsfile --- changelog.d/5324.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5324.feature diff --git a/changelog.d/5324.feature b/changelog.d/5324.feature new file mode 100644 index 0000000000..01285e965c --- /dev/null +++ b/changelog.d/5324.feature @@ -0,0 +1 @@ +Synapse now more efficiently collates room statistics. From fe2294ec8dc4b37d19930bd1ae0867645207af2e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 3 Jun 2019 17:17:35 +0100 Subject: [PATCH 78/92] Revert "Newsfile" This reverts commit 4bd67db100efacc3d31a2f8187b7bdd4479d9bc3. --- changelog.d/5324.feature | 1 - 1 file changed, 1 deletion(-) delete mode 100644 changelog.d/5324.feature diff --git a/changelog.d/5324.feature b/changelog.d/5324.feature deleted file mode 100644 index 01285e965c..0000000000 --- a/changelog.d/5324.feature +++ /dev/null @@ -1 +0,0 @@ -Synapse now more efficiently collates room statistics. From 0d67a8cd9de9564fcdaa1206c18b411b4c43b74a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 3 Jun 2019 17:17:57 +0100 Subject: [PATCH 79/92] Newsfile --- changelog.d/5324.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5324.feature diff --git a/changelog.d/5324.feature b/changelog.d/5324.feature new file mode 100644 index 0000000000..01285e965c --- /dev/null +++ b/changelog.d/5324.feature @@ -0,0 +1 @@ +Synapse now more efficiently collates room statistics. From 0a56966f7d4879f9d517c96a3c714accdce4e17f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 3 Jun 2019 17:42:52 +0100 Subject: [PATCH 80/92] Fix --- synapse/storage/stats.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/storage/stats.py b/synapse/storage/stats.py index 1f39ef211a..ff266b09b0 100644 --- a/synapse/storage/stats.py +++ b/synapse/storage/stats.py @@ -340,7 +340,8 @@ class StatsStore(StateDeltasStore): "avatar", "canonical_alias" ): - if "\0" in fields.get(col, ""): + field = fields.get(col) + if field and "\0" in field: fields[col] = None return self._simple_upsert( From fec2dcb1a538ab8ab447f724af1a94d5b3517197 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 3 Jun 2019 22:59:51 +0100 Subject: [PATCH 81/92] Enforce validity period on server_keys for fed requests. (#5321) When handling incoming federation requests, make sure that we have an up-to-date copy of the signing key. We do not yet enforce the validity period for event signatures. --- changelog.d/5321.bugfix | 1 + synapse/crypto/keyring.py | 167 ++++++++++++++++--------- synapse/federation/federation_base.py | 4 +- synapse/federation/transport/server.py | 4 +- synapse/groups/attestations.py | 5 +- tests/crypto/test_keyring.py | 135 ++++++++++++++++---- 6 files changed, 228 insertions(+), 88 deletions(-) create mode 100644 changelog.d/5321.bugfix diff --git a/changelog.d/5321.bugfix b/changelog.d/5321.bugfix new file mode 100644 index 0000000000..943a61956d --- /dev/null +++ b/changelog.d/5321.bugfix @@ -0,0 +1 @@ +Ensure that we have an up-to-date copy of the signing key when validating incoming federation requests. 
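As a minimal illustrative sketch (not part of the patch itself): a federation request handler might pass the new validity argument like this. Here `keyring`, `clock`, `origin` and `request_json` are assumed stand-ins for the homeserver's Keyring and Clock instances and the incoming signed request body.

from twisted.internet import defer

@defer.inlineCallbacks
def check_request_signature(keyring, clock, origin, request_json):
    # Require the origin's signing key to still be valid "now"; passing 0
    # instead would keep the old behaviour of not enforcing a validity period.
    yield keyring.verify_json_for_server(origin, request_json, clock.time_msec())
    defer.returnValue(True)
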
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index b2f4cea536..cdec06c88e 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -15,6 +15,7 @@ # limitations under the License. import logging +from collections import defaultdict import six from six import raise_from @@ -70,6 +71,9 @@ class VerifyKeyRequest(object): json_object(dict): The JSON object to verify. + minimum_valid_until_ts (int): time at which we require the signing key to + be valid. (0 implies we don't care) + deferred(Deferred[str, str, nacl.signing.VerifyKey]): A deferred (server_name, key_id, verify_key) tuple that resolves when a verify key has been fetched. The deferreds' callbacks are run with no @@ -82,7 +86,8 @@ class VerifyKeyRequest(object): server_name = attr.ib() key_ids = attr.ib() json_object = attr.ib() - deferred = attr.ib() + minimum_valid_until_ts = attr.ib() + deferred = attr.ib(default=attr.Factory(defer.Deferred)) class KeyLookupError(ValueError): @@ -90,14 +95,16 @@ class KeyLookupError(ValueError): class Keyring(object): - def __init__(self, hs): + def __init__(self, hs, key_fetchers=None): self.clock = hs.get_clock() - self._key_fetchers = ( - StoreKeyFetcher(hs), - PerspectivesKeyFetcher(hs), - ServerKeyFetcher(hs), - ) + if key_fetchers is None: + key_fetchers = ( + StoreKeyFetcher(hs), + PerspectivesKeyFetcher(hs), + ServerKeyFetcher(hs), + ) + self._key_fetchers = key_fetchers # map from server name to Deferred. Has an entry for each server with # an ongoing key download; the Deferred completes once the download @@ -106,9 +113,25 @@ class Keyring(object): # These are regular, logcontext-agnostic Deferreds. self.key_downloads = {} - def verify_json_for_server(self, server_name, json_object): + def verify_json_for_server(self, server_name, json_object, validity_time): + """Verify that a JSON object has been signed by a given server + + Args: + server_name (str): name of the server which must have signed this object + + json_object (dict): object to be checked + + validity_time (int): timestamp at which we require the signing key to + be valid. (0 implies we don't care) + + Returns: + Deferred[None]: completes if the the object was correctly signed, otherwise + errbacks with an error + """ + req = server_name, json_object, validity_time + return logcontext.make_deferred_yieldable( - self.verify_json_objects_for_server([(server_name, json_object)])[0] + self.verify_json_objects_for_server((req,))[0] ) def verify_json_objects_for_server(self, server_and_json): @@ -116,10 +139,12 @@ class Keyring(object): necessary. Args: - server_and_json (list): List of pairs of (server_name, json_object) + server_and_json (iterable[Tuple[str, dict, int]): + Iterable of triplets of (server_name, json_object, validity_time) + validity_time is a timestamp at which the signing key must be valid. Returns: - List: for each input pair, a deferred indicating success + List: for each input triplet, a deferred indicating success or failure to verify each json object's signature for the given server_name. The deferreds run their callbacks in the sentinel logcontext. 
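Further down this diff, duplicate requests for the same key are coalesced by keeping the largest minimum_valid_until_ts, so that a single fetch can satisfy every caller. A standalone sketch of that bookkeeping (the function name and plain-tuple input are illustrative, not the Synapse API):

from collections import defaultdict


def coalesce_key_requests(requests):
    """requests: iterable of (server_name, key_id, min_valid_ts) triplets.

    Returns the server_name -> key_id -> min_valid_ts mapping handed to a
    fetcher, keeping only the strictest (largest) validity requirement seen
    for each key.
    """
    keys_to_fetch = defaultdict(dict)
    for server_name, key_id, min_valid_ts in requests:
        per_server = keys_to_fetch[server_name]
        per_server[key_id] = max(per_server.get(key_id, -1), min_valid_ts)
    return dict(keys_to_fetch)


# coalesce_key_requests([("s1", "ed25519:a", 500), ("s1", "ed25519:a", 1500)])
# => {"s1": {"ed25519:a": 1500}}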
@@ -128,12 +153,12 @@ class Keyring(object): verify_requests = [] handle = preserve_fn(_handle_key_deferred) - def process(server_name, json_object): + def process(server_name, json_object, validity_time): """Process an entry in the request list - Given a (server_name, json_object) pair from the request list, - adds a key request to verify_requests, and returns a deferred which will - complete or fail (in the sentinel context) when verification completes. + Given a (server_name, json_object, validity_time) triplet from the request + list, adds a key request to verify_requests, and returns a deferred which + will complete or fail (in the sentinel context) when verification completes. """ key_ids = signature_ids(json_object, server_name) @@ -148,7 +173,7 @@ class Keyring(object): # add the key request to the queue, but don't start it off yet. verify_request = VerifyKeyRequest( - server_name, key_ids, json_object, defer.Deferred() + server_name, key_ids, json_object, validity_time ) verify_requests.append(verify_request) @@ -160,8 +185,8 @@ class Keyring(object): return handle(verify_request) results = [ - process(server_name, json_object) - for server_name, json_object in server_and_json + process(server_name, json_object, validity_time) + for server_name, json_object, validity_time in server_and_json ] if verify_requests: @@ -298,8 +323,12 @@ class Keyring(object): verify_request.deferred.errback( SynapseError( 401, - "No key for %s with id %s" - % (verify_request.server_name, verify_request.key_ids), + "No key for %s with ids in %s (min_validity %i)" + % ( + verify_request.server_name, + verify_request.key_ids, + verify_request.minimum_valid_until_ts, + ), Codes.UNAUTHORIZED, ) ) @@ -323,18 +352,28 @@ class Keyring(object): Args: fetcher (KeyFetcher): fetcher to use to fetch the keys remaining_requests (set[VerifyKeyRequest]): outstanding key requests. - Any successfully-completed requests will be reomved from the list. + Any successfully-completed requests will be removed from the list. """ - # dict[str, set(str)]: keys to fetch for each server - missing_keys = {} + # dict[str, dict[str, int]]: keys to fetch. + # server_name -> key_id -> min_valid_ts + missing_keys = defaultdict(dict) + for verify_request in remaining_requests: # any completed requests should already have been removed assert not verify_request.deferred.called - missing_keys.setdefault(verify_request.server_name, set()).update( - verify_request.key_ids - ) + keys_for_server = missing_keys[verify_request.server_name] - results = yield fetcher.get_keys(missing_keys.items()) + for key_id in verify_request.key_ids: + # If we have several requests for the same key, then we only need to + # request that key once, but we should do so with the greatest + # min_valid_until_ts of the requests, so that we can satisfy all of + # the requests. + keys_for_server[key_id] = max( + keys_for_server.get(key_id, -1), + verify_request.minimum_valid_until_ts + ) + + results = yield fetcher.get_keys(missing_keys) completed = list() for verify_request in remaining_requests: @@ -344,25 +383,34 @@ class Keyring(object): # complete this VerifyKeyRequest. 
result_keys = results.get(server_name, {}) for key_id in verify_request.key_ids: - key = result_keys.get(key_id) - if key: - with PreserveLoggingContext(): - verify_request.deferred.callback( - (server_name, key_id, key.verify_key) - ) - completed.append(verify_request) - break + fetch_key_result = result_keys.get(key_id) + if not fetch_key_result: + # we didn't get a result for this key + continue + + if ( + fetch_key_result.valid_until_ts + < verify_request.minimum_valid_until_ts + ): + # key was not valid at this point + continue + + with PreserveLoggingContext(): + verify_request.deferred.callback( + (server_name, key_id, fetch_key_result.verify_key) + ) + completed.append(verify_request) + break remaining_requests.difference_update(completed) class KeyFetcher(object): - def get_keys(self, server_name_and_key_ids): + def get_keys(self, keys_to_fetch): """ Args: - server_name_and_key_ids (iterable[Tuple[str, iterable[str]]]): - list of (server_name, iterable[key_id]) tuples to fetch keys for - Note that the iterables may be iterated more than once. + keys_to_fetch (dict[str, dict[str, int]]): + the keys to be fetched. server_name -> key_id -> min_valid_ts Returns: Deferred[dict[str, dict[str, synapse.storage.keys.FetchKeyResult|None]]]: @@ -378,13 +426,15 @@ class StoreKeyFetcher(KeyFetcher): self.store = hs.get_datastore() @defer.inlineCallbacks - def get_keys(self, server_name_and_key_ids): + def get_keys(self, keys_to_fetch): """see KeyFetcher.get_keys""" + keys_to_fetch = ( (server_name, key_id) - for server_name, key_ids in server_name_and_key_ids - for key_id in key_ids + for server_name, keys_for_server in keys_to_fetch.items() + for key_id in keys_for_server.keys() ) + res = yield self.store.get_server_verify_keys(keys_to_fetch) keys = {} for (server_name, key_id), key in res.items(): @@ -508,14 +558,14 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): self.perspective_servers = self.config.perspectives @defer.inlineCallbacks - def get_keys(self, server_name_and_key_ids): + def get_keys(self, keys_to_fetch): """see KeyFetcher.get_keys""" @defer.inlineCallbacks def get_key(perspective_name, perspective_keys): try: result = yield self.get_server_verify_key_v2_indirect( - server_name_and_key_ids, perspective_name, perspective_keys + keys_to_fetch, perspective_name, perspective_keys ) defer.returnValue(result) except KeyLookupError as e: @@ -549,13 +599,15 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): @defer.inlineCallbacks def get_server_verify_key_v2_indirect( - self, server_names_and_key_ids, perspective_name, perspective_keys + self, keys_to_fetch, perspective_name, perspective_keys ): """ Args: - server_names_and_key_ids (iterable[Tuple[str, iterable[str]]]): - list of (server_name, iterable[key_id]) tuples to fetch keys for + keys_to_fetch (dict[str, dict[str, int]]): + the keys to be fetched. server_name -> key_id -> min_valid_ts + perspective_name (str): name of the notary server to query for the keys + perspective_keys (dict[str, VerifyKey]): map of key_id->key for the notary server @@ -569,12 +621,10 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): """ logger.info( "Requesting keys %s from notary server %s", - server_names_and_key_ids, + keys_to_fetch.items(), perspective_name, ) - # TODO(mark): Set the minimum_valid_until_ts to that needed by - # the events being validated or the current time if validating - # an incoming request. 
+ try: query_response = yield self.client.post_json( destination=perspective_name, @@ -582,9 +632,10 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): data={ u"server_keys": { server_name: { - key_id: {u"minimum_valid_until_ts": 0} for key_id in key_ids + key_id: {u"minimum_valid_until_ts": min_valid_ts} + for key_id, min_valid_ts in server_keys.items() } - for server_name, key_ids in server_names_and_key_ids + for server_name, server_keys in keys_to_fetch.items() } }, long_retries=True, @@ -694,15 +745,18 @@ class ServerKeyFetcher(BaseV2KeyFetcher): self.client = hs.get_http_client() @defer.inlineCallbacks - def get_keys(self, server_name_and_key_ids): + def get_keys(self, keys_to_fetch): """see KeyFetcher.get_keys""" + # TODO make this more resilient results = yield logcontext.make_deferred_yieldable( defer.gatherResults( [ run_in_background( - self.get_server_verify_key_v2_direct, server_name, key_ids + self.get_server_verify_key_v2_direct, + server_name, + server_keys.keys(), ) - for server_name, key_ids in server_name_and_key_ids + for server_name, server_keys in keys_to_fetch.items() ], consumeErrors=True, ).addErrback(unwrapFirstError) @@ -721,6 +775,7 @@ class ServerKeyFetcher(BaseV2KeyFetcher): keys = {} # type: dict[str, FetchKeyResult] for requested_key_id in key_ids: + # we may have found this key as a side-effect of asking for another. if requested_key_id in keys: continue diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index cffa831d80..4b38f7c759 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -265,7 +265,7 @@ def _check_sigs_on_pdus(keyring, room_version, pdus): ] more_deferreds = keyring.verify_json_objects_for_server([ - (p.sender_domain, p.redacted_pdu_json) + (p.sender_domain, p.redacted_pdu_json, 0) for p in pdus_to_check_sender ]) @@ -298,7 +298,7 @@ def _check_sigs_on_pdus(keyring, room_version, pdus): ] more_deferreds = keyring.verify_json_objects_for_server([ - (get_domain_from_id(p.pdu.event_id), p.redacted_pdu_json) + (get_domain_from_id(p.pdu.event_id), p.redacted_pdu_json, 0) for p in pdus_to_check_event_id ]) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index d0efc4e0d3..0db8858cf1 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -94,6 +94,7 @@ class NoAuthenticationError(AuthenticationError): class Authenticator(object): def __init__(self, hs): + self._clock = hs.get_clock() self.keyring = hs.get_keyring() self.server_name = hs.hostname self.store = hs.get_datastore() @@ -102,6 +103,7 @@ class Authenticator(object): # A method just so we can pass 'self' as the authenticator to the Servlets @defer.inlineCallbacks def authenticate_request(self, request, content): + now = self._clock.time_msec() json_request = { "method": request.method.decode('ascii'), "uri": request.uri.decode('ascii'), @@ -138,7 +140,7 @@ class Authenticator(object): 401, "Missing Authorization headers", Codes.UNAUTHORIZED, ) - yield self.keyring.verify_json_for_server(origin, json_request) + yield self.keyring.verify_json_for_server(origin, json_request, now) logger.info("Request from %s", origin) request.authenticated_entity = origin diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index 786149be65..fa6b641ee1 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -97,10 +97,11 @@ class GroupAttestationSigning(object): # TODO: We also want to 
check that *new* attestations that people give # us to store are valid for at least a little while. - if valid_until_ms < self.clock.time_msec(): + now = self.clock.time_msec() + if valid_until_ms < now: raise SynapseError(400, "Attestation expired") - yield self.keyring.verify_json_for_server(server_name, attestation) + yield self.keyring.verify_json_for_server(server_name, attestation, now) def create_attestation(self, group_id, user_id): """Create an attestation for the group_id and user_id with default diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 3933ad4347..096401938d 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -19,6 +19,7 @@ from mock import Mock import canonicaljson import signedjson.key import signedjson.sign +from signedjson.key import get_verify_key from twisted.internet import defer @@ -137,7 +138,7 @@ class KeyringTestCase(unittest.HomeserverTestCase): context_11.request = "11" res_deferreds = kr.verify_json_objects_for_server( - [("server10", json1), ("server11", {})] + [("server10", json1, 0), ("server11", {}, 0)] ) # the unsigned json should be rejected pretty quickly @@ -174,7 +175,7 @@ class KeyringTestCase(unittest.HomeserverTestCase): self.http_client.post_json.return_value = defer.Deferred() res_deferreds_2 = kr.verify_json_objects_for_server( - [("server10", json1)] + [("server10", json1, 0)] ) res_deferreds_2[0].addBoth(self.check_context, None) yield logcontext.make_deferred_yieldable(res_deferreds_2[0]) @@ -197,31 +198,108 @@ class KeyringTestCase(unittest.HomeserverTestCase): kr = keyring.Keyring(self.hs) key1 = signedjson.key.generate_signing_key(1) - key1_id = "%s:%s" % (key1.alg, key1.version) - r = self.hs.datastore.store_server_verify_keys( "server9", time.time() * 1000, - [ - ( - "server9", - key1_id, - FetchKeyResult(signedjson.key.get_verify_key(key1), 1000), - ), - ], + [("server9", get_key_id(key1), FetchKeyResult(get_verify_key(key1), 1000))], ) self.get_success(r) + json1 = {} signedjson.sign.sign_json(json1, "server9", key1) # should fail immediately on an unsigned object - d = _verify_json_for_server(kr, "server9", {}) + d = _verify_json_for_server(kr, "server9", {}, 0) self.failureResultOf(d, SynapseError) - d = _verify_json_for_server(kr, "server9", json1) - self.assertFalse(d.called) + # should suceed on a signed object + d = _verify_json_for_server(kr, "server9", json1, 500) + # self.assertFalse(d.called) self.get_success(d) + def test_verify_json_dedupes_key_requests(self): + """Two requests for the same key should be deduped.""" + key1 = signedjson.key.generate_signing_key(1) + + def get_keys(keys_to_fetch): + # there should only be one request object (with the max validity) + self.assertEqual(keys_to_fetch, {"server1": {get_key_id(key1): 1500}}) + + return defer.succeed( + { + "server1": { + get_key_id(key1): FetchKeyResult(get_verify_key(key1), 1200) + } + } + ) + + mock_fetcher = keyring.KeyFetcher() + mock_fetcher.get_keys = Mock(side_effect=get_keys) + kr = keyring.Keyring(self.hs, key_fetchers=(mock_fetcher,)) + + json1 = {} + signedjson.sign.sign_json(json1, "server1", key1) + + # the first request should succeed; the second should fail because the key + # has expired + results = kr.verify_json_objects_for_server( + [("server1", json1, 500), ("server1", json1, 1500)] + ) + self.assertEqual(len(results), 2) + self.get_success(results[0]) + e = self.get_failure(results[1], SynapseError).value + self.assertEqual(e.errcode, "M_UNAUTHORIZED") + self.assertEqual(e.code, 401) 
+ + # there should have been a single call to the fetcher + mock_fetcher.get_keys.assert_called_once() + + def test_verify_json_falls_back_to_other_fetchers(self): + """If the first fetcher cannot provide a recent enough key, we fall back""" + key1 = signedjson.key.generate_signing_key(1) + + def get_keys1(keys_to_fetch): + self.assertEqual(keys_to_fetch, {"server1": {get_key_id(key1): 1500}}) + return defer.succeed( + { + "server1": { + get_key_id(key1): FetchKeyResult(get_verify_key(key1), 800) + } + } + ) + + def get_keys2(keys_to_fetch): + self.assertEqual(keys_to_fetch, {"server1": {get_key_id(key1): 1500}}) + return defer.succeed( + { + "server1": { + get_key_id(key1): FetchKeyResult(get_verify_key(key1), 1200) + } + } + ) + + mock_fetcher1 = keyring.KeyFetcher() + mock_fetcher1.get_keys = Mock(side_effect=get_keys1) + mock_fetcher2 = keyring.KeyFetcher() + mock_fetcher2.get_keys = Mock(side_effect=get_keys2) + kr = keyring.Keyring(self.hs, key_fetchers=(mock_fetcher1, mock_fetcher2)) + + json1 = {} + signedjson.sign.sign_json(json1, "server1", key1) + + results = kr.verify_json_objects_for_server( + [("server1", json1, 1200), ("server1", json1, 1500)] + ) + self.assertEqual(len(results), 2) + self.get_success(results[0]) + e = self.get_failure(results[1], SynapseError).value + self.assertEqual(e.errcode, "M_UNAUTHORIZED") + self.assertEqual(e.code, 401) + + # there should have been a single call to each fetcher + mock_fetcher1.get_keys.assert_called_once() + mock_fetcher2.get_keys.assert_called_once() + class ServerKeyFetcherTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): @@ -260,8 +338,8 @@ class ServerKeyFetcherTestCase(unittest.HomeserverTestCase): self.http_client.get_json.side_effect = get_json - server_name_and_key_ids = [(SERVER_NAME, ("key1",))] - keys = self.get_success(fetcher.get_keys(server_name_and_key_ids)) + keys_to_fetch = {SERVER_NAME: {"key1": 0}} + keys = self.get_success(fetcher.get_keys(keys_to_fetch)) k = keys[SERVER_NAME][testverifykey_id] self.assertEqual(k.valid_until_ts, VALID_UNTIL_TS) self.assertEqual(k.verify_key, testverifykey) @@ -288,9 +366,7 @@ class ServerKeyFetcherTestCase(unittest.HomeserverTestCase): # change the server name: it should cause a rejection response["server_name"] = "OTHER_SERVER" - self.get_failure( - fetcher.get_keys(server_name_and_key_ids), KeyLookupError - ) + self.get_failure(fetcher.get_keys(keys_to_fetch), KeyLookupError) class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase): @@ -342,8 +418,8 @@ class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase): self.http_client.post_json.side_effect = post_json - server_name_and_key_ids = [(SERVER_NAME, ("key1",))] - keys = self.get_success(fetcher.get_keys(server_name_and_key_ids)) + keys_to_fetch = {SERVER_NAME: {"key1": 0}} + keys = self.get_success(fetcher.get_keys(keys_to_fetch)) self.assertIn(SERVER_NAME, keys) k = keys[SERVER_NAME][testverifykey_id] self.assertEqual(k.valid_until_ts, VALID_UNTIL_TS) @@ -401,7 +477,7 @@ class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase): def get_key_from_perspectives(response): fetcher = PerspectivesKeyFetcher(self.hs) - server_name_and_key_ids = [(SERVER_NAME, ("key1",))] + keys_to_fetch = {SERVER_NAME: {"key1": 0}} def post_json(destination, path, data, **kwargs): self.assertEqual(destination, self.mock_perspective_server.server_name) @@ -410,9 +486,7 @@ class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase): self.http_client.post_json.side_effect = post_json - 
return self.get_success( - fetcher.get_keys(server_name_and_key_ids) - ) + return self.get_success(fetcher.get_keys(keys_to_fetch)) # start with a valid response so we can check we are testing the right thing response = build_response() @@ -435,6 +509,11 @@ class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase): self.assertEqual(keys, {}, "Expected empty dict with missing origin server sig") +def get_key_id(key): + """Get the matrix ID tag for a given SigningKey or VerifyKey""" + return "%s:%s" % (key.alg, key.version) + + @defer.inlineCallbacks def run_in_context(f, *args, **kwargs): with LoggingContext("testctx") as ctx: @@ -445,14 +524,16 @@ def run_in_context(f, *args, **kwargs): defer.returnValue(rv) -def _verify_json_for_server(keyring, server_name, json_object): +def _verify_json_for_server(keyring, server_name, json_object, validity_time): """thin wrapper around verify_json_for_server which makes sure it is wrapped with the patched defer.inlineCallbacks. """ @defer.inlineCallbacks def v(): - rv1 = yield keyring.verify_json_for_server(server_name, json_object) + rv1 = yield keyring.verify_json_for_server( + server_name, json_object, validity_time + ) defer.returnValue(rv1) return run_in_context(v) From 06a1f3e20719ab2631089a37cef50b80c1155f89 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Jun 2019 17:56:54 +0100 Subject: [PATCH 82/92] Reduce timeout for outbound /key/v2/server requests. --- synapse/crypto/keyring.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index cdec06c88e..bef6498f4b 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -786,6 +786,19 @@ class ServerKeyFetcher(BaseV2KeyFetcher): path="/_matrix/key/v2/server/" + urllib.parse.quote(requested_key_id), ignore_backoff=True, + + # we only give the remote server 10s to respond. It should be an + # easy request to handle, so if it doesn't reply within 10s, it's + # probably not going to. + # + # Furthermore, when we are acting as a notary server, we cannot + # wait all day for all of the origin servers, as the requesting + # server will otherwise time out before we can respond. + # + # (Note that get_json may make 4 attempts, so this can still take + # almost 45 seconds to fetch the headers, plus up to another 60s to + # read the response). + timeout=10000, ) except (NotRetryingDestination, RequestSendFailed) as e: raise_from(KeyLookupError("Failed to connect to remote server"), e) From dce6e9e0c11fc5d99b2da6698aed04e9f525f242 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Jun 2019 23:50:43 +0100 Subject: [PATCH 83/92] Avoid rapidly backing-off a server if we ignore the retry interval --- changelog.d/5335.bugfix | 1 + synapse/util/retryutils.py | 60 +++++++++++++++++++++++--------------- 2 files changed, 38 insertions(+), 23 deletions(-) create mode 100644 changelog.d/5335.bugfix diff --git a/changelog.d/5335.bugfix b/changelog.d/5335.bugfix new file mode 100644 index 0000000000..7318cbe35e --- /dev/null +++ b/changelog.d/5335.bugfix @@ -0,0 +1 @@ +Fix a bug where we could rapidly mark a server as unreachable even though it was only down for a few minutes. 
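The fix reduces to a new backoff_on_failure flag: when a caller asked to ignore the stored backoff, a failure leaves the stored retry interval untouched instead of multiplying it. A rough model of the resulting behaviour, using the default intervals from RetryDestinationLimiter (a simplification for illustration, not the actual class):

def next_retry_interval(current, succeeded, backoff_on_failure,
                        min_interval=10 * 60 * 1000,       # 10 minutes
                        max_interval=24 * 60 * 60 * 1000,  # 1 day
                        multiplier=5):
    if succeeded:
        return 0               # success clears any backoff
    if not backoff_on_failure:
        return current         # ignore_backoff caller: leave the record alone
    if current:
        return min(current * multiplier, max_interval)
    return min_interval        # first recorded failure

Before this change an ignore_backoff caller still multiplied the interval on every failure, so a server that was only briefly unreachable could be driven to the maximum backoff within a handful of requests.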
diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py index 26cce7d197..f6dfa77d8f 100644 --- a/synapse/util/retryutils.py +++ b/synapse/util/retryutils.py @@ -46,8 +46,7 @@ class NotRetryingDestination(Exception): @defer.inlineCallbacks -def get_retry_limiter(destination, clock, store, ignore_backoff=False, - **kwargs): +def get_retry_limiter(destination, clock, store, ignore_backoff=False, **kwargs): """For a given destination check if we have previously failed to send a request there and are waiting before retrying the destination. If we are not ready to retry the destination, this will raise a @@ -60,8 +59,7 @@ def get_retry_limiter(destination, clock, store, ignore_backoff=False, clock (synapse.util.clock): timing source store (synapse.storage.transactions.TransactionStore): datastore ignore_backoff (bool): true to ignore the historical backoff data and - try the request anyway. We will still update the next - retry_interval on success/failure. + try the request anyway. We will still reset the retry_interval on success. Example usage: @@ -75,13 +73,12 @@ def get_retry_limiter(destination, clock, store, ignore_backoff=False, """ retry_last_ts, retry_interval = (0, 0) - retry_timings = yield store.get_destination_retry_timings( - destination - ) + retry_timings = yield store.get_destination_retry_timings(destination) if retry_timings: retry_last_ts, retry_interval = ( - retry_timings["retry_last_ts"], retry_timings["retry_interval"] + retry_timings["retry_last_ts"], + retry_timings["retry_interval"], ) now = int(clock.time_msec()) @@ -93,22 +90,31 @@ def get_retry_limiter(destination, clock, store, ignore_backoff=False, destination=destination, ) + # if we are ignoring the backoff data, we should also not increment the backoff + # when we get another failure - otherwise a server can very quickly reach the + # maximum backoff even though it might only have been down briefly + backoff_on_failure = not ignore_backoff + defer.returnValue( RetryDestinationLimiter( - destination, - clock, - store, - retry_interval, - **kwargs + destination, clock, store, retry_interval, backoff_on_failure, **kwargs ) ) class RetryDestinationLimiter(object): - def __init__(self, destination, clock, store, retry_interval, - min_retry_interval=10 * 60 * 1000, - max_retry_interval=24 * 60 * 60 * 1000, - multiplier_retry_interval=5, backoff_on_404=False): + def __init__( + self, + destination, + clock, + store, + retry_interval, + min_retry_interval=10 * 60 * 1000, + max_retry_interval=24 * 60 * 60 * 1000, + multiplier_retry_interval=5, + backoff_on_404=False, + backoff_on_failure=True, + ): """Marks the destination as "down" if an exception is thrown in the context, except for CodeMessageException with code < 500. @@ -128,6 +134,9 @@ class RetryDestinationLimiter(object): multiplier_retry_interval (int): The multiplier to use to increase the retry interval after a failed request. backoff_on_404 (bool): Back off if we get a 404 + + backoff_on_failure (bool): set to False if we should not increase the + retry interval on a failure. 
""" self.clock = clock self.store = store @@ -138,6 +147,7 @@ class RetryDestinationLimiter(object): self.max_retry_interval = max_retry_interval self.multiplier_retry_interval = multiplier_retry_interval self.backoff_on_404 = backoff_on_404 + self.backoff_on_failure = backoff_on_failure def __enter__(self): pass @@ -173,10 +183,13 @@ class RetryDestinationLimiter(object): if not self.retry_interval: return - logger.debug("Connection to %s was successful; clearing backoff", - self.destination) + logger.debug( + "Connection to %s was successful; clearing backoff", self.destination + ) retry_last_ts = 0 self.retry_interval = 0 + elif not self.backoff_on_failure: + return else: # We couldn't connect. if self.retry_interval: @@ -190,7 +203,10 @@ class RetryDestinationLimiter(object): logger.info( "Connection to %s was unsuccessful (%s(%s)); backoff now %i", - self.destination, exc_type, exc_val, self.retry_interval + self.destination, + exc_type, + exc_val, + self.retry_interval, ) retry_last_ts = int(self.clock.time_msec()) @@ -201,9 +217,7 @@ class RetryDestinationLimiter(object): self.destination, retry_last_ts, self.retry_interval ) except Exception: - logger.exception( - "Failed to store destination_retry_timings", - ) + logger.exception("Failed to store destination_retry_timings") # we deliberately do this in the background. synapse.util.logcontext.run_in_background(store_retry_timings) From def5ea4062295759d7c28d9c2302187871a1bc72 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Jun 2019 15:36:41 +0100 Subject: [PATCH 84/92] Don't bomb out on direct key fetches as soon as one fails --- synapse/crypto/keyring.py | 58 ++++++++++++++++++++++-------------- tests/crypto/test_keyring.py | 12 ++++---- 2 files changed, 41 insertions(+), 29 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index bef6498f4b..5660c96023 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -46,6 +46,7 @@ from synapse.api.errors import ( ) from synapse.storage.keys import FetchKeyResult from synapse.util import logcontext, unwrapFirstError +from synapse.util.async_helpers import yieldable_gather_results from synapse.util.logcontext import ( LoggingContext, PreserveLoggingContext, @@ -169,7 +170,12 @@ class Keyring(object): ) ) - logger.debug("Verifying for %s with key_ids %s", server_name, key_ids) + logger.debug( + "Verifying for %s with key_ids %s, min_validity %i", + server_name, + key_ids, + validity_time, + ) # add the key request to the queue, but don't start it off yet. 
verify_request = VerifyKeyRequest( @@ -744,34 +750,42 @@ class ServerKeyFetcher(BaseV2KeyFetcher): self.clock = hs.get_clock() self.client = hs.get_http_client() - @defer.inlineCallbacks def get_keys(self, keys_to_fetch): """see KeyFetcher.get_keys""" - # TODO make this more resilient - results = yield logcontext.make_deferred_yieldable( - defer.gatherResults( - [ - run_in_background( - self.get_server_verify_key_v2_direct, - server_name, - server_keys.keys(), - ) - for server_name, server_keys in keys_to_fetch.items() - ], - consumeErrors=True, - ).addErrback(unwrapFirstError) - ) - merged = {} - for result in results: - merged.update(result) + results = {} - defer.returnValue( - {server_name: keys for server_name, keys in merged.items() if keys} + @defer.inlineCallbacks + def get_key(key_to_fetch_item): + server_name, key_ids = key_to_fetch_item + try: + keys = yield self.get_server_verify_key_v2_direct(server_name, key_ids) + results[server_name] = keys + except KeyLookupError as e: + logger.warning( + "Error looking up keys %s from %s: %s", key_ids, server_name, e + ) + except Exception: + logger.exception("Error getting keys %s from %s", key_ids, server_name) + + return yieldable_gather_results(get_key, keys_to_fetch.items()).addCallback( + lambda _: results ) @defer.inlineCallbacks def get_server_verify_key_v2_direct(self, server_name, key_ids): + """ + + Args: + server_name (str): + key_ids (iterable[str]): + + Returns: + Deferred[dict[str, FetchKeyResult]]: map from key ID to lookup result + + Raises: + KeyLookupError if there was a problem making the lookup + """ keys = {} # type: dict[str, FetchKeyResult] for requested_key_id in key_ids: @@ -823,7 +837,7 @@ class ServerKeyFetcher(BaseV2KeyFetcher): ) keys.update(response_keys) - defer.returnValue({server_name: keys}) + defer.returnValue(keys) @defer.inlineCallbacks diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 096401938d..4cff7e36c8 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -25,11 +25,7 @@ from twisted.internet import defer from synapse.api.errors import SynapseError from synapse.crypto import keyring -from synapse.crypto.keyring import ( - KeyLookupError, - PerspectivesKeyFetcher, - ServerKeyFetcher, -) +from synapse.crypto.keyring import PerspectivesKeyFetcher, ServerKeyFetcher from synapse.storage.keys import FetchKeyResult from synapse.util import logcontext from synapse.util.logcontext import LoggingContext @@ -364,9 +360,11 @@ class ServerKeyFetcherTestCase(unittest.HomeserverTestCase): bytes(res["key_json"]), canonicaljson.encode_canonical_json(response) ) - # change the server name: it should cause a rejection + # change the server name: the result should be ignored response["server_name"] = "OTHER_SERVER" - self.get_failure(fetcher.get_keys(keys_to_fetch), KeyLookupError) + + keys = self.get_success(fetcher.get_keys(keys_to_fetch)) + self.assertEqual(keys, {}) class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase): From c5d60eadd5949ab4c12857e0830eb0afbb857f72 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Jun 2019 18:07:19 +0100 Subject: [PATCH 85/92] Notary server: make requests to origins in parallel ... else we're guaranteed to time out. 
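Together with the previous commit, this turns the direct key fetcher into a fan-out that tolerates individual origin failures rather than aborting on the first error. The pattern, reduced to plain Twisted (the real code uses Synapse's yieldable_gather_results helper and its own logging; the function below is an illustrative sketch, not the actual implementation):

import logging

from twisted.internet import defer

logger = logging.getLogger(__name__)


@defer.inlineCallbacks
def fetch_keys_in_parallel(fetch_from_origin, keys_to_fetch):
    """keys_to_fetch: dict of server_name -> iterable of key_ids.

    fetch_from_origin(server_name, key_ids) is assumed to return a Deferred.
    """
    results = {}

    @defer.inlineCallbacks
    def get_one(server_name, key_ids):
        try:
            results[server_name] = yield fetch_from_origin(server_name, key_ids)
        except Exception as e:
            # one bad origin must not sink the whole response
            logger.warning(
                "Error fetching keys %s from %s: %s", key_ids, server_name, e
            )

    yield defer.gatherResults(
        [get_one(name, key_ids) for name, key_ids in keys_to_fetch.items()],
        consumeErrors=True,
    )
    defer.returnValue(results)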
--- synapse/crypto/keyring.py | 10 +++++++++- synapse/rest/key/v2/remote_key_resource.py | 12 ++---------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 5660c96023..6dae713ebc 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -751,7 +751,15 @@ class ServerKeyFetcher(BaseV2KeyFetcher): self.client = hs.get_http_client() def get_keys(self, keys_to_fetch): - """see KeyFetcher.get_keys""" + """ + Args: + keys_to_fetch (dict[str, iterable[str]]): + the keys to be fetched. server_name -> key_ids + + Returns: + Deferred[dict[str, dict[str, synapse.storage.keys.FetchKeyResult|None]]]: + map from server_name -> key_id -> FetchKeyResult + """ results = {} diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index 21c3c807b9..8a730bbc35 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -20,7 +20,7 @@ from twisted.web.resource import Resource from twisted.web.server import NOT_DONE_YET from synapse.api.errors import Codes, SynapseError -from synapse.crypto.keyring import KeyLookupError, ServerKeyFetcher +from synapse.crypto.keyring import ServerKeyFetcher from synapse.http.server import respond_with_json_bytes, wrap_json_request_handler from synapse.http.servlet import parse_integer, parse_json_object_from_request @@ -215,15 +215,7 @@ class RemoteKey(Resource): json_results.add(bytes(result["key_json"])) if cache_misses and query_remote_on_cache_miss: - for server_name, key_ids in cache_misses.items(): - try: - yield self.fetcher.get_server_verify_key_v2_direct( - server_name, key_ids - ) - except KeyLookupError as e: - logger.info("Failed to fetch key: %s", e) - except Exception: - logger.exception("Failed to get key for %r", server_name) + yield self.fetcher.get_keys(cache_misses) yield self.query_keys( request, query, query_remote_on_cache_miss=False ) From a3f2d000e031f2e9b6e76f679967fd0c0ba890f3 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Jun 2019 23:12:48 +0100 Subject: [PATCH 86/92] changelog --- changelog.d/5333.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5333.bugfix diff --git a/changelog.d/5333.bugfix b/changelog.d/5333.bugfix new file mode 100644 index 0000000000..cb05a6dd63 --- /dev/null +++ b/changelog.d/5333.bugfix @@ -0,0 +1 @@ +Fix various problems which made the signing-key notary server time out for some requests. \ No newline at end of file From b2b90b7d34bf9afc437df6a2e58ab89cfd8ab91f Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 4 Jun 2019 15:54:27 +1000 Subject: [PATCH 87/92] Hawkowl/fix missing auth (#5328) --- changelog.d/5328.misc | 1 + synapse/rest/client/v1/voip.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/5328.misc diff --git a/changelog.d/5328.misc b/changelog.d/5328.misc new file mode 100644 index 0000000000..e1b9dc58a3 --- /dev/null +++ b/changelog.d/5328.misc @@ -0,0 +1 @@ +The base classes for the v1 and v2_alpha REST APIs have been unified. 
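The voip.py change below fixes a servlet whose handler used self.auth without ever assigning it. The general pattern: any RestServlet whose handlers authenticate requests needs the accessor wired up in __init__. A minimal hedged example (the servlet, path and pattern helper usage are made up for illustration; the hs accessors and get_user_by_req are real):

from twisted.internet import defer

from synapse.http.servlet import RestServlet


class ExampleAuthedServlet(RestServlet):
    # PATTERNS = client_patterns("/example")  # built with the now-unified helper

    def __init__(self, hs):
        super(ExampleAuthedServlet, self).__init__()
        self.hs = hs
        self.auth = hs.get_auth()  # without this, on_GET raises AttributeError

    @defer.inlineCallbacks
    def on_GET(self, request):
        # rejects the request unless it carries a valid access token
        yield self.auth.get_user_by_req(request)
        defer.returnValue((200, {}))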
diff --git a/synapse/rest/client/v1/voip.py b/synapse/rest/client/v1/voip.py index 0975df84cf..6381049210 100644 --- a/synapse/rest/client/v1/voip.py +++ b/synapse/rest/client/v1/voip.py @@ -29,6 +29,7 @@ class VoipRestServlet(RestServlet): def __init__(self, hs): super(VoipRestServlet, self).__init__() self.hs = hs + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request): From 5bdb189f86b462890ff55c9244506b0c41fed856 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 4 Jun 2019 11:14:16 +0100 Subject: [PATCH 88/92] Improve docstrings on MatrixFederationClient. (#5332) --- changelog.d/5332.misc | 1 + synapse/http/matrixfederationclient.py | 71 ++++++++++++++++++++------ 2 files changed, 56 insertions(+), 16 deletions(-) create mode 100644 changelog.d/5332.misc diff --git a/changelog.d/5332.misc b/changelog.d/5332.misc new file mode 100644 index 0000000000..dcfac4eac9 --- /dev/null +++ b/changelog.d/5332.misc @@ -0,0 +1 @@ +Improve docstrings on MatrixFederationClient. diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 8197619a78..663ea72a7a 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -285,7 +285,24 @@ class MatrixFederationHttpClient(object): request (MatrixFederationRequest): details of request to be sent timeout (int|None): number of milliseconds to wait for the response headers - (including connecting to the server). 60s by default. + (including connecting to the server), *for each attempt*. + 60s by default. + + long_retries (bool): whether to use the long retry algorithm. + + The regular retry algorithm makes 4 attempts, with intervals + [0.5s, 1s, 2s]. + + The long retry algorithm makes 11 attempts, with intervals + [4s, 16s, 60s, 60s, ...] + + Both algorithms add -20%/+40% jitter to the retry intervals. + + Note that the above intervals are *in addition* to the time spent + waiting for the request to complete (up to `timeout` ms). + + NB: the long retry algorithm takes over 20 minutes to complete, with + a default timeout of 60s! ignore_backoff (bool): true to ignore the historical backoff data and try the request anyway. @@ -566,10 +583,14 @@ class MatrixFederationHttpClient(object): the request body. This will be encoded as JSON. json_data_callback (callable): A callable returning the dict to use as the request body. - long_retries (bool): A boolean that indicates whether we should - retry for a short or long time. - timeout(int): How long to try (in ms) the destination for before - giving up. None indicates no timeout. + + long_retries (bool): whether to use the long retry algorithm. See + docs on _send_request for details. + + timeout (int|None): number of milliseconds to wait for the response headers + (including connecting to the server), *for each attempt*. + self._default_timeout (60s) by default. + ignore_backoff (bool): true to ignore the historical backoff data and try the request anyway. backoff_on_404 (bool): True if we should count a 404 response as @@ -627,15 +648,22 @@ class MatrixFederationHttpClient(object): Args: destination (str): The remote server to send the HTTP request to. + path (str): The HTTP path. + data (dict): A dict containing the data that will be used as the request body. This will be encoded as JSON. - long_retries (bool): A boolean that indicates whether we should - retry for a short or long time. 
- timeout(int): How long to try (in ms) the destination for before - giving up. None indicates no timeout. + + long_retries (bool): whether to use the long retry algorithm. See + docs on _send_request for details. + + timeout (int|None): number of milliseconds to wait for the response headers + (including connecting to the server), *for each attempt*. + self._default_timeout (60s) by default. + ignore_backoff (bool): true to ignore the historical backoff data and try the request anyway. + args (dict): query params Returns: Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The @@ -686,14 +714,19 @@ class MatrixFederationHttpClient(object): Args: destination (str): The remote server to send the HTTP request to. + path (str): The HTTP path. + args (dict|None): A dictionary used to create query strings, defaults to None. - timeout (int): How long to try (in ms) the destination for before - giving up. None indicates no timeout and that the request will - be retried. + + timeout (int|None): number of milliseconds to wait for the response headers + (including connecting to the server), *for each attempt*. + self._default_timeout (60s) by default. + ignore_backoff (bool): true to ignore the historical backoff data and try the request anyway. + try_trailing_slash_on_400 (bool): True if on a 400 M_UNRECOGNIZED response we should try appending a trailing slash to the end of the request. Workaround for #3622 in Synapse <= v0.99.3. @@ -742,12 +775,18 @@ class MatrixFederationHttpClient(object): destination (str): The remote server to send the HTTP request to. path (str): The HTTP path. - long_retries (bool): A boolean that indicates whether we should - retry for a short or long time. - timeout(int): How long to try (in ms) the destination for before - giving up. None indicates no timeout. + + long_retries (bool): whether to use the long retry algorithm. See + docs on _send_request for details. + + timeout (int|None): number of milliseconds to wait for the response headers + (including connecting to the server), *for each attempt*. + self._default_timeout (60s) by default. + ignore_backoff (bool): true to ignore the historical backoff data and try the request anyway. + + args (dict): query params Returns: Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body. From 4d08b8f30c6a10caaa570bd93059d496b66185a0 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 4 Jun 2019 11:53:07 +0100 Subject: [PATCH 89/92] Don't do long retries when calling the key notary server. (#5334) It takes at least 20 minutes to work through the long_retries schedule (11 attempts, each with a 60 second timeout, and 60 seconds between each request), so if the notary server isn't returning within the timeout, we'll just end up blocking whatever request is happening for 20 minutes. Ain't nobody got time for that. --- changelog.d/5334.bugfix | 1 + synapse/crypto/keyring.py | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 changelog.d/5334.bugfix diff --git a/changelog.d/5334.bugfix b/changelog.d/5334.bugfix new file mode 100644 index 0000000000..ed141e0918 --- /dev/null +++ b/changelog.d/5334.bugfix @@ -0,0 +1 @@ +Fix bug which would make certain operations (such as room joins) block for 20 minutes while attemoting to fetch verification keys. 
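The 20 minute figure follows directly from the retry schedule documented in the MatrixFederationClient docstrings above. A back-of-the-envelope worst case (ignoring time spent reading response bodies):

# 11 attempts, each allowed the full 60s header timeout
time_in_requests = 11 * 60                  # 660 s
# 10 sleeps between attempts: [4, 16, 60, 60, ...], with up to +40% jitter
time_sleeping = (4 + 16 + 8 * 60) * 1.4     # 700 s
total = time_in_requests + time_sleeping    # 1360 s, a little over 22 minutes

With long_retries removed, the notary request falls back to the regular schedule of 4 attempts with sub-minute intervals, so an unresponsive notary can no longer stall a room join for tens of minutes.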
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 6dae713ebc..0fd15287e7 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -644,7 +644,6 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): for server_name, server_keys in keys_to_fetch.items() } }, - long_retries=True, ) except (NotRetryingDestination, RequestSendFailed) as e: raise_from(KeyLookupError("Failed to connect to remote server"), e) From ac3cc3236748877b692e6c6c771019fdb23d3e71 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 4 Jun 2019 13:47:44 +0100 Subject: [PATCH 90/92] Make account validity renewal emails work when email notifs are disabled --- changelog.d/5341.bugfix | 1 + synapse/config/emailconfig.py | 101 +++++++++++++++++++--------------- 2 files changed, 59 insertions(+), 43 deletions(-) create mode 100644 changelog.d/5341.bugfix diff --git a/changelog.d/5341.bugfix b/changelog.d/5341.bugfix new file mode 100644 index 0000000000..a7aaa95f39 --- /dev/null +++ b/changelog.d/5341.bugfix @@ -0,0 +1 @@ +Fix a bug where account validity renewal emails could only be sent when email notifs were enabled. diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index 342a6ce5fd..cf4875f1f3 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -1,5 +1,7 @@ # -*- coding: utf-8 -*- -# Copyright 2015, 2016 OpenMarket Ltd +# Copyright 2015-2016 OpenMarket Ltd +# Copyright 2017-2018 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -29,12 +31,49 @@ logger = logging.getLogger(__name__) class EmailConfig(Config): def read_config(self, config): + # TODO: We should separate better the email configuration from the notification + # and account validity config. + self.email_enable_notifs = False email_config = config.get("email", {}) - self.email_enable_notifs = email_config.get("enable_notifs", False) - if self.email_enable_notifs: + self.email_smtp_host = email_config.get("smtp_host", None) + self.email_smtp_port = email_config.get("smtp_port", None) + self.email_smtp_user = email_config.get("smtp_user", None) + self.email_smtp_pass = email_config.get("smtp_pass", None) + self.require_transport_security = email_config.get( + "require_transport_security", False + ) + if "app_name" in email_config: + self.email_app_name = email_config["app_name"] + else: + self.email_app_name = "Matrix" + + self.email_notif_from = email_config.get("notif_from", None) + # make sure it's valid + parsed = email.utils.parseaddr(self.email_notif_from) + if self.email_notif_from and parsed[1] == '': + raise RuntimeError("Invalid notif_from address") + + template_dir = email_config.get("template_dir") + # we need an absolute path, because we change directory after starting (and + # we don't yet know what auxilliary templates like mail.css we will need). + # (Note that loading as package_resources with jinja.PackageLoader doesn't + # work for the same reason.) 
+ if not template_dir: + template_dir = pkg_resources.resource_filename( + 'synapse', 'res/templates' + ) + + self.email_template_dir = os.path.abspath(template_dir) + + self.email_enable_notifs = email_config.get("enable_notifs", False) + account_validity_renewal_enabled = config.get( + "account_validity", {}, + ).get("renew_at") + + if self.email_enable_notifs or account_validity_renewal_enabled: # make sure we can import the required deps import jinja2 import bleach @@ -42,6 +81,7 @@ class EmailConfig(Config): jinja2 bleach + if self.email_enable_notifs: required = [ "smtp_host", "smtp_port", @@ -66,34 +106,13 @@ class EmailConfig(Config): "email.enable_notifs is True but no public_baseurl is set" ) - self.email_smtp_host = email_config["smtp_host"] - self.email_smtp_port = email_config["smtp_port"] - self.email_notif_from = email_config["notif_from"] self.email_notif_template_html = email_config["notif_template_html"] self.email_notif_template_text = email_config["notif_template_text"] - self.email_expiry_template_html = email_config.get( - "expiry_template_html", "notice_expiry.html", - ) - self.email_expiry_template_text = email_config.get( - "expiry_template_text", "notice_expiry.txt", - ) - - template_dir = email_config.get("template_dir") - # we need an absolute path, because we change directory after starting (and - # we don't yet know what auxilliary templates like mail.css we will need). - # (Note that loading as package_resources with jinja.PackageLoader doesn't - # work for the same reason.) - if not template_dir: - template_dir = pkg_resources.resource_filename( - 'synapse', 'res/templates' - ) - template_dir = os.path.abspath(template_dir) for f in self.email_notif_template_text, self.email_notif_template_html: - p = os.path.join(template_dir, f) + p = os.path.join(self.email_template_dir, f) if not os.path.isfile(p): raise ConfigError("Unable to find email template file %s" % (p, )) - self.email_template_dir = template_dir self.email_notif_for_new_users = email_config.get( "notif_for_new_users", True @@ -101,29 +120,25 @@ class EmailConfig(Config): self.email_riot_base_url = email_config.get( "riot_base_url", None ) - self.email_smtp_user = email_config.get( - "smtp_user", None - ) - self.email_smtp_pass = email_config.get( - "smtp_pass", None - ) - self.require_transport_security = email_config.get( - "require_transport_security", False - ) - if "app_name" in email_config: - self.email_app_name = email_config["app_name"] - else: - self.email_app_name = "Matrix" - - # make sure it's valid - parsed = email.utils.parseaddr(self.email_notif_from) - if parsed[1] == '': - raise RuntimeError("Invalid notif_from address") else: self.email_enable_notifs = False # Not much point setting defaults for the rest: it would be an # error for them to be used. 
+ if account_validity_renewal_enabled: + self.email_expiry_template_html = email_config.get( + "expiry_template_html", "notice_expiry.html", + ) + self.email_expiry_template_text = email_config.get( + "expiry_template_text", "notice_expiry.txt", + ) + + for f in self.email_expiry_template_text, self.email_expiry_template_html: + p = os.path.join(self.email_template_dir, f) + if not os.path.isfile(p): + raise ConfigError("Unable to find email template file %s" % (p, )) + + def default_config(self, config_dir_path, server_name, **kwargs): return """ # Enable sending emails for notification events or expiry notices From 1cc5fc1f6c316e8ea1c50669cd80f4a7d441570a Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 4 Jun 2019 13:51:23 +0100 Subject: [PATCH 91/92] Lint --- synapse/config/emailconfig.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index cf4875f1f3..7ca3505895 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -138,7 +138,6 @@ class EmailConfig(Config): if not os.path.isfile(p): raise ConfigError("Unable to find email template file %s" % (p, )) - def default_config(self, config_dir_path, server_name, **kwargs): return """ # Enable sending emails for notification events or expiry notices From 2f62e1f6ff671bf6404bd90b1d945f8d029f0d37 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 4 Jun 2019 14:24:36 +0100 Subject: [PATCH 92/92] Only parse from email if provided --- synapse/config/emailconfig.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index 7ca3505895..8400471f40 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -51,10 +51,11 @@ class EmailConfig(Config): self.email_app_name = "Matrix" self.email_notif_from = email_config.get("notif_from", None) - # make sure it's valid - parsed = email.utils.parseaddr(self.email_notif_from) - if self.email_notif_from and parsed[1] == '': - raise RuntimeError("Invalid notif_from address") + if self.email_notif_from is not None: + # make sure it's valid + parsed = email.utils.parseaddr(self.email_notif_from) + if parsed[1] == '': + raise RuntimeError("Invalid notif_from address") template_dir = email_config.get("template_dir") # we need an absolute path, because we change directory after starting (and