From b43d9a920b434180b0ae12516b19d09011f37c59 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 4 Apr 2019 18:54:03 +0100 Subject: [PATCH 001/429] Fix docstring on get_server_keys_json --- synapse/storage/keys.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/storage/keys.py b/synapse/storage/keys.py index 030cd1e5a3..f24ab3eedd 100644 --- a/synapse/storage/keys.py +++ b/synapse/storage/keys.py @@ -188,8 +188,8 @@ class KeyStore(SQLBaseStore): Args: server_keys (list): List of (server_name, key_id, source) triplets. Returns: - Dict mapping (server_name, key_id, source) triplets to dicts with - "ts_valid_until_ms" and "key_json" keys. + Deferred[dict[Tuple[str, str, str|None], list[dict]]]: + Dict mapping (server_name, key_id, source) triplets to lists of dicts """ def _get_server_keys_json_txn(txn): From 30805237fa90d399164d08577f782b7bc7631640 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 4 Apr 2019 19:12:54 +0100 Subject: [PATCH 002/429] add a test for get_keys_from_server --- tests/crypto/test_keyring.py | 63 ++++++++++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index c30a1a69e7..1c70cc3f8d 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -23,6 +23,7 @@ from twisted.internet import defer from synapse.api.errors import SynapseError from synapse.crypto import keyring +from synapse.crypto.keyring import KeyLookupError from synapse.util import logcontext from synapse.util.logcontext import LoggingContext @@ -202,6 +203,68 @@ class KeyringTestCase(unittest.HomeserverTestCase): self.assertFalse(d.called) self.get_success(d) + def test_get_keys_from_server(self): + # arbitrarily advance the clock a bit + self.reactor.advance(100) + + SERVER_NAME = "server2" + kr = keyring.Keyring(self.hs) + testkey = signedjson.key.generate_signing_key("ver1") + testverifykey = signedjson.key.get_verify_key(testkey) + testverifykey_id = "ed25519:ver1" + VALID_UNTIL_TS = 1000 + + # valid response + response = { + "server_name": SERVER_NAME, + "old_verify_keys": {}, + "valid_until_ts": VALID_UNTIL_TS, + "verify_keys": { + testverifykey_id: { + "key": signedjson.key.encode_verify_key_base64(testverifykey) + } + }, + } + signedjson.sign.sign_json(response, SERVER_NAME, testkey) + + def get_json(destination, path, **kwargs): + self.assertEqual(destination, SERVER_NAME) + self.assertEqual(path, "/_matrix/key/v2/server/key1") + return response + + self.http_client.get_json.side_effect = get_json + + server_name_and_key_ids = [(SERVER_NAME, ("key1",))] + keys = self.get_success(kr.get_keys_from_server(server_name_and_key_ids)) + k = keys[SERVER_NAME][testverifykey_id] + self.assertEqual(k, testverifykey) + self.assertEqual(k.alg, "ed25519") + self.assertEqual(k.version, "ver1") + + # check that the perspectives store is correctly updated + lookup_triplet = (SERVER_NAME, testverifykey_id, None) + key_json = self.get_success( + self.hs.get_datastore().get_server_keys_json([lookup_triplet]) + ) + res = key_json[lookup_triplet] + self.assertEqual(len(res), 1) + res = res[0] + self.assertEqual(res["key_id"], testverifykey_id) + self.assertEqual(res["from_server"], SERVER_NAME) + self.assertEqual(res["ts_added_ms"], self.reactor.seconds() * 1000) + self.assertEqual(res["ts_valid_until_ms"], VALID_UNTIL_TS) + + # we expect it to be encoded as canonical json *before* it hits the db + self.assertEqual( + bytes(res["key_json"]), 
canonicaljson.encode_canonical_json(response) + ) + + # change the server name: it should cause a rejection + response["server_name"] = "OTHER_SERVER" + self.get_failure( + kr.get_keys_from_server(server_name_and_key_ids), KeyLookupError + ) + @defer.inlineCallbacks def run_in_context(f, *args, **kwargs): From b2d574f1266e726b24458b6d46168f224e944478 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 4 Apr 2019 19:12:54 +0100 Subject: [PATCH 003/429] test for get_keys_from_perspectives --- tests/crypto/test_keyring.py | 67 ++++++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 1c70cc3f8d..b224fdb23a 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -16,6 +16,7 @@ import time from mock import Mock +import canonicaljson import signedjson.key import signedjson.sign @@ -49,6 +50,9 @@ class MockPerspectiveServer(object): key_id: {"key": signedjson.key.encode_verify_key_base64(verify_key)} }, } + return self.get_signed_response(res) + + def get_signed_response(self, res): signedjson.sign.sign_json(res, self.server_name, self.key) return res @@ -265,6 +269,69 @@ class KeyringTestCase(unittest.HomeserverTestCase): kr.get_keys_from_server(server_name_and_key_ids), KeyLookupError ) + def test_get_keys_from_perspectives(self): + # arbitrarily advance the clock a bit + self.reactor.advance(100) + + SERVER_NAME = "server2" + kr = keyring.Keyring(self.hs) + testkey = signedjson.key.generate_signing_key("ver1") + testverifykey = signedjson.key.get_verify_key(testkey) + testverifykey_id = "ed25519:ver1" + VALID_UNTIL_TS = 200 * 1000 + + # valid response + response = { + "server_name": SERVER_NAME, + "old_verify_keys": {}, + "valid_until_ts": VALID_UNTIL_TS, + "verify_keys": { + testverifykey_id: { + "key": signedjson.key.encode_verify_key_base64(testverifykey) + } + }, + } + + persp_resp = { + "server_keys": [self.mock_perspective_server.get_signed_response(response)] + } + + def post_json(destination, path, data, **kwargs): + self.assertEqual(destination, self.mock_perspective_server.server_name) + self.assertEqual(path, "/_matrix/key/v2/query") + + # check that the request is for the expected key + q = data["server_keys"] + self.assertEqual(list(q[SERVER_NAME].keys()), ["key1"]) + return persp_resp + + self.http_client.post_json.side_effect = post_json + + server_name_and_key_ids = [(SERVER_NAME, ("key1",))] + keys = self.get_success(kr.get_keys_from_perspectives(server_name_and_key_ids)) + self.assertIn(SERVER_NAME, keys) + k = keys[SERVER_NAME][testverifykey_id] + self.assertEqual(k, testverifykey) + self.assertEqual(k.alg, "ed25519") + self.assertEqual(k.version, "ver1") + + # check that the perspectives store is correctly updated + lookup_triplet = (SERVER_NAME, testverifykey_id, None) + key_json = self.get_success( + self.hs.get_datastore().get_server_keys_json([lookup_triplet]) + ) + res = key_json[lookup_triplet] + self.assertEqual(len(res), 1) + res = res[0] + self.assertEqual(res["key_id"], testverifykey_id) + self.assertEqual(res["ts_added_ms"], self.reactor.seconds() * 1000) + self.assertEqual(res["ts_valid_until_ms"], VALID_UNTIL_TS) + + self.assertEqual( + bytes(res["key_json"]), + canonicaljson.encode_canonical_json(persp_resp["server_keys"][0]), + ) + @defer.inlineCallbacks def run_in_context(f, *args, **kwargs): From ef27d434d109732bd9624738c3278863412139ad Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 4 Apr 2019 19:12:54 +0100 Subject: 
[PATCH 004/429] Clean up Keyring.process_v2_response Make this just return the key dict, rather than a single-entry dict mapping the server name to the key dict. It's easy for the caller to get the server name from from the response object anyway. --- synapse/crypto/keyring.py | 69 ++++++++++++++++++++++++++------------- 1 file changed, 46 insertions(+), 23 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 0207cd989a..98b8b15680 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -20,6 +20,7 @@ from collections import namedtuple from six import raise_from from six.moves import urllib +import nacl.signing from signedjson.key import ( decode_verify_key_bytes, encode_verify_key_base64, @@ -496,9 +497,9 @@ class Keyring(object): processed_response = yield self.process_v2_response( perspective_name, response, only_from_server=False ) + server_name = response["server_name"] - for server_name, response_keys in processed_response.items(): - keys.setdefault(server_name, {}).update(response_keys) + keys.setdefault(server_name, {}).update(processed_response) yield logcontext.make_deferred_yieldable(defer.gatherResults( [ @@ -517,7 +518,7 @@ class Keyring(object): @defer.inlineCallbacks def get_server_verify_key_v2_direct(self, server_name, key_ids): - keys = {} + keys = {} # type: dict[str, nacl.signing.VerifyKey] for requested_key_id in key_ids: if requested_key_id in keys: @@ -550,24 +551,49 @@ class Keyring(object): keys.update(response_keys) - yield logcontext.make_deferred_yieldable(defer.gatherResults( - [ - run_in_background( - self.store_keys, - server_name=key_server_name, - from_server=server_name, - verify_keys=verify_keys, - ) - for key_server_name, verify_keys in keys.items() - ], - consumeErrors=True - ).addErrback(unwrapFirstError)) - - defer.returnValue(keys) + yield self.store_keys( + server_name=server_name, + from_server=server_name, + verify_keys=keys, + ) + defer.returnValue({server_name: keys}) @defer.inlineCallbacks - def process_v2_response(self, from_server, response_json, - requested_ids=[], only_from_server=True): + def process_v2_response( + self, from_server, response_json, requested_ids=[], only_from_server=True + ): + """Parse a 'Server Keys' structure from the result of a /key request + + This is used to parse either the entirety of the response from + GET /_matrix/key/v2/server, or a single entry from the list returned by + POST /_matrix/key/v2/query. + + Checks that each signature in the response that claims to come from the origin + server is valid. (Does not check that there actually is such a signature, for + some reason.) + + Stores the json in server_keys_json so that it can be used for future responses + to /_matrix/key/v2/query. + + Args: + from_server (str): the name of the server producing this result: either + the origin server for a /_matrix/key/v2/server request, or the notary + for a /_matrix/key/v2/query. + + response_json (dict): the json-decoded Server Keys response object + + requested_ids (iterable[str]): a list of the key IDs that were requested. + We will store the json for these key ids as well as any that are + actually in the response + + only_from_server (bool): if True, we will check that the server_name in the + the response (ie, the server which originated the key) matches + from_server. 
+ + Returns: + Deferred[dict[str, nacl.signing.VerifyKey]]: + map from key_id to key object + """ time_now_ms = self.clock.time_msec() response_keys = {} verify_keys = {} @@ -589,7 +615,6 @@ class Keyring(object): verify_key.time_added = time_now_ms old_verify_keys[key_id] = verify_key - results = {} server_name = response_json["server_name"] if only_from_server: if server_name != from_server: @@ -643,9 +668,7 @@ class Keyring(object): consumeErrors=True, ).addErrback(unwrapFirstError)) - results[server_name] = response_keys - - defer.returnValue(results) + defer.returnValue(response_keys) def store_keys(self, server_name, from_server, verify_keys): """Store a collection of verify keys for a given server From 6ae9361510eb033d6a4dd9172c5e75bb4d0039dd Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 4 Apr 2019 19:12:54 +0100 Subject: [PATCH 005/429] Hoist server_name check out of process_v2_response It's easier to check it in the caller than to complicate the interface with an extra param. --- synapse/crypto/keyring.py | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 98b8b15680..54af60d711 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -495,7 +495,7 @@ class Keyring(object): ) processed_response = yield self.process_v2_response( - perspective_name, response, only_from_server=False + perspective_name, response ) server_name = response["server_name"] @@ -543,6 +543,11 @@ class Keyring(object): or server_name not in response[u"signatures"]): raise KeyLookupError("Key response not signed by remote server") + if response["server_name"] != server_name: + raise KeyLookupError("Expected a response for server %r not %r" % ( + server_name, response["server_name"] + )) + response_keys = yield self.process_v2_response( from_server=server_name, requested_ids=[requested_key_id], @@ -560,7 +565,7 @@ class Keyring(object): @defer.inlineCallbacks def process_v2_response( - self, from_server, response_json, requested_ids=[], only_from_server=True + self, from_server, response_json, requested_ids=[], ): """Parse a 'Server Keys' structure from the result of a /key request @@ -586,10 +591,6 @@ class Keyring(object): We will store the json for these key ids as well as any that are actually in the response - only_from_server (bool): if True, we will check that the server_name in the - the response (ie, the server which originated the key) matches - from_server. - Returns: Deferred[dict[str, nacl.signing.VerifyKey]]: map from key_id to key object @@ -616,13 +617,6 @@ class Keyring(object): old_verify_keys[key_id] = verify_key server_name = response_json["server_name"] - if only_from_server: - if server_name != from_server: - raise KeyLookupError( - "Expected a response for server %r not %r" % ( - from_server, server_name - ) - ) for key_id in response_json["signatures"].get(server_name, {}): if key_id not in response_json["verify_keys"]: raise KeyLookupError( From b78aac5582da10131d6a2a69c25dcf4a36c528d8 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 4 Apr 2019 19:12:54 +0100 Subject: [PATCH 006/429] changelog --- changelog.d/5001.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5001.misc diff --git a/changelog.d/5001.misc b/changelog.d/5001.misc new file mode 100644 index 0000000000..bf590a016d --- /dev/null +++ b/changelog.d/5001.misc @@ -0,0 +1 @@ +Clean up some code in the server-key Keyring. 
\ No newline at end of file From 7d2a0c848ea1ab22c039e94f5d0bb6ec2f08dd58 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 3 Apr 2019 16:21:12 +0100 Subject: [PATCH 007/429] Fix from_server buglet in get_keys_from_perspectives make sure we store the name of the server the keys came from, rather than the origin server, after doing a fetch-from-perspectives. --- changelog.d/5024.misc | 1 + synapse/crypto/keyring.py | 2 +- tests/crypto/test_keyring.py | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/5024.misc diff --git a/changelog.d/5024.misc b/changelog.d/5024.misc new file mode 100644 index 0000000000..07c13f28d0 --- /dev/null +++ b/changelog.d/5024.misc @@ -0,0 +1 @@ +Store the notary server name correctly in server_keys_json. diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 54af60d711..04beededdc 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -652,7 +652,7 @@ class Keyring(object): self.store.store_server_keys_json, server_name=server_name, key_id=key_id, - from_server=server_name, + from_server=from_server, ts_now_ms=time_now_ms, ts_expires_ms=ts_valid_until_ms, key_json_bytes=signed_key_json_bytes, diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index b224fdb23a..9af0656a83 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -324,6 +324,7 @@ class KeyringTestCase(unittest.HomeserverTestCase): self.assertEqual(len(res), 1) res = res[0] self.assertEqual(res["key_id"], testverifykey_id) + self.assertEqual(res["from_server"], self.mock_perspective_server.server_name) self.assertEqual(res["ts_added_ms"], self.reactor.seconds() * 1000) self.assertEqual(res["ts_valid_until_ms"], VALID_UNTIL_TS) From 2d951686a71fd0c8b927d96fa183747ff2982bf9 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Mon, 8 Apr 2019 15:37:26 +0100 Subject: [PATCH 008/429] drop tables listed in #1830 (#4992) Tables dropped: * application_services, * application_services_regex, * transaction_id_to_pdu, * stats_reporting * current_state_resets * event_content_hashes * event_destinations * event_edge_hashes * event_signatures * feedback * room_hosts * state_forward_extremities --- changelog.d/4992.misc | 1 + scripts/synapse_port_db | 5 - synapse/storage/events.py | 12 -- synapse/storage/schema/delta/13/v13.sql | 20 +-- .../schema/delta/14/upgrade_appservice_db.py | 42 ------ .../schema/delta/16/unique_constraints.sql | 8 -- .../schema/delta/24/stats_reporting.sql | 12 +- .../storage/schema/delta/30/state_stream.sql | 9 +- .../schema/delta/32/remove_indices.sql | 4 - .../54/drop_legacy_tables.sql} | 24 ++-- .../schema/full_schemas/11/event_edges.sql | 91 ------------- .../full_schemas/11/event_signatures.sql | 55 -------- synapse/storage/schema/full_schemas/11/im.sql | 123 ------------------ .../storage/schema/full_schemas/11/keys.sql | 31 ----- .../full_schemas/11/media_repository.sql | 65 --------- .../schema/full_schemas/11/presence.sql | 35 ----- .../schema/full_schemas/11/profiles.sql | 19 --- .../schema/full_schemas/11/redactions.sql | 22 ---- .../storage/schema/full_schemas/11/state.sql | 40 ------ .../schema/full_schemas/11/transactions.sql | 44 ------- .../storage/schema/full_schemas/11/users.sql | 43 ------ .../full_schemas/16/application_services.sql | 19 +-- .../schema/full_schemas/16/event_edges.sql | 30 +---- .../full_schemas/16/event_signatures.sql | 23 +--- synapse/storage/schema/full_schemas/16/im.sql | 21 +-- .../schema/full_schemas/16/presence.sql | 2 +- 
26 files changed, 43 insertions(+), 757 deletions(-) create mode 100644 changelog.d/4992.misc delete mode 100644 synapse/storage/schema/delta/14/upgrade_appservice_db.py rename synapse/storage/schema/{full_schemas/11/room_aliases.sql => delta/54/drop_legacy_tables.sql} (51%) delete mode 100644 synapse/storage/schema/full_schemas/11/event_edges.sql delete mode 100644 synapse/storage/schema/full_schemas/11/event_signatures.sql delete mode 100644 synapse/storage/schema/full_schemas/11/im.sql delete mode 100644 synapse/storage/schema/full_schemas/11/keys.sql delete mode 100644 synapse/storage/schema/full_schemas/11/media_repository.sql delete mode 100644 synapse/storage/schema/full_schemas/11/presence.sql delete mode 100644 synapse/storage/schema/full_schemas/11/profiles.sql delete mode 100644 synapse/storage/schema/full_schemas/11/redactions.sql delete mode 100644 synapse/storage/schema/full_schemas/11/state.sql delete mode 100644 synapse/storage/schema/full_schemas/11/transactions.sql delete mode 100644 synapse/storage/schema/full_schemas/11/users.sql diff --git a/changelog.d/4992.misc b/changelog.d/4992.misc new file mode 100644 index 0000000000..8a9eaea4cf --- /dev/null +++ b/changelog.d/4992.misc @@ -0,0 +1 @@ +Remove legacy tables detailed in #1830. diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index 3de394b035..41be9c9220 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -58,15 +58,11 @@ BOOLEAN_COLUMNS = { APPEND_ONLY_TABLES = [ - "event_content_hashes", "event_reference_hashes", - "event_signatures", - "event_edge_hashes", "events", "event_json", "state_events", "room_memberships", - "feedback", "topics", "room_names", "rooms", @@ -88,7 +84,6 @@ APPEND_ONLY_TABLES = [ "event_search", "presence_stream", "push_rules_stream", - "current_state_resets", "ex_outlier_stream", "cache_invalidation_stream", "public_room_list_stream", diff --git a/synapse/storage/events.py b/synapse/storage/events.py index dfda39bbe0..7a7f841c6c 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -1179,14 +1179,10 @@ class EventsStore( "events", "event_auth", "event_json", - "event_content_hashes", - "event_destinations", - "event_edge_hashes", "event_edges", "event_forward_extremities", "event_reference_hashes", "event_search", - "event_signatures", "event_to_state_groups", "guest_access", "history_visibility", @@ -1857,16 +1853,12 @@ class EventsStore( # Tables that should be pruned: # event_auth # event_backward_extremities - # event_content_hashes - # event_destinations - # event_edge_hashes # event_edges # event_forward_extremities # event_json # event_push_actions # event_reference_hashes # event_search - # event_signatures # event_to_state_groups # events # rejections @@ -2065,14 +2057,10 @@ class EventsStore( "events", "event_json", "event_auth", - "event_content_hashes", - "event_destinations", - "event_edge_hashes", "event_edges", "event_forward_extremities", "event_reference_hashes", "event_search", - "event_signatures", "rejections", ): logger.info("[purge] removing events from %s", table) diff --git a/synapse/storage/schema/delta/13/v13.sql b/synapse/storage/schema/delta/13/v13.sql index 5eb93b38b2..f8649e5d99 100644 --- a/synapse/storage/schema/delta/13/v13.sql +++ b/synapse/storage/schema/delta/13/v13.sql @@ -13,19 +13,7 @@ * limitations under the License. 
*/ -CREATE TABLE IF NOT EXISTS application_services( - id INTEGER PRIMARY KEY AUTOINCREMENT, - url TEXT, - token TEXT, - hs_token TEXT, - sender TEXT, - UNIQUE(token) -); - -CREATE TABLE IF NOT EXISTS application_services_regex( - id INTEGER PRIMARY KEY AUTOINCREMENT, - as_id BIGINT UNSIGNED NOT NULL, - namespace INTEGER, /* enum[room_id|room_alias|user_id] */ - regex TEXT, - FOREIGN KEY(as_id) REFERENCES application_services(id) -); +/* We used to create a tables called application_services and + * application_services_regex, but these are no longer used and are removed in + * delta 54. + */ diff --git a/synapse/storage/schema/delta/14/upgrade_appservice_db.py b/synapse/storage/schema/delta/14/upgrade_appservice_db.py deleted file mode 100644 index 4d725b92fe..0000000000 --- a/synapse/storage/schema/delta/14/upgrade_appservice_db.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2015, 2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging - -import simplejson as json - -logger = logging.getLogger(__name__) - - -def run_create(cur, *args, **kwargs): - cur.execute("SELECT id, regex FROM application_services_regex") - for row in cur.fetchall(): - try: - logger.debug("Checking %s..." % row[0]) - json.loads(row[1]) - except ValueError: - # row isn't in json, make it so. - string_regex = row[1] - new_regex = json.dumps({ - "regex": string_regex, - "exclusive": True - }) - cur.execute( - "UPDATE application_services_regex SET regex=? WHERE id=?", - (new_regex, row[0]) - ) - - -def run_upgrade(*args, **kwargs): - pass diff --git a/synapse/storage/schema/delta/16/unique_constraints.sql b/synapse/storage/schema/delta/16/unique_constraints.sql index fecf11118c..5b8de52c33 100644 --- a/synapse/storage/schema/delta/16/unique_constraints.sql +++ b/synapse/storage/schema/delta/16/unique_constraints.sql @@ -17,14 +17,6 @@ DELETE FROM room_memberships WHERE rowid not in ( DROP INDEX IF EXISTS room_memberships_event_id; CREATE UNIQUE INDEX room_memberships_event_id ON room_memberships(event_id); --- -DELETE FROM feedback WHERE rowid not in ( - SELECT MIN(rowid) FROM feedback GROUP BY event_id -); - -DROP INDEX IF EXISTS feedback_event_id; -CREATE UNIQUE INDEX feedback_event_id ON feedback(event_id); - -- DELETE FROM topics WHERE rowid not in ( SELECT MIN(rowid) FROM topics GROUP BY event_id diff --git a/synapse/storage/schema/delta/24/stats_reporting.sql b/synapse/storage/schema/delta/24/stats_reporting.sql index 5f508af7a9..acea7483bd 100644 --- a/synapse/storage/schema/delta/24/stats_reporting.sql +++ b/synapse/storage/schema/delta/24/stats_reporting.sql @@ -1,4 +1,4 @@ -/* Copyright 2015, 2016 OpenMarket Ltd +/* Copyright 2019 New Vector Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,10 +13,6 @@ * limitations under the License. 
*/ --- Should only ever contain one row -CREATE TABLE IF NOT EXISTS stats_reporting( - -- The stream ordering token which was most recently reported as stats - reported_stream_token INTEGER, - -- The time (seconds since epoch) stats were most recently reported - reported_time BIGINT -); + /* We used to create a table called stats_reporting, but this is no longer + * used and is removed in delta 54. + */ \ No newline at end of file diff --git a/synapse/storage/schema/delta/30/state_stream.sql b/synapse/storage/schema/delta/30/state_stream.sql index 706fe1dcf4..e85699e82e 100644 --- a/synapse/storage/schema/delta/30/state_stream.sql +++ b/synapse/storage/schema/delta/30/state_stream.sql @@ -14,15 +14,10 @@ */ -/** - * The positions in the event stream_ordering when the current_state was - * replaced by the state at the event. +/* We used to create a table called current_state_resets, but this is no + * longer used and is removed in delta 54. */ -CREATE TABLE IF NOT EXISTS current_state_resets( - event_stream_ordering BIGINT PRIMARY KEY NOT NULL -); - /* The outlier events that have aquired a state group typically through * backfill. This is tracked separately to the events table, as assigning a * state group change the position of the existing event in the stream diff --git a/synapse/storage/schema/delta/32/remove_indices.sql b/synapse/storage/schema/delta/32/remove_indices.sql index f859be46a6..4219cdd06a 100644 --- a/synapse/storage/schema/delta/32/remove_indices.sql +++ b/synapse/storage/schema/delta/32/remove_indices.sql @@ -24,13 +24,9 @@ DROP INDEX IF EXISTS state_groups_id; -- Duplicate of PRIMARY KEY DROP INDEX IF EXISTS event_to_state_groups_id; -- Duplicate of PRIMARY KEY DROP INDEX IF EXISTS event_push_actions_room_id_event_id_user_id_profile_tag; -- Duplicate of UNIQUE CONSTRAINT -DROP INDEX IF EXISTS event_destinations_id; -- Prefix of UNIQUE CONSTRAINT DROP INDEX IF EXISTS st_extrem_id; -- Prefix of UNIQUE CONSTRAINT -DROP INDEX IF EXISTS event_content_hashes_id; -- Prefix of UNIQUE CONSTRAINT DROP INDEX IF EXISTS event_signatures_id; -- Prefix of UNIQUE CONSTRAINT -DROP INDEX IF EXISTS event_edge_hashes_id; -- Prefix of UNIQUE CONSTRAINT DROP INDEX IF EXISTS redactions_event_id; -- Duplicate of UNIQUE CONSTRAINT -DROP INDEX IF EXISTS room_hosts_room_id; -- Prefix of UNIQUE CONSTRAINT -- The following indices were unused DROP INDEX IF EXISTS remote_media_cache_thumbnails_media_id; diff --git a/synapse/storage/schema/full_schemas/11/room_aliases.sql b/synapse/storage/schema/delta/54/drop_legacy_tables.sql similarity index 51% rename from synapse/storage/schema/full_schemas/11/room_aliases.sql rename to synapse/storage/schema/delta/54/drop_legacy_tables.sql index 71a91f8ec9..77b39dc2d2 100644 --- a/synapse/storage/schema/full_schemas/11/room_aliases.sql +++ b/synapse/storage/schema/delta/54/drop_legacy_tables.sql @@ -1,4 +1,4 @@ -/* Copyright 2014-2016 OpenMarket Ltd +/* Copyright 2019 New Vector Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -13,12 +13,18 @@ * limitations under the License. 
*/ -CREATE TABLE IF NOT EXISTS room_aliases( - room_alias TEXT NOT NULL, - room_id TEXT NOT NULL -); +DROP TABLE IF EXISTS application_services; +DROP TABLE IF EXISTS application_services_regex; +DROP TABLE IF EXISTS transaction_id_to_pdu; +DROP TABLE IF EXISTS stats_reporting; +DROP TABLE IF EXISTS current_state_resets; +DROP TABLE IF EXISTS event_content_hashes; +DROP TABLE IF EXISTS event_destinations; +DROP TABLE IF EXISTS event_edge_hashes; +DROP TABLE IF EXISTS event_signatures; +DROP TABLE IF EXISTS feedback; +DROP TABLE IF EXISTS room_hosts; +DROP TABLE IF EXISTS state_forward_extremities; + + -CREATE TABLE IF NOT EXISTS room_alias_servers( - room_alias TEXT NOT NULL, - server TEXT NOT NULL -); diff --git a/synapse/storage/schema/full_schemas/11/event_edges.sql b/synapse/storage/schema/full_schemas/11/event_edges.sql deleted file mode 100644 index bccd1c6f74..0000000000 --- a/synapse/storage/schema/full_schemas/11/event_edges.sql +++ /dev/null @@ -1,91 +0,0 @@ -/* Copyright 2014-2016 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -CREATE TABLE IF NOT EXISTS event_forward_extremities( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - UNIQUE (event_id, room_id) -); - -CREATE INDEX ev_extrem_room ON event_forward_extremities(room_id); -CREATE INDEX ev_extrem_id ON event_forward_extremities(event_id); - - -CREATE TABLE IF NOT EXISTS event_backward_extremities( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - UNIQUE (event_id, room_id) -); - -CREATE INDEX ev_b_extrem_room ON event_backward_extremities(room_id); -CREATE INDEX ev_b_extrem_id ON event_backward_extremities(event_id); - - -CREATE TABLE IF NOT EXISTS event_edges( - event_id TEXT NOT NULL, - prev_event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - -- We no longer insert prev_state into this table, so all new rows will have - -- is_state as false. 
- is_state BOOL NOT NULL, - UNIQUE (event_id, prev_event_id, room_id, is_state) -); - -CREATE INDEX ev_edges_id ON event_edges(event_id); -CREATE INDEX ev_edges_prev_id ON event_edges(prev_event_id); - - -CREATE TABLE IF NOT EXISTS room_depth( - room_id TEXT NOT NULL, - min_depth INTEGER NOT NULL, - UNIQUE (room_id) -); - -CREATE INDEX room_depth_room ON room_depth(room_id); - - -create TABLE IF NOT EXISTS event_destinations( - event_id TEXT NOT NULL, - destination TEXT NOT NULL, - delivered_ts BIGINT DEFAULT 0, -- or 0 if not delivered - UNIQUE (event_id, destination) -); - -CREATE INDEX event_destinations_id ON event_destinations(event_id); - - -CREATE TABLE IF NOT EXISTS state_forward_extremities( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - type TEXT NOT NULL, - state_key TEXT NOT NULL, - UNIQUE (event_id, room_id) -); - -CREATE INDEX st_extrem_keys ON state_forward_extremities( - room_id, type, state_key -); -CREATE INDEX st_extrem_id ON state_forward_extremities(event_id); - - -CREATE TABLE IF NOT EXISTS event_auth( - event_id TEXT NOT NULL, - auth_id TEXT NOT NULL, - room_id TEXT NOT NULL, - UNIQUE (event_id, auth_id, room_id) -); - -CREATE INDEX evauth_edges_id ON event_auth(event_id); -CREATE INDEX evauth_edges_auth_id ON event_auth(auth_id); diff --git a/synapse/storage/schema/full_schemas/11/event_signatures.sql b/synapse/storage/schema/full_schemas/11/event_signatures.sql deleted file mode 100644 index 00ce85980e..0000000000 --- a/synapse/storage/schema/full_schemas/11/event_signatures.sql +++ /dev/null @@ -1,55 +0,0 @@ -/* Copyright 2014-2016 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -CREATE TABLE IF NOT EXISTS event_content_hashes ( - event_id TEXT, - algorithm TEXT, - hash bytea, - UNIQUE (event_id, algorithm) -); - -CREATE INDEX event_content_hashes_id ON event_content_hashes(event_id); - - -CREATE TABLE IF NOT EXISTS event_reference_hashes ( - event_id TEXT, - algorithm TEXT, - hash bytea, - UNIQUE (event_id, algorithm) -); - -CREATE INDEX event_reference_hashes_id ON event_reference_hashes(event_id); - - -CREATE TABLE IF NOT EXISTS event_signatures ( - event_id TEXT, - signature_name TEXT, - key_id TEXT, - signature bytea, - UNIQUE (event_id, signature_name, key_id) -); - -CREATE INDEX event_signatures_id ON event_signatures(event_id); - - -CREATE TABLE IF NOT EXISTS event_edge_hashes( - event_id TEXT, - prev_event_id TEXT, - algorithm TEXT, - hash bytea, - UNIQUE (event_id, prev_event_id, algorithm) -); - -CREATE INDEX event_edge_hashes_id ON event_edge_hashes(event_id); diff --git a/synapse/storage/schema/full_schemas/11/im.sql b/synapse/storage/schema/full_schemas/11/im.sql deleted file mode 100644 index dfbbf9fd54..0000000000 --- a/synapse/storage/schema/full_schemas/11/im.sql +++ /dev/null @@ -1,123 +0,0 @@ -/* Copyright 2014-2016 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -CREATE TABLE IF NOT EXISTS events( - stream_ordering INTEGER PRIMARY KEY AUTOINCREMENT, - topological_ordering BIGINT NOT NULL, - event_id TEXT NOT NULL, - type TEXT NOT NULL, - room_id TEXT NOT NULL, - content TEXT NOT NULL, - unrecognized_keys TEXT, - processed BOOL NOT NULL, - outlier BOOL NOT NULL, - depth BIGINT DEFAULT 0 NOT NULL, - UNIQUE (event_id) -); - -CREATE INDEX events_stream_ordering ON events (stream_ordering); -CREATE INDEX events_topological_ordering ON events (topological_ordering); -CREATE INDEX events_room_id ON events (room_id); - - -CREATE TABLE IF NOT EXISTS event_json( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - internal_metadata TEXT NOT NULL, - json TEXT NOT NULL, - UNIQUE (event_id) -); - -CREATE INDEX event_json_room_id ON event_json(room_id); - - -CREATE TABLE IF NOT EXISTS state_events( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - type TEXT NOT NULL, - state_key TEXT NOT NULL, - prev_state TEXT, - UNIQUE (event_id) -); - -CREATE INDEX state_events_room_id ON state_events (room_id); -CREATE INDEX state_events_type ON state_events (type); -CREATE INDEX state_events_state_key ON state_events (state_key); - - -CREATE TABLE IF NOT EXISTS current_state_events( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - type TEXT NOT NULL, - state_key TEXT NOT NULL, - UNIQUE (room_id, type, state_key) -); - -CREATE INDEX curr_events_event_id ON current_state_events (event_id); -CREATE INDEX current_state_events_room_id ON current_state_events (room_id); -CREATE INDEX current_state_events_type ON current_state_events (type); -CREATE INDEX current_state_events_state_key ON current_state_events (state_key); - -CREATE TABLE IF NOT EXISTS room_memberships( - event_id TEXT NOT NULL, - user_id TEXT NOT NULL, - sender TEXT NOT NULL, - room_id TEXT NOT NULL, - membership TEXT NOT NULL -); - -CREATE INDEX room_memberships_event_id ON room_memberships (event_id); -CREATE INDEX room_memberships_room_id ON room_memberships (room_id); -CREATE INDEX room_memberships_user_id ON room_memberships (user_id); - -CREATE TABLE IF NOT EXISTS feedback( - event_id TEXT NOT NULL, - feedback_type TEXT, - target_event_id TEXT, - sender TEXT, - room_id TEXT -); - -CREATE TABLE IF NOT EXISTS topics( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - topic TEXT NOT NULL -); - -CREATE INDEX topics_event_id ON topics(event_id); -CREATE INDEX topics_room_id ON topics(room_id); - -CREATE TABLE IF NOT EXISTS room_names( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - name TEXT NOT NULL -); - -CREATE INDEX room_names_event_id ON room_names(event_id); -CREATE INDEX room_names_room_id ON room_names(room_id); - -CREATE TABLE IF NOT EXISTS rooms( - room_id TEXT PRIMARY KEY NOT NULL, - is_public BOOL, - creator TEXT -); - -CREATE TABLE IF NOT EXISTS room_hosts( - room_id TEXT NOT NULL, - host TEXT NOT NULL, - UNIQUE (room_id, host) -); - -CREATE INDEX room_hosts_room_id ON room_hosts (room_id); diff --git a/synapse/storage/schema/full_schemas/11/keys.sql b/synapse/storage/schema/full_schemas/11/keys.sql deleted file mode 100644 index ca0ca1b694..0000000000 --- 
a/synapse/storage/schema/full_schemas/11/keys.sql +++ /dev/null @@ -1,31 +0,0 @@ -/* Copyright 2014-2016 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -CREATE TABLE IF NOT EXISTS server_tls_certificates( - server_name TEXT, -- Server name. - fingerprint TEXT, -- Certificate fingerprint. - from_server TEXT, -- Which key server the certificate was fetched from. - ts_added_ms BIGINT, -- When the certifcate was added. - tls_certificate bytea, -- DER encoded x509 certificate. - UNIQUE (server_name, fingerprint) -); - -CREATE TABLE IF NOT EXISTS server_signature_keys( - server_name TEXT, -- Server name. - key_id TEXT, -- Key version. - from_server TEXT, -- Which key server the key was fetched form. - ts_added_ms BIGINT, -- When the key was added. - verify_key bytea, -- NACL verification key. - UNIQUE (server_name, key_id) -); diff --git a/synapse/storage/schema/full_schemas/11/media_repository.sql b/synapse/storage/schema/full_schemas/11/media_repository.sql deleted file mode 100644 index 9c264d6ece..0000000000 --- a/synapse/storage/schema/full_schemas/11/media_repository.sql +++ /dev/null @@ -1,65 +0,0 @@ -/* Copyright 2014-2016 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -CREATE TABLE IF NOT EXISTS local_media_repository ( - media_id TEXT, -- The id used to refer to the media. - media_type TEXT, -- The MIME-type of the media. - media_length INTEGER, -- Length of the media in bytes. - created_ts BIGINT, -- When the content was uploaded in ms. - upload_name TEXT, -- The name the media was uploaded with. - user_id TEXT, -- The user who uploaded the file. - UNIQUE (media_id) -); - -CREATE TABLE IF NOT EXISTS local_media_repository_thumbnails ( - media_id TEXT, -- The id used to refer to the media. - thumbnail_width INTEGER, -- The width of the thumbnail in pixels. - thumbnail_height INTEGER, -- The height of the thumbnail in pixels. - thumbnail_type TEXT, -- The MIME-type of the thumbnail. - thumbnail_method TEXT, -- The method used to make the thumbnail. - thumbnail_length INTEGER, -- The length of the thumbnail in bytes. - UNIQUE ( - media_id, thumbnail_width, thumbnail_height, thumbnail_type - ) -); - -CREATE INDEX local_media_repository_thumbnails_media_id - ON local_media_repository_thumbnails (media_id); - -CREATE TABLE IF NOT EXISTS remote_media_cache ( - media_origin TEXT, -- The remote HS the media came from. - media_id TEXT, -- The id used to refer to the media on that server. 
- media_type TEXT, -- The MIME-type of the media. - created_ts BIGINT, -- When the content was uploaded in ms. - upload_name TEXT, -- The name the media was uploaded with. - media_length INTEGER, -- Length of the media in bytes. - filesystem_id TEXT, -- The name used to store the media on disk. - UNIQUE (media_origin, media_id) -); - -CREATE TABLE IF NOT EXISTS remote_media_cache_thumbnails ( - media_origin TEXT, -- The remote HS the media came from. - media_id TEXT, -- The id used to refer to the media. - thumbnail_width INTEGER, -- The width of the thumbnail in pixels. - thumbnail_height INTEGER, -- The height of the thumbnail in pixels. - thumbnail_method TEXT, -- The method used to make the thumbnail - thumbnail_type TEXT, -- The MIME-type of the thumbnail. - thumbnail_length INTEGER, -- The length of the thumbnail in bytes. - filesystem_id TEXT, -- The name used to store the media on disk. - UNIQUE ( - media_origin, media_id, thumbnail_width, thumbnail_height, - thumbnail_type - ) -); diff --git a/synapse/storage/schema/full_schemas/11/presence.sql b/synapse/storage/schema/full_schemas/11/presence.sql deleted file mode 100644 index 492725994c..0000000000 --- a/synapse/storage/schema/full_schemas/11/presence.sql +++ /dev/null @@ -1,35 +0,0 @@ -/* Copyright 2014-2016 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -CREATE TABLE IF NOT EXISTS presence( - user_id TEXT NOT NULL, - state VARCHAR(20), - status_msg TEXT, - mtime BIGINT -- miliseconds since last state change -); - --- For each of /my/ users which possibly-remote users are allowed to see their --- presence state -CREATE TABLE IF NOT EXISTS presence_allow_inbound( - observed_user_id TEXT NOT NULL, - observer_user_id TEXT NOT NULL -- a UserID, -); - --- For each of /my/ users (watcher), which possibly-remote users are they --- watching? -CREATE TABLE IF NOT EXISTS presence_list( - user_id TEXT NOT NULL, - observed_user_id TEXT NOT NULL, -- a UserID, - accepted BOOLEAN NOT NULL -); diff --git a/synapse/storage/schema/full_schemas/11/profiles.sql b/synapse/storage/schema/full_schemas/11/profiles.sql deleted file mode 100644 index b314e6df75..0000000000 --- a/synapse/storage/schema/full_schemas/11/profiles.sql +++ /dev/null @@ -1,19 +0,0 @@ -/* Copyright 2014-2016 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -CREATE TABLE IF NOT EXISTS profiles( - user_id TEXT NOT NULL, - displayname TEXT, - avatar_url TEXT -); diff --git a/synapse/storage/schema/full_schemas/11/redactions.sql b/synapse/storage/schema/full_schemas/11/redactions.sql deleted file mode 100644 index 318f0d9aa5..0000000000 --- a/synapse/storage/schema/full_schemas/11/redactions.sql +++ /dev/null @@ -1,22 +0,0 @@ -/* Copyright 2014-2016 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -CREATE TABLE IF NOT EXISTS redactions ( - event_id TEXT NOT NULL, - redacts TEXT NOT NULL, - UNIQUE (event_id) -); - -CREATE INDEX redactions_event_id ON redactions (event_id); -CREATE INDEX redactions_redacts ON redactions (redacts); diff --git a/synapse/storage/schema/full_schemas/11/state.sql b/synapse/storage/schema/full_schemas/11/state.sql deleted file mode 100644 index b901e0f017..0000000000 --- a/synapse/storage/schema/full_schemas/11/state.sql +++ /dev/null @@ -1,40 +0,0 @@ -/* Copyright 2014-2016 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -CREATE TABLE IF NOT EXISTS state_groups( - id INTEGER PRIMARY KEY, - room_id TEXT NOT NULL, - event_id TEXT NOT NULL -); - -CREATE TABLE IF NOT EXISTS state_groups_state( - state_group INTEGER NOT NULL, - room_id TEXT NOT NULL, - type TEXT NOT NULL, - state_key TEXT NOT NULL, - event_id TEXT NOT NULL -); - -CREATE TABLE IF NOT EXISTS event_to_state_groups( - event_id TEXT NOT NULL, - state_group INTEGER NOT NULL, - UNIQUE (event_id) -); - -CREATE INDEX state_groups_id ON state_groups(id); - -CREATE INDEX state_groups_state_id ON state_groups_state(state_group); -CREATE INDEX state_groups_state_tuple ON state_groups_state(room_id, type, state_key); -CREATE INDEX event_to_state_groups_id ON event_to_state_groups(event_id); diff --git a/synapse/storage/schema/full_schemas/11/transactions.sql b/synapse/storage/schema/full_schemas/11/transactions.sql deleted file mode 100644 index f6a058832e..0000000000 --- a/synapse/storage/schema/full_schemas/11/transactions.sql +++ /dev/null @@ -1,44 +0,0 @@ -/* Copyright 2014-2016 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ --- Stores what transaction ids we have received and what our response was -CREATE TABLE IF NOT EXISTS received_transactions( - transaction_id TEXT, - origin TEXT, - ts BIGINT, - response_code INTEGER, - response_json bytea, - has_been_referenced SMALLINT DEFAULT 0, -- Whether thishas been referenced by a prev_tx - UNIQUE (transaction_id, origin) -); - -CREATE INDEX transactions_have_ref ON received_transactions(origin, has_been_referenced);-- WHERE has_been_referenced = 0; - --- For sent transactions only. -CREATE TABLE IF NOT EXISTS transaction_id_to_pdu( - transaction_id INTEGER, - destination TEXT, - pdu_id TEXT, - pdu_origin TEXT -); - -CREATE INDEX transaction_id_to_pdu_tx ON transaction_id_to_pdu(transaction_id, destination); -CREATE INDEX transaction_id_to_pdu_dest ON transaction_id_to_pdu(destination); - --- To track destination health -CREATE TABLE IF NOT EXISTS destinations( - destination TEXT PRIMARY KEY, - retry_last_ts BIGINT, - retry_interval INTEGER -); diff --git a/synapse/storage/schema/full_schemas/11/users.sql b/synapse/storage/schema/full_schemas/11/users.sql deleted file mode 100644 index 6c1d4c34a1..0000000000 --- a/synapse/storage/schema/full_schemas/11/users.sql +++ /dev/null @@ -1,43 +0,0 @@ -/* Copyright 2014-2016 OpenMarket Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -CREATE TABLE IF NOT EXISTS users( - id INTEGER PRIMARY KEY AUTOINCREMENT, - name TEXT, - password_hash TEXT, - creation_ts BIGINT, - admin SMALLINT DEFAULT 0 NOT NULL, - UNIQUE(name) -); - -CREATE TABLE IF NOT EXISTS access_tokens( - id INTEGER PRIMARY KEY AUTOINCREMENT, - user_id TEXT NOT NULL, - device_id TEXT, - token TEXT NOT NULL, - last_used BIGINT, - UNIQUE(token) -); - -CREATE TABLE IF NOT EXISTS user_ips ( - user TEXT NOT NULL, - access_token TEXT NOT NULL, - device_id TEXT, - ip TEXT NOT NULL, - user_agent TEXT NOT NULL, - last_seen BIGINT NOT NULL, - UNIQUE (user, access_token, ip, user_agent) -); - -CREATE INDEX user_ips_user ON user_ips(user); diff --git a/synapse/storage/schema/full_schemas/16/application_services.sql b/synapse/storage/schema/full_schemas/16/application_services.sql index aee0e68473..883fcd10b2 100644 --- a/synapse/storage/schema/full_schemas/16/application_services.sql +++ b/synapse/storage/schema/full_schemas/16/application_services.sql @@ -13,22 +13,11 @@ * limitations under the License. 
*/ -CREATE TABLE IF NOT EXISTS application_services( - id BIGINT PRIMARY KEY, - url TEXT, - token TEXT, - hs_token TEXT, - sender TEXT, - UNIQUE(token) -); +/* We used to create tables called application_services and + * application_services_regex, but these are no longer used and are removed in + * delta 54. + */ -CREATE TABLE IF NOT EXISTS application_services_regex( - id BIGINT PRIMARY KEY, - as_id BIGINT NOT NULL, - namespace INTEGER, /* enum[room_id|room_alias|user_id] */ - regex TEXT, - FOREIGN KEY(as_id) REFERENCES application_services(id) -); CREATE TABLE IF NOT EXISTS application_services_state( as_id TEXT PRIMARY KEY, diff --git a/synapse/storage/schema/full_schemas/16/event_edges.sql b/synapse/storage/schema/full_schemas/16/event_edges.sql index 6b5a5a88fa..10ce2aa7a0 100644 --- a/synapse/storage/schema/full_schemas/16/event_edges.sql +++ b/synapse/storage/schema/full_schemas/16/event_edges.sql @@ -13,6 +13,11 @@ * limitations under the License. */ +/* We used to create tables called event_destinations and + * state_forward_extremities, but these are no longer used and are removed in + * delta 54. + */ + CREATE TABLE IF NOT EXISTS event_forward_extremities( event_id TEXT NOT NULL, room_id TEXT NOT NULL, @@ -54,31 +59,6 @@ CREATE TABLE IF NOT EXISTS room_depth( CREATE INDEX room_depth_room ON room_depth(room_id); - -create TABLE IF NOT EXISTS event_destinations( - event_id TEXT NOT NULL, - destination TEXT NOT NULL, - delivered_ts BIGINT DEFAULT 0, -- or 0 if not delivered - UNIQUE (event_id, destination) -); - -CREATE INDEX event_destinations_id ON event_destinations(event_id); - - -CREATE TABLE IF NOT EXISTS state_forward_extremities( - event_id TEXT NOT NULL, - room_id TEXT NOT NULL, - type TEXT NOT NULL, - state_key TEXT NOT NULL, - UNIQUE (event_id, room_id) -); - -CREATE INDEX st_extrem_keys ON state_forward_extremities( - room_id, type, state_key -); -CREATE INDEX st_extrem_id ON state_forward_extremities(event_id); - - CREATE TABLE IF NOT EXISTS event_auth( event_id TEXT NOT NULL, auth_id TEXT NOT NULL, diff --git a/synapse/storage/schema/full_schemas/16/event_signatures.sql b/synapse/storage/schema/full_schemas/16/event_signatures.sql index 00ce85980e..95826da431 100644 --- a/synapse/storage/schema/full_schemas/16/event_signatures.sql +++ b/synapse/storage/schema/full_schemas/16/event_signatures.sql @@ -13,15 +13,9 @@ * limitations under the License. */ -CREATE TABLE IF NOT EXISTS event_content_hashes ( - event_id TEXT, - algorithm TEXT, - hash bytea, - UNIQUE (event_id, algorithm) -); - -CREATE INDEX event_content_hashes_id ON event_content_hashes(event_id); - + /* We used to create tables called event_content_hashes and event_edge_hashes, + * but these are no longer used and are removed in delta 54. + */ CREATE TABLE IF NOT EXISTS event_reference_hashes ( event_id TEXT, @@ -42,14 +36,3 @@ CREATE TABLE IF NOT EXISTS event_signatures ( ); CREATE INDEX event_signatures_id ON event_signatures(event_id); - - -CREATE TABLE IF NOT EXISTS event_edge_hashes( - event_id TEXT, - prev_event_id TEXT, - algorithm TEXT, - hash bytea, - UNIQUE (event_id, prev_event_id, algorithm) -); - -CREATE INDEX event_edge_hashes_id ON event_edge_hashes(event_id); diff --git a/synapse/storage/schema/full_schemas/16/im.sql b/synapse/storage/schema/full_schemas/16/im.sql index 5f5cb8d01d..a1a2aa8e5b 100644 --- a/synapse/storage/schema/full_schemas/16/im.sql +++ b/synapse/storage/schema/full_schemas/16/im.sql @@ -13,6 +13,10 @@ * limitations under the License. 
*/ +/* We used to create tables called room_hosts and feedback, + * but these are no longer used and are removed in delta 54. + */ + CREATE TABLE IF NOT EXISTS events( stream_ordering INTEGER PRIMARY KEY, topological_ordering BIGINT NOT NULL, @@ -91,15 +95,6 @@ CREATE TABLE IF NOT EXISTS room_memberships( CREATE INDEX room_memberships_room_id ON room_memberships (room_id); CREATE INDEX room_memberships_user_id ON room_memberships (user_id); -CREATE TABLE IF NOT EXISTS feedback( - event_id TEXT NOT NULL, - feedback_type TEXT, - target_event_id TEXT, - sender TEXT, - room_id TEXT, - UNIQUE (event_id) -); - CREATE TABLE IF NOT EXISTS topics( event_id TEXT NOT NULL, room_id TEXT NOT NULL, @@ -123,11 +118,3 @@ CREATE TABLE IF NOT EXISTS rooms( is_public BOOL, creator TEXT ); - -CREATE TABLE IF NOT EXISTS room_hosts( - room_id TEXT NOT NULL, - host TEXT NOT NULL, - UNIQUE (room_id, host) -); - -CREATE INDEX room_hosts_room_id ON room_hosts (room_id); diff --git a/synapse/storage/schema/full_schemas/16/presence.sql b/synapse/storage/schema/full_schemas/16/presence.sql index 0892c4cf96..01d2d8f833 100644 --- a/synapse/storage/schema/full_schemas/16/presence.sql +++ b/synapse/storage/schema/full_schemas/16/presence.sql @@ -28,5 +28,5 @@ CREATE TABLE IF NOT EXISTS presence_allow_inbound( UNIQUE (observed_user_id, observer_user_id) ); --- We used to create a table called presence_list, but this is no longer used +-- We used to create a table called presence_list, but this is no longer used -- and is removed in delta 54. \ No newline at end of file From b25e387c0d0eef77e0ba20cb9f12b67272664bae Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Mon, 8 Apr 2019 15:47:39 +0100 Subject: [PATCH 009/429] add context to phonehome stats (#5020) add context to phonehome stats --- changelog.d/5020.feature | 1 + docs/sample_config.yaml | 3 +++ synapse/app/homeserver.py | 2 +- synapse/config/server.py | 4 ++++ 4 files changed, 9 insertions(+), 1 deletion(-) create mode 100644 changelog.d/5020.feature diff --git a/changelog.d/5020.feature b/changelog.d/5020.feature new file mode 100644 index 0000000000..71f7a8db2e --- /dev/null +++ b/changelog.d/5020.feature @@ -0,0 +1 @@ +Add context to phonehome stats. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index f6b3fac6cd..a380b5f9a8 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -236,6 +236,9 @@ listeners: # - medium: 'email' # address: 'reserved_user@example.com' +# Used by phonehome stats to group together related servers. 
+#server_context: context + ## TLS ## diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 869c028d1f..79be977ea6 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -518,6 +518,7 @@ def run(hs): uptime = 0 stats["homeserver"] = hs.config.server_name + stats["server_context"] = hs.config.server_context stats["timestamp"] = now stats["uptime_seconds"] = uptime version = sys.version_info @@ -558,7 +559,6 @@ def run(hs): stats["database_engine"] = hs.get_datastore().database_engine_name stats["database_server_version"] = hs.get_datastore().get_server_version() - logger.info("Reporting stats to matrix.org: %s" % (stats,)) try: yield hs.get_simple_http_client().put_json( diff --git a/synapse/config/server.py b/synapse/config/server.py index 08e4e45482..c5e5679d52 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -37,6 +37,7 @@ class ServerConfig(Config): def read_config(self, config): self.server_name = config["server_name"] + self.server_context = config.get("server_context", None) try: parse_and_validate_server_name(self.server_name) @@ -484,6 +485,9 @@ class ServerConfig(Config): #mau_limit_reserved_threepids: # - medium: 'email' # address: 'reserved_user@example.com' + + # Used by phonehome stats to group together related servers. + #server_context: context """ % locals() def read_arguments(self, args): From 3352baac4b03f3414e0a006b9413b65454d1fe91 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 8 Apr 2019 21:50:18 +0100 Subject: [PATCH 010/429] Remove unused server_tls_certificates functions (#5028) These have been unused since #4120, and with the demise of perspectives, it is unlikely that they will ever be used again. --- changelog.d/4992.misc | 2 +- changelog.d/5028.misc | 1 + synapse/replication/slave/storage/keys.py | 3 -- synapse/storage/keys.py | 49 +------------------ .../schema/delta/54/drop_legacy_tables.sql | 4 +- .../storage/schema/full_schemas/16/keys.sql | 11 ++--- 6 files changed, 7 insertions(+), 63 deletions(-) create mode 100644 changelog.d/5028.misc diff --git a/changelog.d/4992.misc b/changelog.d/4992.misc index 8a9eaea4cf..3ee4228c09 100644 --- a/changelog.d/4992.misc +++ b/changelog.d/4992.misc @@ -1 +1 @@ -Remove legacy tables detailed in #1830. +Remove a number of unused tables from the database schema. diff --git a/changelog.d/5028.misc b/changelog.d/5028.misc new file mode 100644 index 0000000000..3ee4228c09 --- /dev/null +++ b/changelog.d/5028.misc @@ -0,0 +1 @@ +Remove a number of unused tables from the database schema. 
diff --git a/synapse/replication/slave/storage/keys.py b/synapse/replication/slave/storage/keys.py index 8032f53fec..de00660c0e 100644 --- a/synapse/replication/slave/storage/keys.py +++ b/synapse/replication/slave/storage/keys.py @@ -27,8 +27,5 @@ class SlavedKeyStore(BaseSlavedStore): get_server_verify_keys = __func__(DataStore.get_server_verify_keys) store_server_verify_key = __func__(DataStore.store_server_verify_key) - get_server_certificate = __func__(DataStore.get_server_certificate) - store_server_certificate = __func__(DataStore.store_server_certificate) - get_server_keys_json = __func__(DataStore.get_server_keys_json) store_server_keys_json = __func__(DataStore.store_server_keys_json) diff --git a/synapse/storage/keys.py b/synapse/storage/keys.py index f24ab3eedd..47a9aa784b 100644 --- a/synapse/storage/keys.py +++ b/synapse/storage/keys.py @@ -13,14 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -import hashlib import logging import six from signedjson.key import decode_verify_key_bytes -import OpenSSL from twisted.internet import defer from synapse.util.caches.descriptors import cachedInlineCallbacks @@ -38,53 +36,8 @@ else: class KeyStore(SQLBaseStore): - """Persistence for signature verification keys and tls X.509 certificates + """Persistence for signature verification keys """ - - @defer.inlineCallbacks - def get_server_certificate(self, server_name): - """Retrieve the TLS X.509 certificate for the given server - Args: - server_name (bytes): The name of the server. - Returns: - (OpenSSL.crypto.X509): The tls certificate. - """ - tls_certificate_bytes, = yield self._simple_select_one( - table="server_tls_certificates", - keyvalues={"server_name": server_name}, - retcols=("tls_certificate",), - desc="get_server_certificate", - ) - tls_certificate = OpenSSL.crypto.load_certificate( - OpenSSL.crypto.FILETYPE_ASN1, tls_certificate_bytes - ) - defer.returnValue(tls_certificate) - - def store_server_certificate( - self, server_name, from_server, time_now_ms, tls_certificate - ): - """Stores the TLS X.509 certificate for the given server - Args: - server_name (str): The name of the server. - from_server (str): Where the certificate was looked up - time_now_ms (int): The time now in milliseconds - tls_certificate (OpenSSL.crypto.X509): The X.509 certificate. 
- """ - tls_certificate_bytes = OpenSSL.crypto.dump_certificate( - OpenSSL.crypto.FILETYPE_ASN1, tls_certificate - ) - fingerprint = hashlib.sha256(tls_certificate_bytes).hexdigest() - return self._simple_upsert( - table="server_tls_certificates", - keyvalues={"server_name": server_name, "fingerprint": fingerprint}, - values={ - "from_server": from_server, - "ts_added_ms": time_now_ms, - "tls_certificate": db_binary_type(tls_certificate_bytes), - }, - desc="store_server_certificate", - ) - @cachedInlineCallbacks() def _get_server_verify_key(self, server_name, key_id): verify_key_bytes = yield self._simple_select_one_onecol( diff --git a/synapse/storage/schema/delta/54/drop_legacy_tables.sql b/synapse/storage/schema/delta/54/drop_legacy_tables.sql index 77b39dc2d2..ecca005d9b 100644 --- a/synapse/storage/schema/delta/54/drop_legacy_tables.sql +++ b/synapse/storage/schema/delta/54/drop_legacy_tables.sql @@ -24,7 +24,5 @@ DROP TABLE IF EXISTS event_edge_hashes; DROP TABLE IF EXISTS event_signatures; DROP TABLE IF EXISTS feedback; DROP TABLE IF EXISTS room_hosts; +DROP TABLE IF EXISTS server_tls_certificates; DROP TABLE IF EXISTS state_forward_extremities; - - - diff --git a/synapse/storage/schema/full_schemas/16/keys.sql b/synapse/storage/schema/full_schemas/16/keys.sql index ca0ca1b694..11cdffdbb3 100644 --- a/synapse/storage/schema/full_schemas/16/keys.sql +++ b/synapse/storage/schema/full_schemas/16/keys.sql @@ -12,14 +12,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -CREATE TABLE IF NOT EXISTS server_tls_certificates( - server_name TEXT, -- Server name. - fingerprint TEXT, -- Certificate fingerprint. - from_server TEXT, -- Which key server the certificate was fetched from. - ts_added_ms BIGINT, -- When the certifcate was added. - tls_certificate bytea, -- DER encoded x509 certificate. - UNIQUE (server_name, fingerprint) -); + +-- we used to create a table called server_tls_certificates, but this is no +-- longer used, and is removed in delta 54. CREATE TABLE IF NOT EXISTS server_signature_keys( server_name TEXT, -- Server name. From f88a9e632341e955cf211d0ef3377c0f2b0ecf03 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 8 Apr 2019 15:25:51 +0100 Subject: [PATCH 011/429] Remove redundant merged_keys dict There's no point in collecting a merged dict of keys: it is sufficient to consider just the new keys which have been fetched by the most recent key_fetch_fns. --- synapse/crypto/keyring.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 04beededdc..ede120b2a6 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -275,10 +275,6 @@ class Keyring(object): @defer.inlineCallbacks def do_iterations(): with Measure(self.clock, "get_server_verify_keys"): - # dict[str, dict[str, VerifyKey]]: results so far. 
- # map server_name -> key_id -> VerifyKey - merged_results = {} - # dict[str, set(str)]: keys to fetch for each server missing_keys = {} for verify_request in verify_requests: @@ -288,21 +284,22 @@ class Keyring(object): for fn in key_fetch_fns: results = yield fn(missing_keys.items()) - merged_results.update(results) # We now need to figure out which verify requests we have keys # for and which we don't missing_keys = {} requests_missing_keys = [] for verify_request in verify_requests: - server_name = verify_request.server_name - result_keys = merged_results[server_name] - if verify_request.deferred.called: # We've already called this deferred, which probably # means that we've already found a key for it. continue + server_name = verify_request.server_name + + # see if any of the keys we got this time are sufficient to + # complete this VerifyKeyRequest. + result_keys = results.get(server_name, {}) for key_id in verify_request.key_ids: if key_id in result_keys: with PreserveLoggingContext(): From f50efcb65d794985185f5cc82c697673f50e4c47 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 8 Apr 2019 23:51:52 +0100 Subject: [PATCH 012/429] Replace SlavedKeyStore with a shim since we're pulling everything out of KeyStore anyway, we may as well simplify it. --- synapse/replication/slave/storage/keys.py | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/synapse/replication/slave/storage/keys.py b/synapse/replication/slave/storage/keys.py index de00660c0e..cc6f7f009f 100644 --- a/synapse/replication/slave/storage/keys.py +++ b/synapse/replication/slave/storage/keys.py @@ -13,19 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.storage import DataStore -from synapse.storage.keys import KeyStore +from synapse.storage import KeyStore -from ._base import BaseSlavedStore, __func__ +# KeyStore isn't really safe to use from a worker, but for now we do so and hope that +# the races it creates aren't too bad. 
- -class SlavedKeyStore(BaseSlavedStore): - _get_server_verify_key = KeyStore.__dict__[ - "_get_server_verify_key" - ] - - get_server_verify_keys = __func__(DataStore.get_server_verify_keys) - store_server_verify_key = __func__(DataStore.store_server_verify_key) - - get_server_keys_json = __func__(DataStore.get_server_keys_json) - store_server_keys_json = __func__(DataStore.store_server_keys_json) +SlavedKeyStore = KeyStore From 0084309cd26d28fc8d2c152b4a7f13479fae16f9 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 8 Apr 2019 22:00:11 +0100 Subject: [PATCH 013/429] Rewrite test_keys as a HomeserverTestCase --- tests/storage/test_keys.py | 36 ++++++++++++++++-------------------- 1 file changed, 16 insertions(+), 20 deletions(-) diff --git a/tests/storage/test_keys.py b/tests/storage/test_keys.py index 0d2dc9f325..7170ae76c7 100644 --- a/tests/storage/test_keys.py +++ b/tests/storage/test_keys.py @@ -15,33 +15,29 @@ import signedjson.key -from twisted.internet import defer - import tests.unittest -import tests.utils + +KEY_1 = signedjson.key.decode_verify_key_base64( + "ed25519", "key1", "fP5l4JzpZPq/zdbBg5xx6lQGAAOM9/3w94cqiJ5jPrw" +) +KEY_2 = signedjson.key.decode_verify_key_base64( + "ed25519", "key2", "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw" +) -class KeyStoreTestCase(tests.unittest.TestCase): - - @defer.inlineCallbacks - def setUp(self): - hs = yield tests.utils.setup_test_homeserver(self.addCleanup) - self.store = hs.get_datastore() - - @defer.inlineCallbacks +class KeyStoreTestCase(tests.unittest.HomeserverTestCase): def test_get_server_verify_keys(self): - key1 = signedjson.key.decode_verify_key_base64( - "ed25519", "key1", "fP5l4JzpZPq/zdbBg5xx6lQGAAOM9/3w94cqiJ5jPrw" - ) - key2 = signedjson.key.decode_verify_key_base64( - "ed25519", "key2", "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw" - ) - yield self.store.store_server_verify_key("server1", "from_server", 0, key1) - yield self.store.store_server_verify_key("server1", "from_server", 0, key2) + store = self.hs.get_datastore() - res = yield self.store.get_server_verify_keys( + d = store.store_server_verify_key("server1", "from_server", 0, KEY_1) + self.get_success(d) + d = store.store_server_verify_key("server1", "from_server", 0, KEY_2) + self.get_success(d) + + d = store.get_server_verify_keys( "server1", ["ed25519:key1", "ed25519:key2", "ed25519:key3"] ) + res = self.get_success(d) self.assertEqual(len(res.keys()), 2) self.assertEqual(res["ed25519:key1"].version, "key1") From 18b69be00f9fa79cf2b237992ef1f0094d1dc453 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 8 Apr 2019 14:51:07 +0100 Subject: [PATCH 014/429] Rewrite Datastore.get_server_verify_keys Rewrite this so that it doesn't hammer the database. --- synapse/crypto/keyring.py | 38 +++++++++----------- synapse/storage/keys.py | 74 +++++++++++++++++++++++--------------- tests/storage/test_keys.py | 53 +++++++++++++++++++++++++-- 3 files changed, 113 insertions(+), 52 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index ede120b2a6..834b107705 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -301,13 +301,12 @@ class Keyring(object): # complete this VerifyKeyRequest. 
result_keys = results.get(server_name, {}) for key_id in verify_request.key_ids: - if key_id in result_keys: + key = result_keys.get(key_id) + if key: with PreserveLoggingContext(): - verify_request.deferred.callback(( - server_name, - key_id, - result_keys[key_id], - )) + verify_request.deferred.callback( + (server_name, key_id, key) + ) break else: # The else block is only reached if the loop above @@ -341,27 +340,24 @@ class Keyring(object): @defer.inlineCallbacks def get_keys_from_store(self, server_name_and_key_ids): """ - Args: - server_name_and_key_ids (list[(str, iterable[str])]): + server_name_and_key_ids (iterable(Tuple[str, iterable[str]]): list of (server_name, iterable[key_id]) tuples to fetch keys for Returns: - Deferred: resolves to dict[str, dict[str, VerifyKey]]: map from + Deferred: resolves to dict[str, dict[str, VerifyKey|None]]: map from server_name -> key_id -> VerifyKey """ - res = yield logcontext.make_deferred_yieldable(defer.gatherResults( - [ - run_in_background( - self.store.get_server_verify_keys, - server_name, key_ids, - ).addCallback(lambda ks, server: (server, ks), server_name) - for server_name, key_ids in server_name_and_key_ids - ], - consumeErrors=True, - ).addErrback(unwrapFirstError)) - - defer.returnValue(dict(res)) + keys_to_fetch = ( + (server_name, key_id) + for server_name, key_ids in server_name_and_key_ids + for key_id in key_ids + ) + res = yield self.store.get_server_verify_keys(keys_to_fetch) + keys = {} + for (server_name, key_id), key in res.items(): + keys.setdefault(server_name, {})[key_id] = key + defer.returnValue(keys) @defer.inlineCallbacks def get_keys_from_perspectives(self, server_name_and_key_ids): diff --git a/synapse/storage/keys.py b/synapse/storage/keys.py index 47a9aa784b..7036541792 100644 --- a/synapse/storage/keys.py +++ b/synapse/storage/keys.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd +# Copyright 2019 New Vector Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,15 +14,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import itertools import logging import six from signedjson.key import decode_verify_key_bytes -from twisted.internet import defer - -from synapse.util.caches.descriptors import cachedInlineCallbacks +from synapse.util import batch_iter +from synapse.util.caches.descriptors import cached, cachedList from ._base import SQLBaseStore @@ -38,36 +39,50 @@ else: class KeyStore(SQLBaseStore): """Persistence for signature verification keys """ - @cachedInlineCallbacks() - def _get_server_verify_key(self, server_name, key_id): - verify_key_bytes = yield self._simple_select_one_onecol( - table="server_signature_keys", - keyvalues={"server_name": server_name, "key_id": key_id}, - retcol="verify_key", - desc="_get_server_verify_key", - allow_none=True, - ) - if verify_key_bytes: - defer.returnValue(decode_verify_key_bytes(key_id, bytes(verify_key_bytes))) + @cached() + def _get_server_verify_key(self, server_name_and_key_id): + raise NotImplementedError() - @defer.inlineCallbacks - def get_server_verify_keys(self, server_name, key_ids): - """Retrieve the NACL verification key for a given server for the given - key_ids + @cachedList( + cached_method_name="_get_server_verify_key", list_name="server_name_and_key_ids" + ) + def get_server_verify_keys(self, server_name_and_key_ids): + """ Args: - server_name (str): The name of the server. - key_ids (iterable[str]): key_ids to try and look up. + server_name_and_key_ids (iterable[Tuple[str, str]]): + iterable of (server_name, key-id) tuples to fetch keys for + Returns: - Deferred: resolves to dict[str, VerifyKey]: map from - key_id to verification key. + Deferred: resolves to dict[Tuple[str, str], VerifyKey|None]: + map from (server_name, key_id) -> VerifyKey, or None if the key is + unknown """ keys = {} - for key_id in key_ids: - key = yield self._get_server_verify_key(server_name, key_id) - if key: - keys[key_id] = key - defer.returnValue(keys) + + def _get_keys(txn, batch): + """Processes a batch of keys to fetch, and adds the result to `keys`.""" + + # batch_iter always returns tuples so it's safe to do len(batch) + sql = ( + "SELECT server_name, key_id, verify_key FROM server_signature_keys " + "WHERE 1=0" + ) + " OR (server_name=? AND key_id=?)" * len(batch) + + txn.execute(sql, tuple(itertools.chain.from_iterable(batch))) + + for row in txn: + server_name, key_id, key_bytes = row + keys[(server_name, key_id)] = decode_verify_key_bytes( + key_id, bytes(key_bytes) + ) + + def _txn(txn): + for batch in batch_iter(server_name_and_key_ids, 50): + _get_keys(txn, batch) + return keys + + return self.runInteraction("get_server_verify_keys", _txn) def store_server_verify_key( self, server_name, from_server, time_now_ms, verify_key @@ -93,8 +108,11 @@ class KeyStore(SQLBaseStore): "verify_key": db_binary_type(verify_key.encode()), }, ) + # invalidate takes a tuple corresponding to the params of + # _get_server_verify_key. _get_server_verify_key only takes one + # param, which is itself the 2-tuple (server_name, key_id). 
txn.call_after( - self._get_server_verify_key.invalidate, (server_name, key_id) + self._get_server_verify_key.invalidate, ((server_name, key_id),) ) return self.runInteraction("store_server_verify_key", _txn) diff --git a/tests/storage/test_keys.py b/tests/storage/test_keys.py index 7170ae76c7..6bfaa00fe9 100644 --- a/tests/storage/test_keys.py +++ b/tests/storage/test_keys.py @@ -15,6 +15,8 @@ import signedjson.key +from twisted.internet.defer import Deferred + import tests.unittest KEY_1 = signedjson.key.decode_verify_key_base64( @@ -35,10 +37,55 @@ class KeyStoreTestCase(tests.unittest.HomeserverTestCase): self.get_success(d) d = store.get_server_verify_keys( - "server1", ["ed25519:key1", "ed25519:key2", "ed25519:key3"] + [ + ("server1", "ed25519:key1"), + ("server1", "ed25519:key2"), + ("server1", "ed25519:key3"), + ] ) res = self.get_success(d) + self.assertEqual(len(res.keys()), 3) + self.assertEqual(res[("server1", "ed25519:key1")].version, "key1") + self.assertEqual(res[("server1", "ed25519:key2")].version, "key2") + + # non-existent result gives None + self.assertIsNone(res[("server1", "ed25519:key3")]) + + def test_cache(self): + """Check that updates correctly invalidate the cache.""" + + store = self.hs.get_datastore() + + key_id_1 = "ed25519:key1" + key_id_2 = "ed25519:key2" + + d = store.store_server_verify_key("srv1", "from_server", 0, KEY_1) + self.get_success(d) + d = store.store_server_verify_key("srv1", "from_server", 0, KEY_2) + self.get_success(d) + + d = store.get_server_verify_keys([("srv1", key_id_1), ("srv1", key_id_2)]) + res = self.get_success(d) self.assertEqual(len(res.keys()), 2) - self.assertEqual(res["ed25519:key1"].version, "key1") - self.assertEqual(res["ed25519:key2"].version, "key2") + self.assertEqual(res[("srv1", key_id_1)], KEY_1) + self.assertEqual(res[("srv1", key_id_2)], KEY_2) + + # we should be able to look up the same thing again without a db hit + res = store.get_server_verify_keys([("srv1", key_id_1)]) + if isinstance(res, Deferred): + res = self.successResultOf(res) + self.assertEqual(len(res.keys()), 1) + self.assertEqual(res[("srv1", key_id_1)], KEY_1) + + new_key_2 = signedjson.key.get_verify_key( + signedjson.key.generate_signing_key("key2") + ) + d = store.store_server_verify_key("srv1", "from_server", 10, new_key_2) + self.get_success(d) + + d = store.get_server_verify_keys([("srv1", key_id_1), ("srv1", key_id_2)]) + res = self.get_success(d) + self.assertEqual(len(res.keys()), 2) + self.assertEqual(res[("srv1", key_id_1)], KEY_1) + self.assertEqual(res[("srv1", key_id_2)], new_key_2) From 1f1e8dd8ec1dee9256f6f9573d4baad1b018be9d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 8 Apr 2019 23:27:37 +0100 Subject: [PATCH 015/429] changelog --- changelog.d/5030.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5030.misc diff --git a/changelog.d/5030.misc b/changelog.d/5030.misc new file mode 100644 index 0000000000..3456eb5381 --- /dev/null +++ b/changelog.d/5030.misc @@ -0,0 +1 @@ +Rewrite Datastore.get_server_verify_keys to reduce the number of database transactions. 
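As an aside on the get_server_verify_keys rewrite above: it avoids issuing one SELECT per key by folding each batch of (server_name, key_id) pairs into a single statement. Below is a minimal, self-contained sketch of that batching pattern; it uses sqlite3 and a stand-in for synapse.util.batch_iter rather than Synapse's SQLBaseStore machinery, so the connection handling and demo data are illustrative assumptions only.

    import itertools
    import sqlite3


    def batch_iter(iterable, size):
        # Stand-in for synapse.util.batch_iter: yield tuples of at most `size` items.
        it = iter(iterable)
        while True:
            batch = tuple(itertools.islice(it, size))
            if not batch:
                return
            yield batch


    def get_verify_key_rows(conn, server_name_and_key_ids, batch_size=50):
        # Fetch rows for many (server_name, key_id) pairs in a handful of queries,
        # using the "WHERE 1=0 OR (server_name=? AND key_id=?)" trick from the
        # patch above, instead of one round-trip per key.
        results = {}
        for batch in batch_iter(server_name_and_key_ids, batch_size):
            sql = (
                "SELECT server_name, key_id, verify_key FROM server_signature_keys"
                " WHERE 1=0" + " OR (server_name=? AND key_id=?)" * len(batch)
            )
            params = tuple(itertools.chain.from_iterable(batch))
            for server_name, key_id, key_bytes in conn.execute(sql, params):
                results[(server_name, key_id)] = key_bytes
        return results


    if __name__ == "__main__":
        conn = sqlite3.connect(":memory:")
        conn.execute(
            "CREATE TABLE server_signature_keys"
            " (server_name TEXT, key_id TEXT, verify_key BLOB)"
        )
        conn.execute(
            "INSERT INTO server_signature_keys VALUES ('server1', 'ed25519:key1', X'00')"
        )
        print(get_verify_key_rows(conn, [("server1", "ed25519:key1"), ("server2", "ed25519:keyX")]))

The "WHERE 1=0" prefix is only there so that each "OR (...)" clause can be appended uniformly. Keys that produce no row are simply absent from the sketch's result; in the real store, the cachedList wrapper reports those as None, which is what the key3 assertion in the test above relies on.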
From 4abf5aa81af68e5dc9ac4fc383f75504bbc8cf97 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 9 Apr 2019 11:29:50 +0100 Subject: [PATCH 016/429] Bump psycopg requirement (#5032) --- changelog.d/5032.bugfix | 1 + synapse/python_dependencies.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelog.d/5032.bugfix diff --git a/changelog.d/5032.bugfix b/changelog.d/5032.bugfix new file mode 100644 index 0000000000..cd71180ce9 --- /dev/null +++ b/changelog.d/5032.bugfix @@ -0,0 +1 @@ +Fix "cannot import name execute_batch" error with postgres. diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index f71e21ff4d..62c1748665 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -74,7 +74,9 @@ REQUIREMENTS = [ CONDITIONAL_REQUIREMENTS = { "email.enable_notifs": ["Jinja2>=2.9", "bleach>=1.4.2"], "matrix-synapse-ldap3": ["matrix-synapse-ldap3>=0.1"], - "postgres": ["psycopg2>=2.6"], + + # we use execute_batch, which arrived in psycopg 2.7. + "postgres": ["psycopg2>=2.7"], # ConsentResource uses select_autoescape, which arrived in jinja 2.9 "resources.consent": ["Jinja2>=2.9"], From 50d2a3059db29e7c9c7ccc9b005cec8497827e4b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 9 Apr 2019 14:36:40 +0100 Subject: [PATCH 017/429] Fix schema upgrade when dropping tables We need to drop tables in the correct order due to foreign table constraints (on `application_services`), otherwise the DROP TABLE command will fail. Introduced in #4992. --- synapse/storage/schema/delta/54/drop_legacy_tables.sql | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/synapse/storage/schema/delta/54/drop_legacy_tables.sql b/synapse/storage/schema/delta/54/drop_legacy_tables.sql index ecca005d9b..dbbe682697 100644 --- a/synapse/storage/schema/delta/54/drop_legacy_tables.sql +++ b/synapse/storage/schema/delta/54/drop_legacy_tables.sql @@ -13,8 +13,10 @@ * limitations under the License. */ -DROP TABLE IF EXISTS application_services; +-- we need to do this first due to foreign constraints DROP TABLE IF EXISTS application_services_regex; + +DROP TABLE IF EXISTS application_services; DROP TABLE IF EXISTS transaction_id_to_pdu; DROP TABLE IF EXISTS stats_reporting; DROP TABLE IF EXISTS current_state_resets; From 5e45b558b0f054a2ba41224548d76dc3c6af3481 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 9 Apr 2019 14:39:36 +0100 Subject: [PATCH 018/429] Newsfile --- changelog.d/5033.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5033.misc diff --git a/changelog.d/5033.misc b/changelog.d/5033.misc new file mode 100644 index 0000000000..3ee4228c09 --- /dev/null +++ b/changelog.d/5033.misc @@ -0,0 +1 @@ +Remove a number of unused tables from the database schema. 
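For background on the psycopg2 >= 2.7 bump a couple of patches above: 2.7 is the first release that ships psycopg2.extras.execute_batch, which sends many parameter sets to the server in a few grouped statements rather than one statement per row as cursor.executemany does. A rough sketch of how it is typically called follows; the DSN, table name and rows are made-up placeholders, not code taken from Synapse.

    import psycopg2
    from psycopg2.extras import execute_batch


    def bulk_insert(dsn, rows):
        # Insert many rows with a handful of server round-trips rather than one
        # statement per row; requires psycopg2 >= 2.7 for execute_batch.
        with psycopg2.connect(dsn) as conn:  # commits on clean exit
            with conn.cursor() as cur:
                execute_batch(
                    cur,
                    # 'example_kv' is an illustrative table, not a Synapse one.
                    "INSERT INTO example_kv (k, v) VALUES (%s, %s)",
                    rows,           # e.g. [("a", 1), ("b", 2), ...]
                    page_size=100,  # rows grouped into each statement sent
                )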
From 747aa9f8cad92ffcda51b2aa07987c87f4353649 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 8 Apr 2019 17:10:55 +0100 Subject: [PATCH 019/429] Add account expiration feature --- changelog.d/5027.feature | 1 + docs/sample_config.yaml | 6 +++ synapse/api/auth.py | 12 +++++ synapse/api/errors.py | 1 + synapse/config/registration.py | 17 +++++++ synapse/storage/prepare_database.py | 2 +- synapse/storage/registration.py | 34 +++++++++++++ .../schema/delta/54/account_validity.sql | 20 ++++++++ tests/rest/client/v2_alpha/test_register.py | 51 ++++++++++++++++++- tests/test_state.py | 4 +- 10 files changed, 144 insertions(+), 4 deletions(-) create mode 100644 changelog.d/5027.feature create mode 100644 synapse/storage/schema/delta/54/account_validity.sql diff --git a/changelog.d/5027.feature b/changelog.d/5027.feature new file mode 100644 index 0000000000..12766a82a7 --- /dev/null +++ b/changelog.d/5027.feature @@ -0,0 +1 @@ +Add time-based account expiration. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 4ada0fba0e..5594c8b9af 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -643,6 +643,12 @@ uploads_path: "DATADIR/uploads" # #enable_registration: false +# Optional account validity parameter. This allows for, e.g., accounts to +# be denied any request after a given period. +# +#account_validity: +# period: 6w + # The user must provide all of the below types of 3PID when registering. # #registrations_require_3pid: diff --git a/synapse/api/auth.py b/synapse/api/auth.py index e8112d5f05..976e0dd18b 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -64,6 +64,8 @@ class Auth(object): self.token_cache = LruCache(CACHE_SIZE_FACTOR * 10000) register_cache("cache", "token_cache", self.token_cache) + self._account_validity = hs.config.account_validity + @defer.inlineCallbacks def check_from_context(self, room_version, event, context, do_sig_check=True): prev_state_ids = yield context.get_prev_state_ids(self.store) @@ -226,6 +228,16 @@ class Auth(object): token_id = user_info["token_id"] is_guest = user_info["is_guest"] + # Deny the request if the user account has expired. + if self._account_validity.enabled: + expiration_ts = yield self.store.get_expiration_ts_for_user(user) + if self.clock.time_msec() >= expiration_ts: + raise AuthError( + 403, + "User account has expired", + errcode=Codes.EXPIRED_ACCOUNT, + ) + # device_id may not be present if get_user_by_access_token has been # stubbed out. 
device_id = user_info.get("device_id") diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 0b464834ce..4c33450e7f 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -60,6 +60,7 @@ class Codes(object): UNSUPPORTED_ROOM_VERSION = "M_UNSUPPORTED_ROOM_VERSION" INCOMPATIBLE_ROOM_VERSION = "M_INCOMPATIBLE_ROOM_VERSION" WRONG_ROOM_KEYS_VERSION = "M_WRONG_ROOM_KEYS_VERSION" + EXPIRED_ACCOUNT = "ORG_MATRIX_EXPIRED_ACCOUNT" class CodeMessageException(RuntimeError): diff --git a/synapse/config/registration.py b/synapse/config/registration.py index f6b2b9ceee..b7a7b4f1cf 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -20,6 +20,15 @@ from synapse.types import RoomAlias from synapse.util.stringutils import random_string_with_symbols +class AccountValidityConfig(Config): + def __init__(self, config): + self.enabled = (len(config) > 0) + + period = config.get("period", None) + if period: + self.period = self.parse_duration(period) + + class RegistrationConfig(Config): def read_config(self, config): @@ -31,6 +40,8 @@ class RegistrationConfig(Config): strtobool(str(config["disable_registration"])) ) + self.account_validity = AccountValidityConfig(config.get("account_validity", {})) + self.registrations_require_3pid = config.get("registrations_require_3pid", []) self.allowed_local_3pids = config.get("allowed_local_3pids", []) self.registration_shared_secret = config.get("registration_shared_secret") @@ -75,6 +86,12 @@ class RegistrationConfig(Config): # #enable_registration: false + # Optional account validity parameter. This allows for, e.g., accounts to + # be denied any request after a given period. + # + #account_validity: + # period: 6w + # The user must provide all of the below types of 3PID when registering. # #registrations_require_3pid: diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index fa36daac52..e042221774 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -25,7 +25,7 @@ logger = logging.getLogger(__name__) # Remember to update this number every time a change is made to database # schema files, so the users will be informed on server restarts. -SCHEMA_VERSION = 53 +SCHEMA_VERSION = 54 dir_path = os.path.abspath(os.path.dirname(__file__)) diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 9b6c28892c..eede8ae4d2 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -86,6 +86,26 @@ class RegistrationWorkerStore(SQLBaseStore): token ) + @cachedInlineCallbacks() + def get_expiration_ts_for_user(self, user): + """Get the expiration timestamp for the account bearing a given user ID. + + Args: + user (str): The ID of the user. + Returns: + defer.Deferred: None, if the account has no expiration timestamp, + otherwise int representation of the timestamp (as a number of + milliseconds since epoch). 
+ """ + res = yield self._simple_select_one_onecol( + table="account_validity", + keyvalues={"user_id": user.to_string()}, + retcol="expiration_ts_ms", + allow_none=True, + desc="get_expiration_date_for_user", + ) + defer.returnValue(res) + @defer.inlineCallbacks def is_server_admin(self, user): res = yield self._simple_select_one_onecol( @@ -351,6 +371,8 @@ class RegistrationStore(RegistrationWorkerStore, columns=["creation_ts"], ) + self._account_validity = hs.config.account_validity + # we no longer use refresh tokens, but it's possible that some people # might have a background update queued to build this index. Just # clear the background update. @@ -485,6 +507,18 @@ class RegistrationStore(RegistrationWorkerStore, "user_type": user_type, } ) + + if self._account_validity.enabled: + now_ms = self.clock.time_msec() + expiration_ts = now_ms + self._account_validity.period + self._simple_insert_txn( + txn, + "account_validity", + values={ + "user_id": user_id, + "expiration_ts_ms": expiration_ts, + } + ) except self.database_engine.module.IntegrityError: raise StoreError( 400, "User ID already taken.", errcode=Codes.USER_IN_USE diff --git a/synapse/storage/schema/delta/54/account_validity.sql b/synapse/storage/schema/delta/54/account_validity.sql new file mode 100644 index 0000000000..57249262d7 --- /dev/null +++ b/synapse/storage/schema/delta/54/account_validity.sql @@ -0,0 +1,20 @@ +/* Copyright 2019 New Vector Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Track what users are in public rooms. 
+CREATE TABLE IF NOT EXISTS account_validity ( + user_id TEXT PRIMARY KEY, + expiration_ts_ms BIGINT NOT NULL +); diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index a45e6e5e1f..d3611ed21f 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -1,15 +1,18 @@ +import datetime import json from synapse.api.constants import LoginType +from synapse.api.errors import Codes from synapse.appservice import ApplicationService -from synapse.rest.client.v2_alpha.register import register_servlets +from synapse.rest.client.v1 import admin, login +from synapse.rest.client.v2_alpha import register, sync from tests import unittest class RegisterRestServletTestCase(unittest.HomeserverTestCase): - servlets = [register_servlets] + servlets = [register.register_servlets] def make_homeserver(self, reactor, clock): @@ -181,3 +184,47 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) + + +class AccountValidityTestCase(unittest.HomeserverTestCase): + + servlets = [ + register.register_servlets, + admin.register_servlets, + login.register_servlets, + sync.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + config = self.default_config() + config.enable_registration = True + config.account_validity.enabled = True + config.account_validity.period = 604800000 # Time in ms for 1 week + self.hs = self.setup_test_homeserver(config=config) + + return self.hs + + def test_validity_period(self): + self.register_user("kermit", "monkey") + tok = self.login("kermit", "monkey") + + # The specific endpoint doesn't matter, all we need is an authenticated + # endpoint. + request, channel = self.make_request( + b"GET", "/sync", access_token=tok, + ) + self.render(request) + + self.assertEquals(channel.result["code"], b"200", channel.result) + + self.reactor.advance(datetime.timedelta(weeks=1).total_seconds()) + + request, channel = self.make_request( + b"GET", "/sync", access_token=tok, + ) + self.render(request) + + self.assertEquals(channel.result["code"], b"403", channel.result) + self.assertEquals( + channel.json_body["errcode"], Codes.EXPIRED_ACCOUNT, channel.result, + ) diff --git a/tests/test_state.py b/tests/test_state.py index e20c33322a..ce2b7eb7ed 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -24,7 +24,7 @@ from synapse.state import StateHandler, StateResolutionHandler from tests import unittest -from .utils import MockClock +from .utils import MockClock, default_config _next_event_id = 1000 @@ -159,6 +159,7 @@ class StateTestCase(unittest.TestCase): self.store = StateGroupStore() hs = Mock( spec_set=[ + "config", "get_datastore", "get_auth", "get_state_handler", @@ -166,6 +167,7 @@ class StateTestCase(unittest.TestCase): "get_state_resolution_handler", ] ) + hs.config = default_config("tesths") hs.get_datastore.return_value = self.store hs.get_state_handler.return_value = None hs.get_clock.return_value = MockClock() From a0fc256d6570652cedf9a68606e7849ff233e4e4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 9 Apr 2019 17:03:35 +0100 Subject: [PATCH 020/429] Limit in flight DNS requests This is to work around a bug in twisted where a large number of concurrent DNS requests cause it to tight loop forever. c.f. 
https://twistedmatrix.com/trac/ticket/9620#ticket --- synapse/app/_base.py | 83 +++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 82 insertions(+), 1 deletion(-) diff --git a/synapse/app/_base.py b/synapse/app/_base.py index d4c6c4c8e2..08199a5e8d 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -22,13 +22,14 @@ import traceback import psutil from daemonize import Daemonize -from twisted.internet import error, reactor +from twisted.internet import defer, error, reactor from twisted.protocols.tls import TLSMemoryBIOFactory import synapse from synapse.app import check_bind_error from synapse.crypto import context_factory from synapse.util import PreserveLoggingContext +from synapse.util.async_helpers import Linearizer from synapse.util.rlimit import change_resource_limit from synapse.util.versionstring import get_version_string @@ -99,6 +100,8 @@ def start_reactor( logger (logging.Logger): logger instance to pass to Daemonize """ + install_dns_limiter(reactor) + def run(): # make sure that we run the reactor with the sentinel log context, # otherwise other PreserveLoggingContext instances will get confused @@ -312,3 +315,81 @@ def setup_sentry(hs): name = hs.config.worker_name if hs.config.worker_name else "master" scope.set_tag("worker_app", app) scope.set_tag("worker_name", name) + + +def install_dns_limiter(reactor, max_dns_requests_in_flight=100): + """Replaces the resolver with one that limits the number of in flight DNS + requests. + + This is to workaround https://twistedmatrix.com/trac/ticket/9620, where we + can run out of file descriptors and infinite loop if we attempt to do too + many DNS queries at once + """ + new_resolver = _LimitedHostnameResolver( + reactor.nameResolver, max_dns_requests_in_flight, + ) + + reactor.installNameResolver(new_resolver) + + +class _LimitedHostnameResolver(object): + """Wraps a IHostnameResolver, limiting the number of in-flight DNS lookups. + """ + + def __init__(self, resolver, max_dns_requests_in_flight): + self._resolver = resolver + self._limiter = Linearizer( + name="dns_client_limiter", max_count=max_dns_requests_in_flight, + ) + + def resolveHostName(self, resolutionReceiver, hostName, portNumber=0, + addressTypes=None, transportSemantics='TCP'): + # Note this is happening deep within the reactor, so we don't need to + # worry about log contexts. + + # We need this function to return `resolutionReceiver` so we do all the + # actual logic involving deferreds in a separate function. + self._resolve( + resolutionReceiver, hostName, portNumber, + addressTypes, transportSemantics, + ) + + return resolutionReceiver + + @defer.inlineCallbacks + def _resolve(self, resolutionReceiver, hostName, portNumber=0, + addressTypes=None, transportSemantics='TCP'): + + with (yield self._limiter.queue(())): + # resolveHostName doesn't return a Deferred, so we need to hook into + # the receiver interface to get told when resolution has finished. 
+ + deferred = defer.Deferred() + receiver = _DeferredResolutionReceiver(resolutionReceiver, deferred) + + self._resolver.resolveHostName( + receiver, hostName, portNumber, + addressTypes, transportSemantics, + ) + + yield deferred + + +class _DeferredResolutionReceiver(object): + """Wraps a IResolutionReceiver and simply resolves the given deferred when + resolution is complete + """ + + def __init__(self, receiver, deferred): + self._receiver = receiver + self._deferred = deferred + + def resolutionBegan(self, resolutionInProgress): + self._receiver.resolutionBegan(resolutionInProgress) + + def addressResolved(self, address): + self._receiver.addressResolved(address) + + def resolutionComplete(self): + self._deferred.callback(()) + self._receiver.resolutionComplete() From 02491e009d64b0458abcde188e021bbf5a288053 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 9 Apr 2019 17:18:21 +0100 Subject: [PATCH 021/429] Newsfile --- changelog.d/5037.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5037.bugfix diff --git a/changelog.d/5037.bugfix b/changelog.d/5037.bugfix new file mode 100644 index 0000000000..c3af418ddf --- /dev/null +++ b/changelog.d/5037.bugfix @@ -0,0 +1 @@ +Workaround bug in twisted where attempting too many concurrent DNS requests could cause it to hang due to running out of file descriptors. From 329688c161b98a17d04806e461754eedd61c9a7f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 10 Apr 2019 07:23:48 +0100 Subject: [PATCH 022/429] Fix disappearing exceptions in manhole. (#5035) Avoid sending syntax errors from the manhole to sentry. --- changelog.d/5035.bugfix | 1 + synapse/util/manhole.py | 59 +++++++++++++++++++++++++++++++++++++++-- 2 files changed, 58 insertions(+), 2 deletions(-) create mode 100644 changelog.d/5035.bugfix diff --git a/changelog.d/5035.bugfix b/changelog.d/5035.bugfix new file mode 100644 index 0000000000..85e154027e --- /dev/null +++ b/changelog.d/5035.bugfix @@ -0,0 +1 @@ +Fix disappearing exceptions in manhole. diff --git a/synapse/util/manhole.py b/synapse/util/manhole.py index 9cb7e9c9ab..628a2962d9 100644 --- a/synapse/util/manhole.py +++ b/synapse/util/manhole.py @@ -1,4 +1,5 @@ # Copyright 2016 OpenMarket Ltd +# Copyright 2019 New Vector Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,10 +12,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import sys +import traceback from twisted.conch import manhole_ssh from twisted.conch.insults import insults -from twisted.conch.manhole import ColoredManhole +from twisted.conch.manhole import ColoredManhole, ManholeInterpreter from twisted.conch.ssh.keys import Key from twisted.cred import checkers, portal @@ -79,7 +82,7 @@ def manhole(username, password, globals): rlm = manhole_ssh.TerminalRealm() rlm.chainedProtocolFactory = lambda: insults.ServerProtocol( - ColoredManhole, + SynapseManhole, dict(globals, __name__="__console__") ) @@ -88,3 +91,55 @@ def manhole(username, password, globals): factory.privateKeys[b'ssh-rsa'] = Key.fromString(PRIVATE_KEY) return factory + + +class SynapseManhole(ColoredManhole): + """Overrides connectionMade to create our own ManholeInterpreter""" + def connectionMade(self): + super(SynapseManhole, self).connectionMade() + + # replace the manhole interpreter with our own impl + self.interpreter = SynapseManholeInterpreter(self, self.namespace) + + # this would also be a good place to add more keyHandlers. + + +class SynapseManholeInterpreter(ManholeInterpreter): + def showsyntaxerror(self, filename=None): + """Display the syntax error that just occurred. + + Overrides the base implementation, ignoring sys.excepthook. We always want + any syntax errors to be sent to the terminal, rather than sentry. + """ + type, value, tb = sys.exc_info() + sys.last_type = type + sys.last_value = value + sys.last_traceback = tb + if filename and type is SyntaxError: + # Work hard to stuff the correct filename in the exception + try: + msg, (dummy_filename, lineno, offset, line) = value.args + except ValueError: + # Not the format we expect; leave it alone + pass + else: + # Stuff in the right filename + value = SyntaxError(msg, (filename, lineno, offset, line)) + sys.last_value = value + lines = traceback.format_exception_only(type, value) + self.write(''.join(lines)) + + def showtraceback(self): + """Display the exception that just occurred. + + Overrides the base implementation, ignoring sys.excepthook. We always want + any syntax errors to be sent to the terminal, rather than sentry. + """ + sys.last_type, sys.last_value, last_tb = ei = sys.exc_info() + sys.last_traceback = last_tb + try: + # We remove the first stack item because it is our own code. + lines = traceback.format_exception(ei[0], ei[1], last_tb.tb_next) + self.write(''.join(lines)) + finally: + last_tb = ei = None From caa76e6021b1a8b3e6da0c2a8b1935b8dc96ed8f Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 11 Apr 2019 17:08:13 +0100 Subject: [PATCH 023/429] Remove periods from copyright headers (#5046) --- changelog.d/5046.misc | 1 + synapse/api/constants.py | 2 +- synapse/api/errors.py | 2 +- synapse/api/urls.py | 2 +- synapse/config/saml2_config.py | 2 +- synapse/crypto/keyring.py | 2 +- synapse/events/spamcheck.py | 2 +- synapse/rest/media/v1/_base.py | 2 +- synapse/rest/well_known.py | 2 +- synapse/util/async_helpers.py | 2 +- tests/crypto/test_keyring.py | 2 +- tests/util/test_linearizer.py | 2 +- 12 files changed, 12 insertions(+), 11 deletions(-) create mode 100644 changelog.d/5046.misc diff --git a/changelog.d/5046.misc b/changelog.d/5046.misc new file mode 100644 index 0000000000..eb966a5ae6 --- /dev/null +++ b/changelog.d/5046.misc @@ -0,0 +1 @@ +Remove extraneous period from copyright headers. 
diff --git a/synapse/api/constants.py b/synapse/api/constants.py index dd373fa4b8..0860b75905 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2017 Vector Creations Ltd -# Copyright 2018 New Vector Ltd. +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 4c33450e7f..ff89259dec 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd -# Copyright 2018 New Vector Ltd. +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/synapse/api/urls.py b/synapse/api/urls.py index 8102176653..cb71d80875 100644 --- a/synapse/api/urls.py +++ b/synapse/api/urls.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd -# Copyright 2018 New Vector Ltd. +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/synapse/config/saml2_config.py b/synapse/config/saml2_config.py index 39b9eb29c2..aa6eac271f 100644 --- a/synapse/config/saml2_config.py +++ b/synapse/config/saml2_config.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2018 New Vector Ltd. +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 834b107705..ed2e994437 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd -# Copyright 2017, 2018 New Vector Ltd. +# Copyright 2017, 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py index 633e068eb8..6058077f75 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2017 New Vector Ltd. +# Copyright 2017 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py index 953d89bd82..e2b5df701d 100644 --- a/synapse/rest/media/v1/_base.py +++ b/synapse/rest/media/v1/_base.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd -# Copyright 2019 New Vector Ltd. +# Copyright 2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/synapse/rest/well_known.py b/synapse/rest/well_known.py index c0a4ae93e5..ab901e63f2 100644 --- a/synapse/rest/well_known.py +++ b/synapse/rest/well_known.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2018 New Vector Ltd. +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index f0e4a0e10c..2f16f23d91 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd -# Copyright 2018 New Vector Ltd. +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 9af0656a83..f5bd7a1aa1 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2017 New Vector Ltd. +# Copyright 2017 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/util/test_linearizer.py b/tests/util/test_linearizer.py index 61a55b461b..ec7ba9719c 100644 --- a/tests/util/test_linearizer.py +++ b/tests/util/test_linearizer.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd -# Copyright 2018 New Vector Ltd. +# Copyright 2018 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. From 6b0ddf8ee56e56bf02be62194a0f7e8838fa9e07 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Sat, 13 Apr 2019 13:10:46 +0100 Subject: [PATCH 024/429] update grafana dashboard --- contrib/grafana/synapse.json | 5212 +--------------------------------- 1 file changed, 1 insertion(+), 5211 deletions(-) diff --git a/contrib/grafana/synapse.json b/contrib/grafana/synapse.json index dc3f4a1d1c..4abeb7d392 100644 --- a/contrib/grafana/synapse.json +++ b/contrib/grafana/synapse.json @@ -1,5211 +1 @@ -{ - "__inputs": [ - { - "name": "DS_PROMETHEUS", - "label": "Prometheus", - "description": "", - "type": "datasource", - "pluginId": "prometheus", - "pluginName": "Prometheus" - } - ], - "__requires": [ - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "5.2.4" - }, - { - "type": "panel", - "id": "graph", - "name": "Graph", - "version": "5.0.0" - }, - { - "type": "panel", - "id": "heatmap", - "name": "Heatmap", - "version": "5.0.0" - }, - { - "type": "datasource", - "id": "prometheus", - "name": "Prometheus", - "version": "5.0.0" - } - ], - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "$datasource", - "enable": false, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "limit": 100, - "name": "Annotations & Alerts", - "showIn": 0, - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "id": null, - "iteration": 1537878047048, - "links": [ - { - "asDropdown": true, - "icon": "external link", - "keepTime": true, - "tags": [ - "matrix" - ], - "title": "Dashboards", - "type": "dashboards" - } - ], - "panels": [ - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 0 - }, - "id": 73, - "panels": [], - "title": "Overview", - "type": "row" - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 1 - }, - "id": 75, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": 
false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(process_cpu_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} ", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "CPU usage", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": null, - "format": "percentunit", - "label": null, - "logBase": 1, - "max": "1", - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "cards": { - "cardPadding": 0, - "cardRound": null - }, - "color": { - "cardColor": "#b4ff00", - "colorScale": "sqrt", - "colorScheme": "interpolateSpectral", - "exponent": 0.5, - "mode": "spectrum" - }, - "dataFormat": "tsbuckets", - "datasource": "$datasource", - "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 1 - }, - "heatmap": {}, - "highlightCards": true, - "id": 85, - "legend": { - "show": false - }, - "links": [], - "targets": [ - { - "expr": "sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\"}[$bucket_size])) by (le)", - "format": "heatmap", - "intervalFactor": 1, - "legendFormat": "{{le}}", - "refId": "A" - } - ], - "title": "Event Send Time", - "tooltip": { - "show": true, - "showHistogram": false - }, - "type": "heatmap", - "xAxis": { - "show": true - }, - "xBucketNumber": null, - "xBucketSize": null, - "yAxis": { - "decimals": null, - "format": "s", - "logBase": 2, - "max": null, - "min": null, - "show": true, - "splitFactor": null - }, - "yBucketBound": "auto", - "yBucketNumber": null, - "yBucketSize": null - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "editable": true, - "error": false, - "fill": 1, - "grid": {}, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 10 - }, - "id": 33, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(rate(synapse_storage_events_persisted_events{instance=\"$instance\"}[$bucket_size])) without (job,index)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "", - "refId": "A", - "step": 20, - "target": "" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Events Persisted", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "hertz", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, 
- "alignLevel": null - } - }, - { - "collapsed": true, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 17 - }, - "id": 54, - "panels": [ - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "editable": true, - "error": false, - "fill": 0, - "grid": {}, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 18 - }, - "id": 34, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": true, - "targets": [ - { - "expr": "process_resident_memory_bytes{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{job}} {{index}}", - "refId": "A", - "step": 20, - "target": "" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Memory", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "bytes", - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 18 - }, - "id": 37, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - { - "alias": "/max$/", - "color": "#890F02", - "fill": 0, - "legend": false - } - ], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "process_open_fds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", - "format": "time_series", - "hide": false, - "intervalFactor": 2, - "legendFormat": "{{job}}-{{index}}", - "refId": "A", - "step": 20 - }, - { - "expr": "process_max_fds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", - "format": "time_series", - "hide": true, - "intervalFactor": 2, - "legendFormat": "{{job}}-{{index}} max", - "refId": "B", - "step": 20 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Open FDs", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "none", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 25 - }, - "id": 50, - "legend": { - "avg": false, - "current": 
false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(python_twisted_reactor_tick_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(python_twisted_reactor_tick_time_count[$bucket_size])", - "format": "time_series", - "interval": "", - "intervalFactor": 2, - "legendFormat": "{{job}}-{{index}}", - "refId": "A", - "step": 20 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Avg reactor tick time", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "s", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "description": "Shows the time in which the given percentage of reactor ticks completed, over the sampled timespan", - "fill": 1, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 25 - }, - "id": 105, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "histogram_quantile(0.99, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))", - "format": "time_series", - "interval": "", - "intervalFactor": 2, - "legendFormat": "{{job}}-{{index}} 99%", - "refId": "A", - "step": 20 - }, - { - "expr": "histogram_quantile(0.95, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} 95%", - "refId": "B" - }, - { - "expr": "histogram_quantile(0.90, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} 90%", - "refId": "C" - }, - { - "expr": "", - "format": "time_series", - "intervalFactor": 1, - "refId": "D" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Reactor tick quantiles", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "s", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - 
"aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 0, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 32 - }, - "id": 53, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "min_over_time(up{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{job}}-{{index}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Up", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 32 - }, - "id": 49, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [ - { - "alias": "/^up/", - "legend": false, - "yaxis": 2 - } - ], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "scrape_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", - "format": "time_series", - "interval": "", - "intervalFactor": 2, - "legendFormat": "{{job}}-{{index}}", - "refId": "A", - "step": 20 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Prometheus scrape time", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "s", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "decimals": 0, - "format": "none", - "label": "", - "logBase": 1, - "max": "0", - "min": "-1", - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "editable": true, - "error": false, - "fill": 1, - "grid": {}, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 39 - }, - "id": 5, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": 
[ - { - "alias": "/user/" - }, - { - "alias": "/system/" - } - ], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(process_cpu_system_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} system ", - "metric": "", - "refId": "B", - "step": 20 - }, - { - "expr": "rate(process_cpu_user_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", - "format": "time_series", - "hide": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} user", - "refId": "A", - "step": 20 - } - ], - "thresholds": [ - { - "colorMode": "custom", - "line": true, - "lineColor": "rgba(216, 200, 27, 0.27)", - "op": "gt", - "value": 0.5 - }, - { - "colorMode": "custom", - "line": true, - "lineColor": "rgba(234, 112, 112, 0.22)", - "op": "gt", - "value": 0.8 - } - ], - "timeFrom": null, - "timeShift": null, - "title": "CPU", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": null, - "format": "percentunit", - "label": "", - "logBase": 1, - "max": "1.2", - "min": 0, - "show": true - }, - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "repeat": null, - "title": "Process info", - "type": "row" - }, - { - "collapsed": true, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 18 - }, - "id": 56, - "panels": [ - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "decimals": 1, - "fill": 1, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 47 - }, - "id": 40, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(synapse_storage_events_persisted_by_source_type{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{type}}", - "refId": "D" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Events/s Local vs Remote", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "hertz", - "label": "", - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "decimals": 1, - "fill": 1, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 47 - }, - "id": 46, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - 
"linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(synapse_storage_events_persisted_by_event_type{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", - "format": "time_series", - "instant": false, - "intervalFactor": 2, - "legendFormat": "{{type}}", - "refId": "A", - "step": 20 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Events/s by Type", - "tooltip": { - "shared": false, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "hertz", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": { - "irc-freenode (local)": "#EAB839" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "decimals": 1, - "fill": 1, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 54 - }, - "id": 44, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "hideEmpty": true, - "hideZero": true, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(synapse_storage_events_persisted_by_origin{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{origin_entity}} ({{origin_type}})", - "refId": "A", - "step": 20 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Events/s by Origin", - "tooltip": { - "shared": false, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "hertz", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "decimals": 1, - "fill": 1, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 54 - }, - "id": 45, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "hideEmpty": true, - "hideZero": true, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum(rate(synapse_storage_events_persisted_events_sep{job=~\"$job\",index=~\"$index\", type=\"m.room.member\",instance=\"$instance\"}[$bucket_size])) by 
(origin_type, origin_entity)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{origin_entity}} ({{origin_type}})", - "refId": "A", - "step": 20 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Memberships/s by Origin", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "hertz", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "repeat": null, - "title": "Event persist rates", - "type": "row" - }, - { - "collapsed": true, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 19 - }, - "id": 57, - "panels": [ - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "decimals": null, - "editable": true, - "error": false, - "fill": 2, - "grid": {}, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 62 - }, - "id": 4, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": true, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(synapse_http_server_requests_received{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", - "format": "time_series", - "interval": "", - "intervalFactor": 2, - "legendFormat": "{{job}}-{{index}} {{method}} {{servlet}} {{tag}}", - "refId": "A", - "step": 20 - } - ], - "thresholds": [ - { - "colorMode": "custom", - "fill": true, - "fillColor": "rgba(216, 200, 27, 0.27)", - "op": "gt", - "value": 100 - }, - { - "colorMode": "custom", - "fill": true, - "fillColor": "rgba(234, 112, 112, 0.22)", - "op": "gt", - "value": 250 - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Request Count by arrival time", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "transparent": false, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "hertz", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "editable": true, - "error": false, - "fill": 1, - "grid": {}, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 62 - }, - "id": 32, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": 
"rate(synapse_http_server_requests_received{instance=\"$instance\",job=~\"$job\",index=~\"$index\",method!=\"OPTIONS\"}[$bucket_size]) and topk(10,synapse_http_server_requests_received{instance=\"$instance\",job=~\"$job\",method!=\"OPTIONS\"})", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{method}} {{servlet}} {{job}}-{{index}}", - "refId": "A", - "step": 20, - "target": "" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Top 10 Request Counts", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "hertz", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "decimals": null, - "editable": true, - "error": false, - "fill": 2, - "grid": {}, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 70 - }, - "id": 23, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": true, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(synapse_http_server_response_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_response_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} {{method}} {{servlet}} {{tag}}", - "refId": "A", - "step": 20 - } - ], - "thresholds": [ - { - "colorMode": "custom", - "fill": true, - "fillColor": "rgba(216, 200, 27, 0.27)", - "op": "gt", - "value": 100, - "yaxis": "left" - }, - { - "colorMode": "custom", - "fill": true, - "fillColor": "rgba(234, 112, 112, 0.22)", - "op": "gt", - "value": 250, - "yaxis": "left" - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Total CPU Usage by Endpoint", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "transparent": false, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "percentunit", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "decimals": null, - "editable": true, - "error": false, - "fill": 2, - "grid": {}, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 70 - }, - "id": 52, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": true, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": false - }, - 
"lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "(rate(synapse_http_server_response_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_response_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) / rate(synapse_http_server_response_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", - "format": "time_series", - "interval": "", - "intervalFactor": 2, - "legendFormat": "{{job}}-{{index}} {{method}} {{servlet}} {{tag}}", - "refId": "A", - "step": 20 - } - ], - "thresholds": [ - { - "colorMode": "custom", - "fill": true, - "fillColor": "rgba(216, 200, 27, 0.27)", - "op": "gt", - "value": 100 - }, - { - "colorMode": "custom", - "fill": true, - "fillColor": "rgba(234, 112, 112, 0.22)", - "op": "gt", - "value": 250 - } - ], - "timeFrom": null, - "timeShift": null, - "title": "Average CPU Usage by Endpoint", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "transparent": false, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "s", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "editable": true, - "error": false, - "fill": 1, - "grid": {}, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 78 - }, - "id": 7, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "hideEmpty": true, - "hideZero": true, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(synapse_http_server_response_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", - "format": "time_series", - "interval": "", - "intervalFactor": 2, - "legendFormat": "{{job}}-{{index}} {{method}} {{servlet}} {{tag}}", - "refId": "A", - "step": 20 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "DB Usage by endpoint", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "percentunit", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "decimals": null, - "editable": true, - "error": false, - "fill": 2, - "grid": {}, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 78 - }, - "id": 47, - "legend": { - "alignAsTable": 
true, - "avg": true, - "current": false, - "hideEmpty": false, - "hideZero": true, - "max": true, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": true - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(synapse_http_server_response_time_seconds_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\",tag!=\"incremental_sync\"}[$bucket_size])/rate(synapse_http_server_response_time_seconds_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\",tag!=\"incremental_sync\"}[$bucket_size])", - "format": "time_series", - "interval": "", - "intervalFactor": 2, - "legendFormat": "{{job}}-{{index}} {{method}} {{servlet}} {{tag}}", - "refId": "A", - "step": 20 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Non-sync avg response time", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "transparent": false, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "s", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 86 - }, - "id": 103, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "topk(10,synapse_http_server_in_flight_requests_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"})", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} {{method}} {{servlet}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Requests in flight", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "repeat": null, - "title": "Requests", - "type": "row" - }, - { - "collapsed": true, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 20 - }, - "id": 97, - "panels": [ - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 49 - }, - "id": 99, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, 
- "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(synapse_background_process_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_background_process_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} {{name}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "CPU usage by background jobs", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "percentunit", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 49 - }, - "id": 101, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(synapse_background_process_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + rate(synapse_background_process_db_sched_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", - "format": "time_series", - "hide": false, - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} {{name}}", - "refId": "A" - }, - { - "expr": "", - "format": "time_series", - "intervalFactor": 1, - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "DB usage by background jobs (including scheduling time)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "percentunit", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "title": "Background jobs", - "type": "row" - }, - { - "collapsed": true, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 21 - }, - "id": 81, - "panels": [ - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 64 - }, - "id": 79, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - 
"percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(synapse_federation_client_sent_transactions{instance=\"$instance\", job=~\"$job\", index=~\"$index\"}[$bucket_size])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "txn rate", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Outgoing federation transaction rate", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "hertz", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 64 - }, - "id": 83, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(synapse_federation_server_received_pdus{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "pdus", - "refId": "A" - }, - { - "expr": "rate(synapse_federation_server_received_edus{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "edus", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Incoming PDU/EDU rate", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "hertz", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "title": "Federation", - "type": "row" - }, - { - "collapsed": true, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 22 - }, - "id": 60, - "panels": [ - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 65 - }, - "id": 51, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": 
"rate(synapse_push_httppusher_http_pushes_processed{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", - "format": "time_series", - "interval": "", - "intervalFactor": 2, - "legendFormat": "processed {{job}}", - "refId": "A", - "step": 20 - }, - { - "expr": "rate(synapse_push_httppusher_http_pushes_failed{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "failed {{job}}", - "refId": "B", - "step": 20 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "HTTP Push rate", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "hertz", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "repeat": null, - "title": "Pushes", - "type": "row" - }, - { - "collapsed": true, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 23 - }, - "id": 58, - "panels": [ - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 24 - }, - "id": 48, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(synapse_storage_schedule_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_schedule_time_count[$bucket_size])", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{job}}-{{index}}", - "refId": "A", - "step": 20 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Avg time waiting for db conn", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": null, - "format": "s", - "label": "", - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "description": "Shows the time in which the given percentage of database queries were scheduled, over the sampled timespan", - "fill": 1, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 24 - }, - "id": 104, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": 
"histogram_quantile(0.99, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", - "format": "time_series", - "hide": false, - "intervalFactor": 1, - "legendFormat": "{{job}} {{index}} 99%", - "refId": "A", - "step": 20 - }, - { - "expr": "histogram_quantile(0.95, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{job}} {{index}} 95%", - "refId": "B" - }, - { - "expr": "histogram_quantile(0.90, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{job}} {{index}} 90%", - "refId": "C" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Db scheduling time quantiles", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": null, - "format": "s", - "label": "", - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "editable": true, - "error": false, - "fill": 0, - "grid": {}, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 31 - }, - "id": 10, - "legend": { - "avg": false, - "current": false, - "hideEmpty": true, - "hideZero": true, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "topk(10, rate(synapse_storage_transaction_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))", - "format": "time_series", - "interval": "", - "intervalFactor": 2, - "legendFormat": "{{job}}-{{index}} {{desc}}", - "refId": "A", - "step": 20 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Top DB transactions by txn rate", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "hertz", - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "editable": true, - "error": false, - "fill": 1, - "grid": {}, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 31 - }, - "id": 11, - "legend": { - "avg": false, - "current": false, - "hideEmpty": true, - "hideZero": true, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - 
"points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": true, - "steppedLine": true, - "targets": [ - { - "expr": "rate(synapse_storage_transaction_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", - "format": "time_series", - "instant": false, - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} {{desc}}", - "refId": "A", - "step": 20 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Top DB transactions by total txn time", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "percentunit", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "repeat": null, - "title": "Database", - "type": "row" - }, - { - "collapsed": true, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 24 - }, - "id": 59, - "panels": [ - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "editable": true, - "error": false, - "fill": 1, - "grid": {}, - "gridPos": { - "h": 13, - "w": 12, - "x": 0, - "y": 60 - }, - "id": 12, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(synapse_util_metrics_block_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\",block_name!=\"wrapped_request_handler\"}[$bucket_size]) + rate(synapse_util_metrics_block_ru_stime_seconds[$bucket_size])", - "format": "time_series", - "interval": "", - "intervalFactor": 2, - "legendFormat": "{{job}}-{{index}} {{block_name}}", - "refId": "A", - "step": 20 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Total CPU Usage by Block", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "percentunit", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "editable": true, - "error": false, - "fill": 1, - "grid": {}, - "gridPos": { - "h": 13, - "w": 12, - "x": 12, - "y": 60 - }, - "id": 26, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": 
"(rate(synapse_util_metrics_block_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + rate(synapse_util_metrics_block_ru_stime_seconds[$bucket_size])) / rate(synapse_util_metrics_block_count[$bucket_size])", - "format": "time_series", - "interval": "", - "intervalFactor": 2, - "legendFormat": "{{job}}-{{index}} {{block_name}}", - "refId": "A", - "step": 20 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Average CPU Time per Block", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "ms", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "editable": true, - "error": false, - "fill": 1, - "grid": {}, - "gridPos": { - "h": 13, - "w": 12, - "x": 0, - "y": 73 - }, - "id": 13, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\",block_name!=\"wrapped_request_handler\"}[$bucket_size])", - "format": "time_series", - "interval": "", - "intervalFactor": 2, - "legendFormat": "{{job}} {{block_name}}", - "refId": "A", - "step": 20 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Total DB Usage by Block", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "percentunit", - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "editable": true, - "error": false, - "fill": 1, - "grid": {}, - "gridPos": { - "h": 13, - "w": 12, - "x": 12, - "y": 73 - }, - "id": 27, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_db_txn_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", - "format": "time_series", - "interval": "", - "intervalFactor": 2, - 
"legendFormat": "{{job}}-{{index}} {{block_name}}", - "refId": "A", - "step": 20 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Average Database Time per Block", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "ms", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "editable": true, - "error": false, - "fill": 1, - "grid": {}, - "gridPos": { - "h": 13, - "w": 12, - "x": 0, - "y": 86 - }, - "id": 28, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(synapse_util_metrics_block_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_db_txn_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", - "format": "time_series", - "interval": "", - "intervalFactor": 2, - "legendFormat": "{{job}}-{{index}} {{block_name}}", - "refId": "A", - "step": 20 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Average Transactions per Block", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "none", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "editable": true, - "error": false, - "fill": 1, - "grid": {}, - "gridPos": { - "h": 13, - "w": 12, - "x": 12, - "y": 86 - }, - "id": 25, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(synapse_util_metrics_block_time_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_count[$bucket_size])", - "format": "time_series", - "interval": "", - "intervalFactor": 2, - "legendFormat": "{{job}}-{{index}} {{block_name}}", - "refId": "A", - "step": 20 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Average Wallclock Time per Block", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": 
"time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "ms", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "repeat": null, - "title": "Per-block metrics", - "type": "row" - }, - { - "collapsed": true, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 25 - }, - "id": 61, - "panels": [ - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "decimals": 2, - "editable": true, - "error": false, - "fill": 0, - "grid": {}, - "gridPos": { - "h": 10, - "w": 12, - "x": 0, - "y": 68 - }, - "id": 1, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "hideEmpty": true, - "hideZero": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(synapse_util_caches_cache:hits{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])/rate(synapse_util_caches_cache:total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{name}} {{job}}-{{index}}", - "refId": "A", - "step": 20 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Cache Hit Ratio", - "tooltip": { - "msResolution": true, - "shared": false, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": null, - "format": "percentunit", - "label": "", - "logBase": 1, - "max": "1", - "min": 0, - "show": true - }, - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": false - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "editable": true, - "error": false, - "fill": 1, - "grid": {}, - "gridPos": { - "h": 10, - "w": 12, - "x": 12, - "y": 68 - }, - "id": 8, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "hideZero": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "synapse_util_caches_cache:size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", - "format": "time_series", - "hide": false, - "interval": "", - "intervalFactor": 2, - "legendFormat": "{{name}} {{job}}-{{index}}", - "refId": "A", - "step": 20 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Cache Size", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "logBase": 1, - "max": 
null, - "min": 0, - "show": true - }, - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "editable": true, - "error": false, - "fill": 1, - "grid": {}, - "gridPos": { - "h": 10, - "w": 12, - "x": 0, - "y": 78 - }, - "id": 38, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "hideZero": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(synapse_util_caches_cache:total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", - "format": "time_series", - "interval": "", - "intervalFactor": 2, - "legendFormat": "{{name}} {{job}}-{{index}}", - "refId": "A", - "step": 20 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Cache request rate", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "rps", - "logBase": 1, - "max": null, - "min": 0, - "show": true - }, - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 10, - "w": 12, - "x": 12, - "y": 78 - }, - "id": 39, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "topk(10, rate(synapse_util_caches_cache:total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]) - rate(synapse_util_caches_cache:hits{job=\"$job\",instance=\"$instance\"}[$bucket_size]))", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{name}} {{job}}-{{index}}", - "refId": "A", - "step": 20 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Top 10 cache misses", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "rps", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 88 - }, - "id": 65, - "legend": { - "alignAsTable": true, - "avg": false, 
- "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(synapse_util_caches_cache:evicted_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{name}} {{job}}-{{index}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Cache eviction rate", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "transparent": false, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": null, - "format": "hertz", - "label": "entries / second", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "repeat": null, - "title": "Caches", - "type": "row" - }, - { - "collapsed": true, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 26 - }, - "id": 62, - "panels": [ - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 27 - }, - "id": 91, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": true, - "steppedLine": false, - "targets": [ - { - "expr": "rate(python_gc_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[10m])", - "format": "time_series", - "instant": false, - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} gen {{gen}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Total GC time by bucket (10m smoothing)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": null, - "format": "percentunit", - "label": null, - "logBase": 1, - "max": null, - "min": "0", - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "decimals": 3, - "editable": true, - "error": false, - "fill": 1, - "grid": {}, - "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 27 - }, - "id": 21, - "legend": { - "alignAsTable": true, - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "nullPointMode": "null as zero", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - 
"seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(python_gc_time_sum{instance=\"$instance\",job=~\"$job\"}[$bucket_size])/rate(python_gc_time_count[$bucket_size])", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{job}} {{index}} gen {{gen}} ", - "refId": "A", - "step": 20, - "target": "" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Average GC Time Per Collection", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "cumulative" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "s", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 36 - }, - "id": 89, - "legend": { - "avg": false, - "current": false, - "hideEmpty": true, - "hideZero": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "python_gc_counts{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} gen {{gen}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Currently allocated objects", - "tooltip": { - "shared": false, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 36 - }, - "id": 93, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(python_gc_unreachable_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(python_gc_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} gen {{gen}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Object counts per collection", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - 
"type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 45 - }, - "id": 95, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(python_gc_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", - "format": "time_series", - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} gen {{gen}}", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "GC frequency", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "hertz", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "cards": { - "cardPadding": 0, - "cardRound": null - }, - "color": { - "cardColor": "#b4ff00", - "colorScale": "sqrt", - "colorScheme": "interpolateSpectral", - "exponent": 0.5, - "max": null, - "min": 0, - "mode": "spectrum" - }, - "dataFormat": "tsbuckets", - "datasource": "${DS_PROMETHEUS}", - "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 45 - }, - "heatmap": {}, - "highlightCards": true, - "id": 87, - "legend": { - "show": true - }, - "links": [], - "targets": [ - { - "expr": "sum(rate(python_gc_time_bucket[$bucket_size])) by (le)", - "format": "heatmap", - "intervalFactor": 1, - "legendFormat": "{{le}}", - "refId": "A" - } - ], - "title": "GC durations", - "tooltip": { - "show": true, - "showHistogram": false - }, - "type": "heatmap", - "xAxis": { - "show": true - }, - "xBucketNumber": null, - "xBucketSize": null, - "yAxis": { - "decimals": null, - "format": "s", - "logBase": 1, - "max": null, - "min": null, - "show": true, - "splitFactor": null - }, - "yBucketBound": "auto", - "yBucketNumber": null, - "yBucketSize": null - } - ], - "repeat": null, - "title": "GC", - "type": "row" - }, - { - "collapsed": true, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 27 - }, - "id": 63, - "panels": [ - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 97 - }, - "id": 2, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - 
"renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(synapse_replication_tcp_resource_user_sync{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "user started/stopped syncing", - "refId": "A", - "step": 20 - }, - { - "expr": "rate(synapse_replication_tcp_resource_federation_ack{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "federation ack", - "refId": "B", - "step": 20 - }, - { - "expr": "rate(synapse_replication_tcp_resource_remove_pusher{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "remove pusher", - "refId": "C", - "step": 20 - }, - { - "expr": "rate(synapse_replication_tcp_resource_invalidate_cache{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "invalidate cache", - "refId": "D", - "step": 20 - }, - { - "expr": "rate(synapse_replication_tcp_resource_user_ip_cache{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "user ip cache", - "refId": "E", - "step": 20 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Rate of events on replication master", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "hertz", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 97 - }, - "id": 41, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "rate(synapse_replication_tcp_resource_stream_updates{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])", - "format": "time_series", - "interval": "", - "intervalFactor": 2, - "legendFormat": "{{stream_name}}", - "refId": "A", - "step": 20 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Outgoing stream updates", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "hertz", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - 
"bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 104 - }, - "id": 42, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum (rate(synapse_replication_tcp_protocol_inbound_commands{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (name, conn_id)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{job}}-{{index}} {{command}}", - "refId": "A", - "step": 20 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Rate of incoming commands", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "hertz", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 104 - }, - "id": 43, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "sum (rate(synapse_replication_tcp_protocol_outbound_commands{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (name, conn_id)", - "format": "time_series", - "intervalFactor": 2, - "legendFormat": "{{job}}-{{index}} {{command}}", - "refId": "A", - "step": 20 - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Rate of outgoing commands", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "hertz", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "repeat": null, - "title": "Replication", - "type": "row" - }, - { - "collapsed": true, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 28 - }, - "id": 69, - "panels": [ - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 29 - }, - "id": 67, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - 
"links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": " synapse_event_persisted_position{instance=\"$instance\",job=\"synapse\"} - ignoring(index, job, name) group_right() synapse_event_processing_positions{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} ", - "refId": "A" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Event processing lag", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "short", - "label": "events", - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - }, - { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": "$datasource", - "fill": 1, - "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 29 - }, - "id": 71, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "connected", - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "expr": "time()*1000-synapse_event_processing_last_ts{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}", - "format": "time_series", - "hide": false, - "intervalFactor": 1, - "legendFormat": "{{job}}-{{index}} {{name}}", - "refId": "B" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Age of last processed event", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "buckets": null, - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "format": "ms", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": null - } - } - ], - "title": "Event processing loop positions", - "type": "row" - } - ], - "refresh": "1m", - "schemaVersion": 16, - "style": "dark", - "tags": [ - "matrix" - ], - "templating": { - "list": [ - { - "current": { - "text": "Prometheus", - "value": "Prometheus" - }, - "hide": 0, - "label": null, - "name": "datasource", - "options": [], - "query": "prometheus", - "refresh": 1, - "regex": "", - "type": "datasource" - }, - { - "allFormat": "glob", - "auto": true, - "auto_count": 100, - "auto_min": "30s", - "current": { - "text": "auto", - "value": "$__auto_interval_bucket_size" - }, - "datasource": null, - "hide": 0, - "includeAll": false, - "label": "Bucket Size", - "multi": false, - "multiFormat": "glob", - "name": "bucket_size", - "options": [ - { - "selected": true, - "text": "auto", - "value": "$__auto_interval_bucket_size" - }, - { - "selected": false, - "text": "30s", - "value": "30s" - }, - { - 
"selected": false, - "text": "1m", - "value": "1m" - }, - { - "selected": false, - "text": "2m", - "value": "2m" - }, - { - "selected": false, - "text": "5m", - "value": "5m" - }, - { - "selected": false, - "text": "10m", - "value": "10m" - }, - { - "selected": false, - "text": "15m", - "value": "15m" - } - ], - "query": "30s,1m,2m,5m,10m,15m", - "refresh": 2, - "type": "interval" - }, - { - "allValue": null, - "current": {}, - "datasource": "$datasource", - "hide": 0, - "includeAll": false, - "label": null, - "multi": false, - "name": "instance", - "options": [], - "query": "label_values(synapse_util_metrics_block_ru_utime_seconds, instance)", - "refresh": 2, - "regex": "", - "sort": 0, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allFormat": "regex wildcard", - "allValue": "", - "current": {}, - "datasource": "$datasource", - "hide": 0, - "hideLabel": false, - "includeAll": true, - "label": "Job", - "multi": true, - "multiFormat": "regex values", - "name": "job", - "options": [], - "query": "label_values(synapse_util_metrics_block_ru_utime_seconds, job)", - "refresh": 2, - "refresh_on_load": false, - "regex": "", - "sort": 1, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false - }, - { - "allFormat": "regex wildcard", - "allValue": ".*", - "current": {}, - "datasource": "$datasource", - "hide": 0, - "hideLabel": false, - "includeAll": true, - "label": "", - "multi": true, - "multiFormat": "regex values", - "name": "index", - "options": [], - "query": "label_values(synapse_util_metrics_block_ru_utime_seconds, index)", - "refresh": 2, - "refresh_on_load": false, - "regex": "", - "sort": 3, - "tagValuesQuery": "", - "tags": [], - "tagsQuery": "", - "type": "query", - "useTags": false - } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "now": true, - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "Synapse", - "uid": "000000012", - "version": 3 -} \ No newline at end of file +{"annotations":{"list":[{"builtIn":1,"datasource":"$datasource","enable":false,"hide":true,"iconColor":"rgba(0, 211, 255, 1)","limit":100,"name":"Annotations \u0026 Alerts","showIn":0,"type":"dashboard"}]},"editable":true,"gnetId":null,"graphTooltip":0,"id":4,"iteration":1554391266679,"links":[{"asDropdown":true,"icon":"external link","keepTime":true,"tags":["matrix"],"title":"Dashboards","type":"dashboards"}],"panels":[{"collapsed":false,"gridPos":{"h":1,"w":24,"x":0,"y":0},"id":73,"panels":[],"title":"Overview","type":"row"},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":9,"w":12,"x":0,"y":1},"id":75,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(process_cpu_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","intervalFactor":1,"legendFormat":"{{job}}-{{index}} 
","refId":"A"}],"thresholds":[{"colorMode":"critical","fill":true,"line":true,"op":"gt","value":1,"yaxis":"left"}],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"CPU usage","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"decimals":null,"format":"percentunit","label":null,"logBase":1,"max":"1.5","min":"0","show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","editable":true,"error":false,"fill":1,"grid":{},"gridPos":{"h":9,"w":12,"x":12,"y":1},"id":33,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":false,"total":false,"values":false},"lines":true,"linewidth":2,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"sum(rate(synapse_storage_events_persisted_events{instance=\"$instance\"}[$bucket_size])) without (job,index)","format":"time_series","intervalFactor":2,"legendFormat":"","refId":"A","step":20,"target":""}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Events Persisted","tooltip":{"shared":true,"sort":0,"value_type":"cumulative"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","logBase":1,"max":null,"min":null,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"cards":{"cardPadding":0,"cardRound":null},"color":{"cardColor":"#b4ff00","colorScale":"sqrt","colorScheme":"interpolateSpectral","exponent":0.5,"mode":"spectrum"},"dataFormat":"tsbuckets","datasource":"$datasource","gridPos":{"h":9,"w":12,"x":0,"y":10},"heatmap":{},"hideZeroBuckets":true,"highlightCards":true,"id":85,"legend":{"show":false},"links":[],"reverseYBuckets":false,"targets":[{"expr":"sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\"}[$bucket_size])) by (le)","format":"heatmap","intervalFactor":1,"legendFormat":"{{le}}","refId":"A"}],"title":"Event Send Time","tooltip":{"show":true,"showHistogram":false},"type":"heatmap","xAxis":{"show":true},"xBucketNumber":null,"xBucketSize":null,"yAxis":{"decimals":null,"format":"s","logBase":2,"max":null,"min":null,"show":true,"splitFactor":null},"yBucketBound":"auto","yBucketNumber":null,"yBucketSize":null},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":0,"gridPos":{"h":9,"w":12,"x":12,"y":10},"id":107,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","repeat":null,"repeatDirection":"h","seriesOverrides":[{"alias":"mean","linewidth":2}],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"histogram_quantile(0.99, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\"}[$bucket_size])) without (job, index, method))","format":"time_series","interval":"","intervalFactor":1,"legendFormat":"99%","refId":"A"},{"expr":"histogram_quantile(0.95, 
sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\"}[$bucket_size])) without (job, index, method))","format":"time_series","intervalFactor":1,"legendFormat":"95%","refId":"B"},{"expr":"histogram_quantile(0.90, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\"}[$bucket_size])) without (job, index, method))","format":"time_series","intervalFactor":1,"legendFormat":"90%","refId":"C"},{"expr":"histogram_quantile(0.50, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\"}[$bucket_size])) without (job, index, method))","format":"time_series","intervalFactor":1,"legendFormat":"50%","refId":"D"},{"expr":"sum(rate(synapse_http_server_response_time_seconds_sum{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\"}[$bucket_size])) without (job, index, method) / sum(rate(synapse_http_server_response_time_seconds_count{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\"}[$bucket_size])) without (job, index, method)","format":"time_series","intervalFactor":1,"legendFormat":"mean","refId":"E"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Event send time quantiles","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"s","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":0,"gridPos":{"h":9,"w":12,"x":0,"y":19},"id":118,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","repeatDirection":"h","seriesOverrides":[{"alias":"mean","linewidth":2}],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"histogram_quantile(0.99, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method))","format":"time_series","interval":"","intervalFactor":1,"legendFormat":"{{job}}-{{index}} 99%","refId":"A"},{"expr":"histogram_quantile(0.95, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method))","format":"time_series","intervalFactor":1,"legendFormat":"{{job}}-{{index}} 95%","refId":"B"},{"expr":"histogram_quantile(0.90, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method))","format":"time_series","intervalFactor":1,"legendFormat":"{{job}}-{{index}} 90%","refId":"C"},{"expr":"histogram_quantile(0.50, sum(rate(synapse_http_server_response_time_seconds_bucket{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method))","format":"time_series","intervalFactor":1,"legendFormat":"{{job}}-{{index}} 
50%","refId":"D"},{"expr":"sum(rate(synapse_http_server_response_time_seconds_sum{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method) / sum(rate(synapse_http_server_response_time_seconds_count{servlet='RoomSendEventRestServlet',instance=\"$instance\",code=~\"2..\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (method)","format":"time_series","intervalFactor":1,"legendFormat":"{{job}}-{{index}} mean","refId":"E"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Event send time quantiles by worker","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"s","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"collapsed":true,"gridPos":{"h":1,"w":24,"x":0,"y":28},"id":54,"panels":[{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","editable":true,"error":false,"fill":1,"grid":{},"gridPos":{"h":7,"w":12,"x":0,"y":29},"id":5,"legend":{"alignAsTable":false,"avg":false,"current":false,"hideEmpty":false,"hideZero":false,"max":false,"min":false,"rightSide":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[{"alias":"/user/"},{"alias":"/system/"}],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(process_cpu_system_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","intervalFactor":1,"legendFormat":"{{job}}-{{index}} system ","metric":"","refId":"B","step":20},{"expr":"rate(process_cpu_user_seconds_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","hide":false,"interval":"","intervalFactor":1,"legendFormat":"{{job}}-{{index}} user","refId":"A","step":20}],"thresholds":[{"colorMode":"custom","fillColor":"rgba(255, 255, 255, 1)","line":true,"lineColor":"rgba(216, 200, 27, 0.27)","op":"gt","value":0.5},{"colorMode":"custom","fillColor":"rgba(255, 255, 255, 1)","line":true,"lineColor":"rgba(234, 112, 112, 
0.22)","op":"gt","value":0.8}],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"CPU","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"decimals":null,"format":"percentunit","label":"","logBase":1,"max":"1.2","min":0,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":7,"w":12,"x":12,"y":29},"id":37,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[{"alias":"/max$/","color":"#890F02","fill":0,"legend":false}],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"process_open_fds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}","format":"time_series","hide":false,"intervalFactor":2,"legendFormat":"{{job}}-{{index}}","refId":"A","step":20},{"expr":"process_max_fds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}","format":"time_series","hide":true,"intervalFactor":2,"legendFormat":"{{job}}-{{index}} max","refId":"B","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Open FDs","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"none","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","editable":true,"error":false,"fill":0,"grid":{},"gridPos":{"h":7,"w":12,"x":0,"y":36},"id":34,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"process_resident_memory_bytes{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}","format":"time_series","intervalFactor":2,"legendFormat":"{{job}} {{index}}","refId":"A","step":20,"target":""}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Memory","tooltip":{"shared":true,"sort":0,"value_type":"cumulative"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"bytes","logBase":1,"max":null,"min":"0","show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","description":"Shows the time in which the given percentage of reactor ticks completed, over the sampled 
timespan","fill":1,"gridPos":{"h":7,"w":12,"x":12,"y":36},"id":105,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"histogram_quantile(0.99, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))","format":"time_series","interval":"","intervalFactor":2,"legendFormat":"{{job}}-{{index}} 99%","refId":"A","step":20},{"expr":"histogram_quantile(0.95, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))","format":"time_series","intervalFactor":1,"legendFormat":"{{job}}-{{index}} 95%","refId":"B"},{"expr":"histogram_quantile(0.90, rate(python_twisted_reactor_tick_time_bucket{index=~\"$index\",instance=\"$instance\",job=~\"$job\"}[$bucket_size]))","format":"time_series","intervalFactor":1,"legendFormat":"{{job}}-{{index}} 90%","refId":"C"},{"expr":"","format":"time_series","intervalFactor":1,"refId":"D"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Reactor tick quantiles","tooltip":{"shared":false,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"s","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":false}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":7,"w":12,"x":0,"y":43},"id":50,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(python_twisted_reactor_tick_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(python_twisted_reactor_tick_time_count[$bucket_size])","format":"time_series","interval":"","intervalFactor":2,"legendFormat":"{{job}}-{{index}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Avg reactor tick 
time","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"s","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":false}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":7,"w":12,"x":12,"y":43},"id":49,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[{"alias":"/^up/","legend":false,"yaxis":2}],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"scrape_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}","format":"time_series","interval":"","intervalFactor":2,"legendFormat":"{{job}}-{{index}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Prometheus scrape time","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"s","label":null,"logBase":1,"max":null,"min":"0","show":true},{"decimals":0,"format":"none","label":"","logBase":1,"max":"0","min":"-1","show":false}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":0,"gridPos":{"h":7,"w":12,"x":0,"y":50},"id":53,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"min_over_time(up{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","intervalFactor":2,"legendFormat":"{{job}}-{{index}}","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Up","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":7,"w":12,"x":12,"y":50},"id":120,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","percentage":false,"pointradius":2,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":true,"steppedLine":false,"targets":[{"expr":"rate(synapse_http_server_response_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_response_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","instant":false,"intervalFactor":1,"legendFormat":"{{job}}-{{index}} {{method}} {{servlet}} 
{{tag}}","refId":"A"},{"expr":"rate(synapse_background_process_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_background_process_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","instant":false,"interval":"","intervalFactor":1,"legendFormat":"{{job}}-{{index}} {{name}}","refId":"B"}],"thresholds":[{"colorMode":"critical","fill":true,"line":true,"op":"gt","value":1,"yaxis":"left"}],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Stacked CPU usage","tooltip":{"shared":false,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"percentunit","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}}],"repeat":null,"title":"Process info","type":"row"},{"collapsed":true,"gridPos":{"h":1,"w":24,"x":0,"y":29},"id":56,"panels":[{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","decimals":1,"fill":1,"gridPos":{"h":7,"w":12,"x":0,"y":58},"id":40,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_storage_events_persisted_by_source_type{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","intervalFactor":2,"legendFormat":"{{type}}","refId":"D"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Events/s Local vs Remote","tooltip":{"shared":true,"sort":2,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","label":"","logBase":1,"max":null,"min":"0","show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","decimals":1,"fill":1,"gridPos":{"h":7,"w":12,"x":12,"y":58},"id":46,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_storage_events_persisted_by_event_type{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])","format":"time_series","instant":false,"intervalFactor":2,"legendFormat":"{{type}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Events/s by Type","tooltip":{"shared":false,"sort":2,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","label":null,"logBase":1,"max":null,"min":"0","show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{"irc-freenode 
(local)":"#EAB839"},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","decimals":1,"fill":1,"gridPos":{"h":7,"w":12,"x":0,"y":65},"id":44,"legend":{"alignAsTable":true,"avg":false,"current":false,"hideEmpty":true,"hideZero":true,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_storage_events_persisted_by_origin{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])","format":"time_series","intervalFactor":2,"legendFormat":"{{origin_entity}} ({{origin_type}})","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Events/s by Origin","tooltip":{"shared":false,"sort":2,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","label":null,"logBase":1,"max":null,"min":"0","show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","decimals":1,"fill":1,"gridPos":{"h":7,"w":12,"x":12,"y":65},"id":45,"legend":{"alignAsTable":true,"avg":false,"current":false,"hideEmpty":true,"hideZero":true,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"sum(rate(synapse_storage_events_persisted_events_sep{job=~\"$job\",index=~\"$index\", type=\"m.room.member\",instance=\"$instance\", origin_type=\"local\"}[$bucket_size])) by (origin_type, origin_entity)","format":"time_series","intervalFactor":2,"legendFormat":"{{origin_entity}} ({{origin_type}})","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Memberships/s by Origin","tooltip":{"shared":true,"sort":2,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","label":null,"logBase":1,"max":null,"min":"0","show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}}],"repeat":null,"title":"Event persist rates","type":"row"},{"collapsed":true,"gridPos":{"h":1,"w":24,"x":0,"y":30},"id":57,"panels":[{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","decimals":null,"editable":true,"error":false,"fill":2,"grid":{},"gridPos":{"h":8,"w":12,"x":0,"y":67},"id":4,"legend":{"alignAsTable":true,"avg":false,"current":false,"hideEmpty":false,"hideZero":true,"max":false,"min":false,"rightSide":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_http_server_requests_received{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","interval":"","intervalFactor":2,"legendFormat":"{{job}}-{{index}} {{method}} {{servlet}} 
{{tag}}","refId":"A","step":20}],"thresholds":[{"colorMode":"custom","fill":true,"fillColor":"rgba(216, 200, 27, 0.27)","op":"gt","value":100},{"colorMode":"custom","fill":true,"fillColor":"rgba(234, 112, 112, 0.22)","op":"gt","value":250}],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Request Count by arrival time","tooltip":{"shared":false,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","logBase":1,"max":null,"min":null,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","editable":true,"error":false,"fill":1,"grid":{},"gridPos":{"h":8,"w":12,"x":12,"y":67},"id":32,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":2,"links":[],"nullPointMode":"null","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_http_server_requests_received{instance=\"$instance\",job=~\"$job\",index=~\"$index\",method!=\"OPTIONS\"}[$bucket_size]) and topk(10,synapse_http_server_requests_received{instance=\"$instance\",job=~\"$job\",method!=\"OPTIONS\"})","format":"time_series","intervalFactor":2,"legendFormat":"{{method}} {{servlet}} {{job}}-{{index}}","refId":"A","step":20,"target":""}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Top 10 Request Counts","tooltip":{"shared":false,"sort":0,"value_type":"cumulative"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","logBase":1,"max":null,"min":null,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","decimals":null,"editable":true,"error":false,"fill":2,"grid":{},"gridPos":{"h":8,"w":12,"x":0,"y":75},"id":23,"legend":{"alignAsTable":true,"avg":false,"current":false,"hideEmpty":false,"hideZero":true,"max":false,"min":false,"rightSide":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_http_server_response_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_response_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","interval":"","intervalFactor":1,"legendFormat":"{{job}}-{{index}} {{method}} {{servlet}} {{tag}}","refId":"A","step":20}],"thresholds":[{"colorMode":"custom","fill":true,"fillColor":"rgba(216, 200, 27, 0.27)","op":"gt","value":100,"yaxis":"left"},{"colorMode":"custom","fill":true,"fillColor":"rgba(234, 112, 112, 0.22)","op":"gt","value":250,"yaxis":"left"}],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Total CPU Usage by 
Endpoint","tooltip":{"shared":false,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"percentunit","logBase":1,"max":null,"min":null,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","decimals":null,"editable":true,"error":false,"fill":2,"grid":{},"gridPos":{"h":8,"w":12,"x":12,"y":75},"id":52,"legend":{"alignAsTable":true,"avg":false,"current":false,"hideEmpty":false,"hideZero":true,"max":false,"min":false,"rightSide":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"(rate(synapse_http_server_response_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_http_server_response_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) / rate(synapse_http_server_response_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","interval":"","intervalFactor":2,"legendFormat":"{{job}}-{{index}} {{method}} {{servlet}} {{tag}}","refId":"A","step":20}],"thresholds":[{"colorMode":"custom","fill":true,"fillColor":"rgba(216, 200, 27, 0.27)","op":"gt","value":100},{"colorMode":"custom","fill":true,"fillColor":"rgba(234, 112, 112, 0.22)","op":"gt","value":250}],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Average CPU Usage by Endpoint","tooltip":{"shared":false,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"s","logBase":1,"max":null,"min":null,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","editable":true,"error":false,"fill":1,"grid":{},"gridPos":{"h":8,"w":12,"x":0,"y":83},"id":7,"legend":{"alignAsTable":true,"avg":false,"current":false,"hideEmpty":true,"hideZero":true,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_http_server_response_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","interval":"","intervalFactor":2,"legendFormat":"{{job}}-{{index}} {{method}} {{servlet}} {{tag}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"DB Usage by 
endpoint","tooltip":{"shared":false,"sort":0,"value_type":"cumulative"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"percentunit","logBase":1,"max":null,"min":null,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","decimals":null,"editable":true,"error":false,"fill":2,"grid":{},"gridPos":{"h":8,"w":12,"x":12,"y":83},"id":47,"legend":{"alignAsTable":true,"avg":true,"current":false,"hideEmpty":false,"hideZero":true,"max":true,"min":false,"rightSide":false,"show":true,"total":false,"values":true},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"(sum(rate(synapse_http_server_response_time_seconds_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\",tag!=\"incremental_sync\"}[$bucket_size])) without (code))/(sum(rate(synapse_http_server_response_time_seconds_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\",tag!=\"incremental_sync\"}[$bucket_size])) without (code))","format":"time_series","hide":false,"interval":"","intervalFactor":2,"legendFormat":"{{job}}-{{index}} {{method}} {{servlet}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Non-sync avg response time","tooltip":{"shared":false,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"s","logBase":1,"max":null,"min":null,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":false}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":9,"w":12,"x":0,"y":91},"id":103,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"topk(10,synapse_http_server_in_flight_requests_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"})","format":"time_series","interval":"","intervalFactor":1,"legendFormat":"{{job}}-{{index}} {{method}} {{servlet}}","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Requests in 
flight","tooltip":{"shared":false,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}}],"repeat":null,"title":"Requests","type":"row"},{"collapsed":true,"gridPos":{"h":1,"w":24,"x":0,"y":31},"id":97,"panels":[{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":9,"w":12,"x":0,"y":32},"id":99,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_background_process_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])+rate(synapse_background_process_ru_stime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","interval":"","intervalFactor":1,"legendFormat":"{{job}}-{{index}} {{name}}","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"CPU usage by background jobs","tooltip":{"shared":false,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"percentunit","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":9,"w":12,"x":12,"y":32},"id":101,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_background_process_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + rate(synapse_background_process_db_sched_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","hide":false,"intervalFactor":1,"legendFormat":"{{job}}-{{index}} {{name}}","refId":"A"},{"expr":"","format":"time_series","intervalFactor":1,"refId":"B"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"DB usage by background jobs (including scheduling time)","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"percentunit","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}}],"title":"Background 
jobs","type":"row"},{"collapsed":true,"gridPos":{"h":1,"w":24,"x":0,"y":32},"id":81,"panels":[{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":9,"w":12,"x":0,"y":61},"id":79,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"sum(rate(synapse_federation_client_sent_transactions{instance=\"$instance\"}[$bucket_size]))","format":"time_series","intervalFactor":1,"legendFormat":"txn rate","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Outgoing federation transaction rate","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":9,"w":12,"x":12,"y":61},"id":83,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"sum(rate(synapse_federation_server_received_pdus{instance=~\"$instance\"}[$bucket_size]))","format":"time_series","intervalFactor":1,"legendFormat":"pdus","refId":"A"},{"expr":"sum(rate(synapse_federation_server_received_edus{instance=~\"$instance\"}[$bucket_size]))","format":"time_series","intervalFactor":1,"legendFormat":"edus","refId":"B"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Incoming PDU/EDU rate","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":9,"w":12,"x":0,"y":70},"id":109,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"sum(rate(synapse_federation_client_sent_pdu_destinations:total{instance=\"$instance\"}[$bucket_size]))","format":"time_series","interval":"","intervalFactor":1,"legendFormat":"pdus","refId":"A"},{"expr":"sum(rate(synapse_federation_client_sent_edus{instance=\"$instance\"}[$bucket_size]))","format":"time_series","intervalFactor":1,"legendFormat":"edus","refId":"B"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Outgoing PDU/EDU 
rate","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":9,"w":12,"x":12,"y":70},"id":111,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_federation_client_sent_edus_by_type{instance=\"$instance\"}[$bucket_size])","format":"time_series","interval":"","intervalFactor":1,"legendFormat":"{{type}}","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Outgoing EDUs by type","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}}],"title":"Federation","type":"row"},{"collapsed":true,"gridPos":{"h":1,"w":24,"x":0,"y":33},"id":60,"panels":[{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":7,"w":12,"x":0,"y":62},"id":51,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_http_httppusher_http_pushes_processed{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) and on (instance, job, index) (synapse_http_httppusher_http_pushes_failed + synapse_http_httppusher_http_pushes_processed) \u003e 0","format":"time_series","interval":"","intervalFactor":2,"legendFormat":"processed {{job}}","refId":"A","step":20},{"expr":"rate(synapse_http_httppusher_http_pushes_failed{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) and on (instance, job, index) (synapse_http_httppusher_http_pushes_failed + synapse_http_httppusher_http_pushes_processed) \u003e 0","format":"time_series","intervalFactor":2,"legendFormat":"failed {{job}}","refId":"B","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"HTTP Push 
rate","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}}],"repeat":null,"title":"Pushes","type":"row"},{"collapsed":true,"gridPos":{"h":1,"w":24,"x":0,"y":34},"id":58,"panels":[{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":7,"w":12,"x":0,"y":63},"id":48,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_storage_schedule_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_schedule_time_count[$bucket_size])","format":"time_series","intervalFactor":2,"legendFormat":"{{job}}-{{index}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Avg time waiting for db conn","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"decimals":null,"format":"s","label":"","logBase":1,"max":null,"min":"0","show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":false}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","description":"Shows the time in which the given percentage of database queries were scheduled, over the sampled timespan","fill":1,"gridPos":{"h":7,"w":12,"x":12,"y":63},"id":104,"legend":{"alignAsTable":true,"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"histogram_quantile(0.99, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))","format":"time_series","hide":false,"intervalFactor":1,"legendFormat":"{{job}} {{index}} 99%","refId":"A","step":20},{"expr":"histogram_quantile(0.95, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))","format":"time_series","intervalFactor":1,"legendFormat":"{{job}} {{index}} 95%","refId":"B"},{"expr":"histogram_quantile(0.90, rate(synapse_storage_schedule_time_bucket{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))","format":"time_series","intervalFactor":1,"legendFormat":"{{job}} {{index}} 90%","refId":"C"},{"expr":"rate(synapse_storage_schedule_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(synapse_storage_schedule_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","interval":"","intervalFactor":1,"legendFormat":"{{job}} {{index}} mean","refId":"D"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Db scheduling time 
quantiles","tooltip":{"shared":false,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"decimals":null,"format":"s","label":"","logBase":1,"max":null,"min":"0","show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":false}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","editable":true,"error":false,"fill":0,"grid":{},"gridPos":{"h":7,"w":12,"x":0,"y":70},"id":10,"legend":{"avg":false,"current":false,"hideEmpty":true,"hideZero":true,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":2,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"topk(10, rate(synapse_storage_transaction_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]))","format":"time_series","interval":"","intervalFactor":2,"legendFormat":"{{job}}-{{index}} {{desc}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Top DB transactions by txn rate","tooltip":{"shared":false,"sort":0,"value_type":"cumulative"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","logBase":1,"max":null,"min":0,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","editable":true,"error":false,"fill":1,"grid":{},"gridPos":{"h":7,"w":12,"x":12,"y":70},"id":11,"legend":{"avg":false,"current":false,"hideEmpty":true,"hideZero":true,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":true,"steppedLine":true,"targets":[{"expr":"rate(synapse_storage_transaction_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","instant":false,"interval":"","intervalFactor":1,"legendFormat":"{{job}}-{{index}} {{desc}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Top DB transactions by total txn 
time","tooltip":{"shared":false,"sort":0,"value_type":"cumulative"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"percentunit","logBase":1,"max":null,"min":null,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}}],"repeat":null,"title":"Database","type":"row"},{"collapsed":true,"gridPos":{"h":1,"w":24,"x":0,"y":35},"id":59,"panels":[{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","editable":true,"error":false,"fill":1,"grid":{},"gridPos":{"h":13,"w":12,"x":0,"y":36},"id":12,"legend":{"alignAsTable":true,"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":2,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_util_metrics_block_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\",block_name!=\"wrapped_request_handler\"}[$bucket_size]) + rate(synapse_util_metrics_block_ru_stime_seconds[$bucket_size])","format":"time_series","interval":"","intervalFactor":2,"legendFormat":"{{job}}-{{index}} {{block_name}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Total CPU Usage by Block","tooltip":{"shared":false,"sort":0,"value_type":"cumulative"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"percentunit","logBase":1,"max":null,"min":null,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","editable":true,"error":false,"fill":1,"grid":{},"gridPos":{"h":13,"w":12,"x":12,"y":36},"id":26,"legend":{"alignAsTable":true,"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":2,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"(rate(synapse_util_metrics_block_ru_utime_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) + rate(synapse_util_metrics_block_ru_stime_seconds[$bucket_size])) / rate(synapse_util_metrics_block_count[$bucket_size])","format":"time_series","interval":"","intervalFactor":2,"legendFormat":"{{job}}-{{index}} {{block_name}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Average CPU Time per 
Block","tooltip":{"shared":false,"sort":0,"value_type":"cumulative"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"ms","logBase":1,"max":null,"min":null,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","editable":true,"error":false,"fill":1,"grid":{},"gridPos":{"h":13,"w":12,"x":0,"y":49},"id":13,"legend":{"alignAsTable":true,"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":2,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_util_metrics_block_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\",block_name!=\"wrapped_request_handler\"}[$bucket_size])","format":"time_series","interval":"","intervalFactor":2,"legendFormat":"{{job}} {{block_name}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Total DB Usage by Block","tooltip":{"shared":false,"sort":0,"value_type":"cumulative"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"percentunit","logBase":1,"max":null,"min":0,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","editable":true,"error":false,"fill":1,"grid":{},"gridPos":{"h":13,"w":12,"x":12,"y":49},"id":27,"legend":{"alignAsTable":true,"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":2,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_util_metrics_block_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_db_txn_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","interval":"","intervalFactor":2,"legendFormat":"{{job}}-{{index}} {{block_name}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Average Database Time per 
Block","tooltip":{"shared":false,"sort":0,"value_type":"cumulative"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"ms","logBase":1,"max":null,"min":null,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","editable":true,"error":false,"fill":1,"grid":{},"gridPos":{"h":13,"w":12,"x":0,"y":62},"id":28,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":false,"total":false,"values":false},"lines":true,"linewidth":2,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_util_metrics_block_db_txn_duration_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_db_txn_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","interval":"","intervalFactor":2,"legendFormat":"{{job}}-{{index}} {{block_name}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Average Transactions per Block","tooltip":{"shared":false,"sort":0,"value_type":"cumulative"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"none","logBase":1,"max":null,"min":null,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","editable":true,"error":false,"fill":1,"grid":{},"gridPos":{"h":13,"w":12,"x":12,"y":62},"id":25,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":false,"total":false,"values":false},"lines":true,"linewidth":2,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_util_metrics_block_time_seconds{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size]) / rate(synapse_util_metrics_block_count[$bucket_size])","format":"time_series","interval":"","intervalFactor":2,"legendFormat":"{{job}}-{{index}} {{block_name}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Average Wallclock Time per Block","tooltip":{"shared":false,"sort":0,"value_type":"cumulative"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"ms","logBase":1,"max":null,"min":null,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}}],"repeat":null,"title":"Per-block 
metrics","type":"row"},{"collapsed":true,"gridPos":{"h":1,"w":24,"x":0,"y":36},"id":61,"panels":[{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","decimals":2,"editable":true,"error":false,"fill":0,"grid":{},"gridPos":{"h":10,"w":12,"x":0,"y":37},"id":1,"legend":{"alignAsTable":true,"avg":false,"current":false,"hideEmpty":true,"hideZero":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":2,"links":[],"nullPointMode":"null","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_util_caches_cache:hits{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])/rate(synapse_util_caches_cache:total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])","format":"time_series","intervalFactor":2,"legendFormat":"{{name}} {{job}}-{{index}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Cache Hit Ratio","tooltip":{"msResolution":true,"shared":false,"sort":0,"value_type":"cumulative"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"decimals":null,"format":"percentunit","label":"","logBase":1,"max":"1","min":0,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":false}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","editable":true,"error":false,"fill":1,"grid":{},"gridPos":{"h":10,"w":12,"x":12,"y":37},"id":8,"legend":{"alignAsTable":true,"avg":false,"current":false,"hideZero":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":2,"links":[],"nullPointMode":"connected","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"synapse_util_caches_cache:size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}","format":"time_series","hide":false,"interval":"","intervalFactor":2,"legendFormat":"{{name}} {{job}}-{{index}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Cache Size","tooltip":{"shared":false,"sort":0,"value_type":"cumulative"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"short","logBase":1,"max":null,"min":0,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","editable":true,"error":false,"fill":1,"grid":{},"gridPos":{"h":10,"w":12,"x":0,"y":47},"id":38,"legend":{"alignAsTable":true,"avg":false,"current":false,"hideZero":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":2,"links":[],"nullPointMode":"connected","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_util_caches_cache:total{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])","format":"time_series","interval":"","intervalFactor":2,"legendFormat":"{{name}} {{job}}-{{index}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Cache request 
rate","tooltip":{"shared":false,"sort":0,"value_type":"cumulative"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"rps","logBase":1,"max":null,"min":0,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":10,"w":12,"x":12,"y":47},"id":39,"legend":{"alignAsTable":true,"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"topk(10, rate(synapse_util_caches_cache:total{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size]) - rate(synapse_util_caches_cache:hits{job=\"$job\",instance=\"$instance\"}[$bucket_size]))","format":"time_series","intervalFactor":2,"legendFormat":"{{name}} {{job}}-{{index}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Top 10 cache misses","tooltip":{"shared":false,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"rps","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":9,"w":12,"x":0,"y":57},"id":65,"legend":{"alignAsTable":true,"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_util_caches_cache:evicted_size{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","intervalFactor":1,"legendFormat":"{{name}} {{job}}-{{index}}","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Cache eviction rate","tooltip":{"shared":false,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"decimals":null,"format":"hertz","label":"entries / 
second","logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}}],"repeat":null,"title":"Caches","type":"row"},{"collapsed":true,"gridPos":{"h":1,"w":24,"x":0,"y":37},"id":62,"panels":[{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":9,"w":12,"x":0,"y":66},"id":91,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":true,"steppedLine":false,"targets":[{"expr":"rate(python_gc_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[10m])","format":"time_series","instant":false,"intervalFactor":1,"legendFormat":"{{job}}-{{index}} gen {{gen}}","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Total GC time by bucket (10m smoothing)","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"decimals":null,"format":"percentunit","label":null,"logBase":1,"max":null,"min":"0","show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","decimals":3,"editable":true,"error":false,"fill":1,"grid":{},"gridPos":{"h":9,"w":12,"x":12,"y":66},"id":21,"legend":{"alignAsTable":true,"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":2,"links":[],"nullPointMode":"null as zero","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(python_gc_time_sum{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(python_gc_time_count[$bucket_size])","format":"time_series","intervalFactor":2,"legendFormat":"{{job}} {{index}} gen {{gen}} ","refId":"A","step":20,"target":""}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Average GC Time Per Collection","tooltip":{"shared":false,"sort":0,"value_type":"cumulative"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"s","logBase":1,"max":null,"min":null,"show":true},{"format":"short","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","description":"'gen 0' shows the number of objects allocated since the last gen0 GC.\n'gen 1' / 'gen 2' show the number of gen0/gen1 GCs since the last gen1/gen2 GC.","fill":1,"gridPos":{"h":9,"w":12,"x":0,"y":75},"id":89,"legend":{"avg":false,"current":false,"hideEmpty":true,"hideZero":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[{"alias":"/gen 
0$/","yaxis":2}],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"python_gc_counts{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}","format":"time_series","intervalFactor":1,"legendFormat":"{{job}}-{{index}} gen {{gen}}","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Allocation counts","tooltip":{"shared":false,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"short","label":"Gen N-1 GCs since last Gen N GC","logBase":1,"max":null,"min":null,"show":true},{"decimals":null,"format":"short","label":"Objects since last Gen 0 GC","logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":9,"w":12,"x":12,"y":75},"id":93,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"connected","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(python_gc_unreachable_total{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])/rate(python_gc_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","intervalFactor":1,"legendFormat":"{{job}}-{{index}} gen {{gen}}","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Object counts per collection","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":9,"w":12,"x":0,"y":84},"id":95,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(python_gc_time_count{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","intervalFactor":1,"legendFormat":"{{job}}-{{index}} gen {{gen}}","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"GC 
frequency","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"cards":{"cardPadding":0,"cardRound":null},"color":{"cardColor":"#b4ff00","colorScale":"sqrt","colorScheme":"interpolateSpectral","exponent":0.5,"max":null,"min":0,"mode":"spectrum"},"dataFormat":"tsbuckets","datasource":"Prometheus","gridPos":{"h":9,"w":12,"x":12,"y":84},"heatmap":{},"hideZeroBuckets":true,"highlightCards":true,"id":87,"legend":{"show":true},"links":[],"reverseYBuckets":false,"targets":[{"expr":"sum(rate(python_gc_time_bucket[$bucket_size])) by (le)","format":"heatmap","intervalFactor":1,"legendFormat":"{{le}}","refId":"A"}],"title":"GC durations","tooltip":{"show":true,"showHistogram":false},"type":"heatmap","xAxis":{"show":true},"xBucketNumber":null,"xBucketSize":null,"yAxis":{"decimals":null,"format":"s","logBase":1,"max":null,"min":null,"show":true,"splitFactor":null},"yBucketBound":"auto","yBucketNumber":null,"yBucketSize":null}],"repeat":null,"title":"GC","type":"row"},{"collapsed":true,"gridPos":{"h":1,"w":24,"x":0,"y":38},"id":63,"panels":[{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":7,"w":12,"x":0,"y":67},"id":2,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_replication_tcp_resource_user_sync{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","intervalFactor":2,"legendFormat":"user started/stopped syncing","refId":"A","step":20},{"expr":"rate(synapse_replication_tcp_resource_federation_ack{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","intervalFactor":2,"legendFormat":"federation ack","refId":"B","step":20},{"expr":"rate(synapse_replication_tcp_resource_remove_pusher{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","intervalFactor":2,"legendFormat":"remove pusher","refId":"C","step":20},{"expr":"rate(synapse_replication_tcp_resource_invalidate_cache{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","intervalFactor":2,"legendFormat":"invalidate cache","refId":"D","step":20},{"expr":"rate(synapse_replication_tcp_resource_user_ip_cache{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])","format":"time_series","intervalFactor":2,"legendFormat":"user ip cache","refId":"E","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Rate of events on replication 
master","tooltip":{"shared":false,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":7,"w":12,"x":12,"y":67},"id":41,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_replication_tcp_resource_stream_updates{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])","format":"time_series","interval":"","intervalFactor":2,"legendFormat":"{{stream_name}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Outgoing stream updates","tooltip":{"shared":false,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":7,"w":12,"x":0,"y":74},"id":42,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"sum (rate(synapse_replication_tcp_protocol_inbound_commands{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (name, conn_id)","format":"time_series","intervalFactor":2,"legendFormat":"{{job}}-{{index}} {{command}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Rate of incoming commands","tooltip":{"shared":false,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":7,"w":12,"x":12,"y":74},"id":43,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"sum (rate(synapse_replication_tcp_protocol_outbound_commands{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}[$bucket_size])) without (name, conn_id)","format":"time_series","intervalFactor":2,"legendFormat":"{{job}}-{{index}} 
{{command}}","refId":"A","step":20}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Rate of outgoing commands","tooltip":{"shared":false,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":7,"w":12,"x":0,"y":81},"id":113,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"synapse_replication_tcp_resource_connections_per_stream{job=~\"$job\",index=~\"$index\",instance=\"$instance\"}","format":"time_series","intervalFactor":1,"legendFormat":"{{job}}-{{index}} {{stream_name}}","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Replication connections","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":7,"w":12,"x":12,"y":81},"id":115,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"null","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"rate(synapse_replication_tcp_protocol_close_reason{job=\"$job\",index=~\"$index\",instance=\"$instance\"}[$bucket_size])","format":"time_series","intervalFactor":1,"legendFormat":"{{job}}-{{index}} {{reason_type}}","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Replication connection close reasons","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"hertz","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}}],"repeat":null,"title":"Replication","type":"row"},{"collapsed":true,"gridPos":{"h":1,"w":24,"x":0,"y":39},"id":69,"panels":[{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":9,"w":12,"x":0,"y":68},"id":67,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"connected","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":" synapse_event_persisted_position{instance=\"$instance\",job=\"synapse\"} - 
ignoring(index, job, name) group_right() synapse_event_processing_positions{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}","format":"time_series","interval":"","intervalFactor":1,"legendFormat":"{{job}}-{{index}} ","refId":"A"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Event processing lag","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"short","label":"events","logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}},{"aliasColors":{},"bars":false,"dashLength":10,"dashes":false,"datasource":"$datasource","fill":1,"gridPos":{"h":9,"w":12,"x":12,"y":68},"id":71,"legend":{"avg":false,"current":false,"max":false,"min":false,"show":true,"total":false,"values":false},"lines":true,"linewidth":1,"links":[],"nullPointMode":"connected","paceLength":10,"percentage":false,"pointradius":5,"points":false,"renderer":"flot","seriesOverrides":[],"spaceLength":10,"stack":false,"steppedLine":false,"targets":[{"expr":"time()*1000-synapse_event_processing_last_ts{instance=\"$instance\",job=~\"$job\",index=~\"$index\"}","format":"time_series","hide":false,"intervalFactor":1,"legendFormat":"{{job}}-{{index}} {{name}}","refId":"B"}],"thresholds":[],"timeFrom":null,"timeRegions":[],"timeShift":null,"title":"Age of last processed event","tooltip":{"shared":true,"sort":0,"value_type":"individual"},"type":"graph","xaxis":{"buckets":null,"mode":"time","name":null,"show":true,"values":[]},"yaxes":[{"format":"ms","label":null,"logBase":1,"max":null,"min":null,"show":true},{"format":"short","label":null,"logBase":1,"max":null,"min":null,"show":true}],"yaxis":{"align":false,"alignLevel":null}}],"title":"Event processing loop positions","type":"row"}],"refresh":"1m","schemaVersion":18,"style":"dark","tags":["matrix"],"templating":{"list":[{"current":{"selected":true,"text":"Prometheus","value":"Prometheus"},"hide":0,"includeAll":false,"label":null,"multi":false,"name":"datasource","options":[],"query":"prometheus","refresh":1,"regex":"","skipUrlSync":false,"type":"datasource"},{"allFormat":"glob","auto":true,"auto_count":100,"auto_min":"30s","current":{"text":"auto","value":"$__auto_interval_bucket_size"},"datasource":null,"hide":0,"includeAll":false,"label":"Bucket Size","multi":false,"multiFormat":"glob","name":"bucket_size","options":[{"selected":true,"text":"auto","value":"$__auto_interval_bucket_size"},{"selected":false,"text":"30s","value":"30s"},{"selected":false,"text":"1m","value":"1m"},{"selected":false,"text":"2m","value":"2m"},{"selected":false,"text":"5m","value":"5m"},{"selected":false,"text":"10m","value":"10m"},{"selected":false,"text":"15m","value":"15m"}],"query":"30s,1m,2m,5m,10m,15m","refresh":2,"skipUrlSync":false,"type":"interval"},{"allValue":null,"current":{"text":"matrix.org","value":"matrix.org"},"datasource":"$datasource","definition":"","hide":0,"includeAll":false,"label":null,"multi":false,"name":"instance","options":[],"query":"label_values(synapse_util_metrics_block_ru_utime_seconds, instance)","refresh":2,"regex":"","skipUrlSync":false,"sort":0,"tagValuesQuery":"","tags":[],"tagsQuery":"","type":"query","useTags":false},{"allFormat":"regex 
wildcard","allValue":"","current":{"text":"synapse","value":["synapse"]},"datasource":"$datasource","definition":"","hide":0,"hideLabel":false,"includeAll":true,"label":"Job","multi":true,"multiFormat":"regex values","name":"job","options":[],"query":"label_values(synapse_util_metrics_block_ru_utime_seconds, job)","refresh":2,"refresh_on_load":false,"regex":"","skipUrlSync":false,"sort":1,"tagValuesQuery":"","tags":[],"tagsQuery":"","type":"query","useTags":false},{"allFormat":"regex wildcard","allValue":".*","current":{"text":"All","value":"$__all"},"datasource":"$datasource","definition":"","hide":0,"hideLabel":false,"includeAll":true,"label":"","multi":true,"multiFormat":"regex values","name":"index","options":[],"query":"label_values(synapse_util_metrics_block_ru_utime_seconds, index)","refresh":2,"refresh_on_load":false,"regex":"","skipUrlSync":false,"sort":3,"tagValuesQuery":"","tags":[],"tagsQuery":"","type":"query","useTags":false}]},"time":{"from":"now-1h","to":"now"},"timepicker":{"now":true,"refresh_intervals":["5s","10s","30s","1m","5m","15m","30m","1h","2h","1d"],"time_options":["5m","15m","1h","6h","12h","24h","2d","7d","30d"]},"timezone":"","title":"Synapse","uid":"000000012","version":25} From d5adf297e6d6688d9410e39d7d90a0f9f01173eb Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 15 Apr 2019 17:13:16 +0100 Subject: [PATCH 025/429] Move some rest endpoints to client reader --- docs/workers.rst | 3 +++ synapse/app/client_reader.py | 11 +++++++---- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/docs/workers.rst b/docs/workers.rst index d80fc04d2e..d9d545610b 100644 --- a/docs/workers.rst +++ b/docs/workers.rst @@ -227,6 +227,9 @@ following regular expressions:: ^/_matrix/client/(api/v1|r0|unstable)/account/3pid$ ^/_matrix/client/(api/v1|r0|unstable)/keys/query$ ^/_matrix/client/(api/v1|r0|unstable)/keys/changes$ + ^/_matrix/client/versions$ + ^/_matrix/client/(api/v1|r0|unstable)/voip/turnServer$ + ^/_matrix/client/(api/v1|r0|unstable)/pushrules/.*$ Additionally, the following REST endpoints can be handled, but all requests must be routed to the same instance:: diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index beaea64a61..1e9e686107 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -45,6 +45,7 @@ from synapse.replication.slave.storage.room import RoomStore from synapse.replication.slave.storage.transactions import SlavedTransactionStore from synapse.replication.tcp.client import ReplicationClientHandler from synapse.rest.client.v1.login import LoginRestServlet +from synapse.rest.client.v1.push_rule import PushRuleRestServlet from synapse.rest.client.v1.room import ( JoinedRoomMemberListRestServlet, PublicRoomListRestServlet, @@ -52,9 +53,11 @@ from synapse.rest.client.v1.room import ( RoomMemberListRestServlet, RoomStateRestServlet, ) +from synapse.rest.client.v1.voip import VoipRestServlet from synapse.rest.client.v2_alpha.account import ThreepidRestServlet from synapse.rest.client.v2_alpha.keys import KeyChangesServlet, KeyQueryServlet from synapse.rest.client.v2_alpha.register import RegisterRestServlet +from synapse.rest.client.versions import VersionsRestServlet from synapse.server import HomeServer from synapse.storage.engines import create_engine from synapse.util.httpresourcetree import create_resource_tree @@ -109,12 +112,12 @@ class ClientReaderServer(HomeServer): ThreepidRestServlet(self).register(resource) KeyQueryServlet(self).register(resource) 
KeyChangesServlet(self).register(resource) + VoipRestServlet(self).register(resource) + PushRuleRestServlet(self).register(resource) + VersionsRestServlet(self).register(resource) resources.update({ - "/_matrix/client/r0": resource, - "/_matrix/client/unstable": resource, - "/_matrix/client/v2_alpha": resource, - "/_matrix/client/api/v1": resource, + "/_matrix/client": resource, }) root_resource = create_resource_tree(resources, NoResource()) From 208251956dcc2aa18ec72c5a74d5d2e67546a68e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 15 Apr 2019 17:17:31 +0100 Subject: [PATCH 026/429] Newsfile --- changelog.d/5063.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5063.feature diff --git a/changelog.d/5063.feature b/changelog.d/5063.feature new file mode 100644 index 0000000000..fd7b80018e --- /dev/null +++ b/changelog.d/5063.feature @@ -0,0 +1 @@ +Add support for handling /verions, /voip and /push_rules client endpoints to client_reader worker. From ec638a16022d29cbab32ac8ddb4cddb7f99d6495 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 15 Apr 2019 18:40:15 +0100 Subject: [PATCH 027/429] Only handle GET requests for /push_rules --- docs/workers.rst | 3 +++ synapse/rest/client/v1/push_rule.py | 7 +++++++ 2 files changed, 10 insertions(+) diff --git a/docs/workers.rst b/docs/workers.rst index d9d545610b..aa4e7a120b 100644 --- a/docs/workers.rst +++ b/docs/workers.rst @@ -229,6 +229,9 @@ following regular expressions:: ^/_matrix/client/(api/v1|r0|unstable)/keys/changes$ ^/_matrix/client/versions$ ^/_matrix/client/(api/v1|r0|unstable)/voip/turnServer$ + +Additionally, the following REST endpoints can be handled for GET requests:: + ^/_matrix/client/(api/v1|r0|unstable)/pushrules/.*$ Additionally, the following REST endpoints can be handled, but all requests must diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index c654f9b5f0..7e9d95de26 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -39,9 +39,13 @@ class PushRuleRestServlet(ClientV1RestServlet): super(PushRuleRestServlet, self).__init__(hs) self.store = hs.get_datastore() self.notifier = hs.get_notifier() + self._is_worker = hs.config.worker_app is not None @defer.inlineCallbacks def on_PUT(self, request): + if self._is_worker: + raise Exception("Cannot handle PUT /push_rules on worker") + spec = _rule_spec_from_path([x.decode('utf8') for x in request.postpath]) try: priority_class = _priority_class_from_spec(spec) @@ -103,6 +107,9 @@ class PushRuleRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_DELETE(self, request): + if self._is_worker: + raise Exception("Cannot handle DELETE /push_rules on worker") + spec = _rule_spec_from_path([x.decode('utf8') for x in request.postpath]) requester = yield self.auth.get_user_by_req(request) From 38642614cffb107bb69abf215eb5144681bc4491 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 15 Apr 2019 19:21:32 +0100 Subject: [PATCH 028/429] VersionRestServlet doesn't take a param --- synapse/app/client_reader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index 1e9e686107..864f1eac48 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -114,7 +114,7 @@ class ClientReaderServer(HomeServer): KeyChangesServlet(self).register(resource) VoipRestServlet(self).register(resource) PushRuleRestServlet(self).register(resource) - 
VersionsRestServlet(self).register(resource) + VersionsRestServlet().register(resource) resources.update({ "/_matrix/client": resource, From 468b2bcb2ee42e877f6610e2704ef7faa73df344 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 15 Apr 2019 19:41:25 +0100 Subject: [PATCH 029/429] Newsfile --- changelog.d/5065.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5065.feature diff --git a/changelog.d/5065.feature b/changelog.d/5065.feature new file mode 100644 index 0000000000..fd7b80018e --- /dev/null +++ b/changelog.d/5065.feature @@ -0,0 +1 @@ +Add support for handling /verions, /voip and /push_rules client endpoints to client_reader worker. From a137f4eac028fefda0787a168dd8853653155bc3 Mon Sep 17 00:00:00 2001 From: Silke Hofstra Date: Tue, 16 Apr 2019 12:41:17 +0200 Subject: [PATCH 030/429] Add systemd-python to optional dependencies (#4339) Using systemd-python allows for logging to the systemd journal, as is documented in: `synapse/contrib/systemd/log_config.yaml`. Signed-off-by: Silke Hofstra --- changelog.d/4339.feature | 1 + docker/Dockerfile-dhvirtualenv | 2 ++ setup.py | 6 +----- synapse/python_dependencies.py | 14 +++++++++----- 4 files changed, 13 insertions(+), 10 deletions(-) create mode 100644 changelog.d/4339.feature diff --git a/changelog.d/4339.feature b/changelog.d/4339.feature new file mode 100644 index 0000000000..cecff97b80 --- /dev/null +++ b/changelog.d/4339.feature @@ -0,0 +1 @@ +Add systemd-python to the optional dependencies to enable logging to the systemd journal. Install with `pip install matrix-synapse[systemd]`. diff --git a/docker/Dockerfile-dhvirtualenv b/docker/Dockerfile-dhvirtualenv index 224c92352d..9c4c9a5d80 100644 --- a/docker/Dockerfile-dhvirtualenv +++ b/docker/Dockerfile-dhvirtualenv @@ -50,7 +50,9 @@ RUN apt-get update -qq -o Acquire::Languages=none \ debhelper \ devscripts \ dh-systemd \ + libsystemd-dev \ lsb-release \ + pkg-config \ python3-dev \ python3-pip \ python3-setuptools \ diff --git a/setup.py b/setup.py index 55b1b10a77..55663e9cac 100755 --- a/setup.py +++ b/setup.py @@ -86,13 +86,9 @@ long_description = read_file(("README.rst",)) REQUIREMENTS = dependencies['REQUIREMENTS'] CONDITIONAL_REQUIREMENTS = dependencies['CONDITIONAL_REQUIREMENTS'] +ALL_OPTIONAL_REQUIREMENTS = dependencies['ALL_OPTIONAL_REQUIREMENTS'] # Make `pip install matrix-synapse[all]` install all the optional dependencies. -ALL_OPTIONAL_REQUIREMENTS = set() - -for optional_deps in CONDITIONAL_REQUIREMENTS.values(): - ALL_OPTIONAL_REQUIREMENTS = set(optional_deps) | ALL_OPTIONAL_REQUIREMENTS - CONDITIONAL_REQUIREMENTS["all"] = list(ALL_OPTIONAL_REQUIREMENTS) diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 62c1748665..779f36dbed 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -86,18 +86,22 @@ CONDITIONAL_REQUIREMENTS = { "acme": ["txacme>=0.9.2"], "saml2": ["pysaml2>=4.5.0"], + "systemd": ["systemd-python>=231"], "url_preview": ["lxml>=3.5.0"], "test": ["mock>=2.0", "parameterized"], "sentry": ["sentry-sdk>=0.7.2"], } +ALL_OPTIONAL_REQUIREMENTS = set() + +for name, optional_deps in CONDITIONAL_REQUIREMENTS.items(): + # Exclude systemd as it's a system-based requirement. 
+ if name not in ["systemd"]: + ALL_OPTIONAL_REQUIREMENTS = set(optional_deps) | ALL_OPTIONAL_REQUIREMENTS + def list_requirements(): - deps = set(REQUIREMENTS) - for opt in CONDITIONAL_REQUIREMENTS.values(): - deps = set(opt) | deps - - return list(deps) + return list(set(REQUIREMENTS) | ALL_OPTIONAL_REQUIREMENTS) class DependencyException(Exception): From 3f22e993f096ceec21027cc52b93cefa5979382c Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Tue, 16 Apr 2019 08:31:59 -0600 Subject: [PATCH 031/429] Use packages.matrix.org for packages (#5067) * Use packages.matrix.org for packages See https://github.com/vector-im/riot-web/issues/9497 (applies to more than just Olm) * changelog --- INSTALL.md | 6 +++--- changelog.d/5067.misc | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/5067.misc diff --git a/INSTALL.md b/INSTALL.md index a5c3c6efaa..d0e74be18c 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -257,13 +257,13 @@ https://github.com/spantaleev/matrix-docker-ansible-deploy #### Matrix.org packages Matrix.org provides Debian/Ubuntu packages of the latest stable version of -Synapse via https://matrix.org/packages/debian/. To use them: +Synapse via https://packages.matrix.org/debian/. To use them: ``` sudo apt install -y lsb-release curl apt-transport-https -echo "deb https://matrix.org/packages/debian `lsb_release -cs` main" | +echo "deb https://packages.matrix.org/debian `lsb_release -cs` main" | sudo tee /etc/apt/sources.list.d/matrix-org.list -curl "https://matrix.org/packages/debian/repo-key.asc" | +curl "https://packages.matrix.org/debian/repo-key.asc" | sudo apt-key add - sudo apt update sudo apt install matrix-synapse-py3 diff --git a/changelog.d/5067.misc b/changelog.d/5067.misc new file mode 100644 index 0000000000..bbb4337dbf --- /dev/null +++ b/changelog.d/5067.misc @@ -0,0 +1 @@ +Update documentation for where to get Synapse packages. From ad010f630616fd0211706293e49fed38d037ceb5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 16 Apr 2019 17:42:50 +0100 Subject: [PATCH 032/429] Remove usage of request.postpath This is an undocumented variable in twisted, and relies on the servlet being mounted in the right way. This also breaks getting push rules on workers. 
--- synapse/rest/client/v1/push_rule.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index 7e9d95de26..506ec95ddd 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -31,7 +31,7 @@ from .base import ClientV1RestServlet, client_path_patterns class PushRuleRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/pushrules/.*$") + PATTERNS = client_path_patterns("/(?Ppushrules/.*)$") SLIGHTLY_PEDANTIC_TRAILING_SLASH_ERROR = ( "Unrecognised request: You probably wanted a trailing slash") @@ -42,11 +42,11 @@ class PushRuleRestServlet(ClientV1RestServlet): self._is_worker = hs.config.worker_app is not None @defer.inlineCallbacks - def on_PUT(self, request): + def on_PUT(self, request, path): if self._is_worker: raise Exception("Cannot handle PUT /push_rules on worker") - spec = _rule_spec_from_path([x.decode('utf8') for x in request.postpath]) + spec = _rule_spec_from_path([x for x in path.split("/")]) try: priority_class = _priority_class_from_spec(spec) except InvalidRuleException as e: @@ -106,11 +106,11 @@ class PushRuleRestServlet(ClientV1RestServlet): defer.returnValue((200, {})) @defer.inlineCallbacks - def on_DELETE(self, request): + def on_DELETE(self, request, path): if self._is_worker: raise Exception("Cannot handle DELETE /push_rules on worker") - spec = _rule_spec_from_path([x.decode('utf8') for x in request.postpath]) + spec = _rule_spec_from_path([x for x in path.split("/")]) requester = yield self.auth.get_user_by_req(request) user_id = requester.user.to_string() @@ -130,7 +130,7 @@ class PushRuleRestServlet(ClientV1RestServlet): raise @defer.inlineCallbacks - def on_GET(self, request): + def on_GET(self, request, path): requester = yield self.auth.get_user_by_req(request) user_id = requester.user.to_string() @@ -141,7 +141,7 @@ class PushRuleRestServlet(ClientV1RestServlet): rules = format_push_rules_for_user(requester.user, rules) - path = [x.decode('utf8') for x in request.postpath][1:] + path = [x for x in path.split("/")][1:] if path == []: # we're a reference impl: pedantry is our job. @@ -157,7 +157,7 @@ class PushRuleRestServlet(ClientV1RestServlet): else: raise UnrecognizedRequestError() - def on_OPTIONS(self, _): + def on_OPTIONS(self, request, path): return 200, {} def notify_user(self, user_id): From 14d5ad7d2b0d4163dae62de7a5699838b1ffdcbe Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 16 Apr 2019 17:52:00 +0100 Subject: [PATCH 033/429] Newsfile --- changelog.d/5070.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5070.feature diff --git a/changelog.d/5070.feature b/changelog.d/5070.feature new file mode 100644 index 0000000000..fd7b80018e --- /dev/null +++ b/changelog.d/5070.feature @@ -0,0 +1 @@ +Add support for handling /verions, /voip and /push_rules client endpoints to client_reader worker. 
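To make the motivation of the two patches above concrete, here is a small, self-contained sketch of the pattern-matching approach (plain Python `re` only, not Synapse's actual servlet machinery; the `handle_push_rules` helper and the example URL are invented for illustration). The point is that a named `path` group in the URL pattern hands the rule path to the handler explicitly, so nothing depends on how the resource happens to be mounted, which is what `request.postpath` implicitly relied on:

```python
import re

# Same idea as client_path_patterns("/(?P<path>pushrules/.*)$"): the named group
# captures the rule path itself, independent of the servlet's mount point.
PUSH_RULE_PATTERN = re.compile(r"/(?P<path>pushrules/.*)$")


def handle_push_rules(method, request_path, is_worker=False):
    """Toy dispatcher mirroring the patched servlet's behaviour."""
    match = PUSH_RULE_PATTERN.search(request_path)
    if match is None:
        raise ValueError("Unrecognised request")
    if is_worker and method in ("PUT", "DELETE"):
        # client_reader workers only serve reads; writes must go to the master.
        raise RuntimeError("Cannot handle %s /push_rules on worker" % method)
    # Rough equivalent of _rule_spec_from_path(path.split("/")): element 0 is
    # always "pushrules", the rest identify the scope, kind and rule id.
    return match.group("path").split("/")


if __name__ == "__main__":
    print(handle_push_rules(
        "GET",
        "/_matrix/client/r0/pushrules/global/override/.m.rule.suppress_notices",
    ))
    # ['pushrules', 'global', 'override', '.m.rule.suppress_notices']
```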
From 600ec04739a3fd7a2697a837f6e232c970bd97d3 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 17 Apr 2019 12:01:59 +0100 Subject: [PATCH 034/429] Make sure we're not registering the same 3pid twice --- changelog.d/5071.bugfix | 1 + synapse/rest/client/v2_alpha/register.py | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+) create mode 100644 changelog.d/5071.bugfix diff --git a/changelog.d/5071.bugfix b/changelog.d/5071.bugfix new file mode 100644 index 0000000000..ddf7ab5fa8 --- /dev/null +++ b/changelog.d/5071.bugfix @@ -0,0 +1 @@ +Make sure we're not registering the same 3pid twice on registration. diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 6d235262c8..dc3e265bcd 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -391,6 +391,13 @@ class RegisterRestServlet(RestServlet): # the user-facing checks will probably already have happened in # /register/email/requestToken when we requested a 3pid, but that's not # guaranteed. + # + # Also check that we're not trying to register a 3pid that's already + # been registered. + # + # This has probably happened in /register/email/requestToken as well, + # but if a user hits this endpoint twice then clicks on each link from + # the two activation emails, they would register the same 3pid twice. if auth_result: for login_type in [LoginType.EMAIL_IDENTITY, LoginType.MSISDN]: @@ -406,6 +413,17 @@ class RegisterRestServlet(RestServlet): Codes.THREEPID_DENIED, ) + existingUid = yield self.store.get_user_id_by_threepid( + medium, address, + ) + + if existingUid is not None: + raise SynapseError( + 400, + "%s is already in use" % medium, + Codes.THREEPID_IN_USE, + ) + if registered_user_id is not None: logger.info( "Already registered user ID %r for this session", From 20f0617e87924c929f0db0c06d30de0c8d15081c Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 10 Apr 2019 17:58:47 +0100 Subject: [PATCH 035/429] Send out emails with links to extend an account's validity period --- changelog.d/5047.feature | 1 + docs/sample_config.yaml | 29 ++- synapse/api/auth.py | 5 +- synapse/config/emailconfig.py | 7 +- synapse/config/registration.py | 52 +++- synapse/handlers/account_validity.py | 228 ++++++++++++++++++ synapse/push/mailer.py | 14 +- synapse/push/pusher.py | 6 +- synapse/res/templates/mail-expiry.css | 4 + synapse/res/templates/notice_expiry.html | 43 ++++ synapse/res/templates/notice_expiry.txt | 7 + synapse/rest/__init__.py | 2 + .../rest/client/v2_alpha/account_validity.py | 62 +++++ synapse/server.py | 5 + synapse/storage/registration.py | 168 +++++++++++-- .../schema/delta/54/account_validity.sql | 9 +- tests/rest/client/v2_alpha/test_register.py | 100 +++++++- 17 files changed, 699 insertions(+), 43 deletions(-) create mode 100644 changelog.d/5047.feature create mode 100644 synapse/handlers/account_validity.py create mode 100644 synapse/res/templates/mail-expiry.css create mode 100644 synapse/res/templates/notice_expiry.html create mode 100644 synapse/res/templates/notice_expiry.txt create mode 100644 synapse/rest/client/v2_alpha/account_validity.py diff --git a/changelog.d/5047.feature b/changelog.d/5047.feature new file mode 100644 index 0000000000..12766a82a7 --- /dev/null +++ b/changelog.d/5047.feature @@ -0,0 +1 @@ +Add time-based account expiration. 
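Before the configuration and handler diffs that follow, a quick orientation on the renewal flow this patch introduces: each account gets an expiry timestamp, a background job emails a one-off renewal link shortly before that timestamp, and visiting the link pushes the expiry back by one validity period. The sketch below (standard-library Python only; `PUBLIC_BASEURL` is a made-up placeholder for the `public_baseurl` setting) shows how such a renewal URL is assembled from a random 32-character token, mirroring the handler code later in this patch:

```python
import secrets
import string

PUBLIC_BASEURL = "https://matrix.example.com/"  # placeholder for public_baseurl


def make_renewal_url():
    # The real handler uses synapse.util.stringutils.random_string(32); this is
    # the same idea using only the standard library.
    token = "".join(secrets.choice(string.ascii_letters) for _ in range(32))
    # Mirrors the URL built in AccountValidityHandler._send_renewal_email().
    return "%s_matrix/client/unstable/account_validity/renew?token=%s" % (
        PUBLIC_BASEURL,
        token,
    )


print(make_renewal_url())
```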
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 5594c8b9af..8bbd437239 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -643,11 +643,31 @@ uploads_path: "DATADIR/uploads" # #enable_registration: false -# Optional account validity parameter. This allows for, e.g., accounts to -# be denied any request after a given period. +# Optional account validity configuration. This allows for accounts to be denied +# any request after a given period. +# +# ``enabled`` defines whether the account validity feature is enabled. Defaults +# to False. +# +# ``period`` allows setting the period after which an account is valid +# after its registration. When renewing the account, its validity period +# will be extended by this amount of time. This parameter is required when using +# the account validity feature. +# +# ``renew_at`` is the amount of time before an account's expiry date at which +# Synapse will send an email to the account's email address with a renewal link. +# This needs the ``email`` and ``public_baseurl`` configuration sections to be +# filled. +# +# ``renew_email_subject`` is the subject of the email sent out with the renewal +# link. ``%(app)s`` can be used as a placeholder for the ``app_name`` parameter +# from the ``email`` section. # #account_validity: +# enabled: True # period: 6w +# renew_at: 1w +# renew_email_subject: "Renew your %(app)s account" # The user must provide all of the below types of 3PID when registering. # @@ -890,7 +910,7 @@ password_config: -# Enable sending emails for notification events +# Enable sending emails for notification events or expiry notices # Defining a custom URL for Riot is only needed if email notifications # should contain links to a self-hosted installation of Riot; when set # the "app_name" setting is ignored. @@ -912,6 +932,9 @@ password_config: # #template_dir: res/templates # notif_template_html: notif_mail.html # notif_template_text: notif_mail.txt +# # Templates for account expiry notices. +# expiry_template_html: notice_expiry.html +# expiry_template_text: notice_expiry.txt # notif_for_new_users: True # riot_base_url: "http://localhost/riot" diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 976e0dd18b..4482962510 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -230,8 +230,9 @@ class Auth(object): # Deny the request if the user account has expired. 
if self._account_validity.enabled: - expiration_ts = yield self.store.get_expiration_ts_for_user(user) - if self.clock.time_msec() >= expiration_ts: + user_id = user.to_string() + expiration_ts = yield self.store.get_expiration_ts_for_user(user_id) + if expiration_ts and self.clock.time_msec() >= expiration_ts: raise AuthError( 403, "User account has expired", diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index 93d70cff14..60827be72f 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -71,6 +71,8 @@ class EmailConfig(Config): self.email_notif_from = email_config["notif_from"] self.email_notif_template_html = email_config["notif_template_html"] self.email_notif_template_text = email_config["notif_template_text"] + self.email_expiry_template_html = email_config["expiry_template_html"] + self.email_expiry_template_text = email_config["expiry_template_text"] template_dir = email_config.get("template_dir") # we need an absolute path, because we change directory after starting (and @@ -120,7 +122,7 @@ class EmailConfig(Config): def default_config(self, config_dir_path, server_name, **kwargs): return """ - # Enable sending emails for notification events + # Enable sending emails for notification events or expiry notices # Defining a custom URL for Riot is only needed if email notifications # should contain links to a self-hosted installation of Riot; when set # the "app_name" setting is ignored. @@ -142,6 +144,9 @@ class EmailConfig(Config): # #template_dir: res/templates # notif_template_html: notif_mail.html # notif_template_text: notif_mail.txt + # # Templates for account expiry notices. + # expiry_template_html: notice_expiry.html + # expiry_template_text: notice_expiry.txt # notif_for_new_users: True # riot_base_url: "http://localhost/riot" """ diff --git a/synapse/config/registration.py b/synapse/config/registration.py index b7a7b4f1cf..129c208204 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -21,12 +21,26 @@ from synapse.util.stringutils import random_string_with_symbols class AccountValidityConfig(Config): - def __init__(self, config): - self.enabled = (len(config) > 0) + def __init__(self, config, synapse_config): + self.enabled = config.get("enabled", False) + self.renew_by_email_enabled = ("renew_at" in config) - period = config.get("period", None) - if period: - self.period = self.parse_duration(period) + if self.enabled: + if "period" in config: + self.period = self.parse_duration(config["period"]) + else: + raise ConfigError("'period' is required when using account validity") + + if "renew_at" in config: + self.renew_at = self.parse_duration(config["renew_at"]) + + if "renew_email_subject" in config: + self.renew_email_subject = config["renew_email_subject"] + else: + self.renew_email_subject = "Renew your %(app)s account" + + if self.renew_by_email_enabled and "public_baseurl" not in synapse_config: + raise ConfigError("Can't send renewal emails without 'public_baseurl'") class RegistrationConfig(Config): @@ -40,7 +54,9 @@ class RegistrationConfig(Config): strtobool(str(config["disable_registration"])) ) - self.account_validity = AccountValidityConfig(config.get("account_validity", {})) + self.account_validity = AccountValidityConfig( + config.get("account_validity", {}), config, + ) self.registrations_require_3pid = config.get("registrations_require_3pid", []) self.allowed_local_3pids = config.get("allowed_local_3pids", []) @@ -86,11 +102,31 @@ class RegistrationConfig(Config): # 
#enable_registration: false - # Optional account validity parameter. This allows for, e.g., accounts to - # be denied any request after a given period. + # Optional account validity configuration. This allows for accounts to be denied + # any request after a given period. + # + # ``enabled`` defines whether the account validity feature is enabled. Defaults + # to False. + # + # ``period`` allows setting the period after which an account is valid + # after its registration. When renewing the account, its validity period + # will be extended by this amount of time. This parameter is required when using + # the account validity feature. + # + # ``renew_at`` is the amount of time before an account's expiry date at which + # Synapse will send an email to the account's email address with a renewal link. + # This needs the ``email`` and ``public_baseurl`` configuration sections to be + # filled. + # + # ``renew_email_subject`` is the subject of the email sent out with the renewal + # link. ``%%(app)s`` can be used as a placeholder for the ``app_name`` parameter + # from the ``email`` section. # #account_validity: + # enabled: True # period: 6w + # renew_at: 1w + # renew_email_subject: "Renew your %%(app)s account" # The user must provide all of the below types of 3PID when registering. # diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py new file mode 100644 index 0000000000..e82049e42d --- /dev/null +++ b/synapse/handlers/account_validity.py @@ -0,0 +1,228 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import email.mime.multipart +import email.utils +import logging +from email.mime.multipart import MIMEMultipart +from email.mime.text import MIMEText + +from twisted.internet import defer + +from synapse.api.errors import StoreError +from synapse.types import UserID +from synapse.util import stringutils +from synapse.util.logcontext import make_deferred_yieldable + +try: + from synapse.push.mailer import load_jinja2_templates +except ImportError: + load_jinja2_templates = None + +logger = logging.getLogger(__name__) + + +class AccountValidityHandler(object): + def __init__(self, hs): + self.hs = hs + self.store = self.hs.get_datastore() + self.sendmail = self.hs.get_sendmail() + self.clock = self.hs.get_clock() + + self._account_validity = self.hs.config.account_validity + + if self._account_validity.renew_by_email_enabled and load_jinja2_templates: + # Don't do email-specific configuration if renewal by email is disabled. + try: + app_name = self.hs.config.email_app_name + + self._subject = self._account_validity.renew_email_subject % { + "app": app_name, + } + + self._from_string = self.hs.config.email_notif_from % { + "app": app_name, + } + except Exception: + # If substitution failed, fall back to the bare strings. 
+ self._subject = self._account_validity.renew_email_subject + self._from_string = self.hs.config.email_notif_from + + self._raw_from = email.utils.parseaddr(self._from_string)[1] + + self._template_html, self._template_text = load_jinja2_templates( + config=self.hs.config, + template_html_name=self.hs.config.email_expiry_template_html, + template_text_name=self.hs.config.email_expiry_template_text, + ) + + # Check the renewal emails to send and send them every 30min. + self.clock.looping_call( + self.send_renewal_emails, + 30 * 60 * 1000, + ) + + @defer.inlineCallbacks + def send_renewal_emails(self): + """Gets the list of users whose account is expiring in the amount of time + configured in the ``renew_at`` parameter from the ``account_validity`` + configuration, and sends renewal emails to all of these users as long as they + have an email 3PID attached to their account. + """ + expiring_users = yield self.store.get_users_expiring_soon() + + if expiring_users: + for user in expiring_users: + yield self._send_renewal_email( + user_id=user["user_id"], + expiration_ts=user["expiration_ts_ms"], + ) + + @defer.inlineCallbacks + def _send_renewal_email(self, user_id, expiration_ts): + """Sends out a renewal email to every email address attached to the given user + with a unique link allowing them to renew their account. + + Args: + user_id (str): ID of the user to send email(s) to. + expiration_ts (int): Timestamp in milliseconds for the expiration date of + this user's account (used in the email templates). + """ + addresses = yield self._get_email_addresses_for_user(user_id) + + # Stop right here if the user doesn't have at least one email address. + # In this case, they will have to ask their server admin to renew their + # account manually. + if not addresses: + return + + try: + user_display_name = yield self.store.get_profile_displayname( + UserID.from_string(user_id).localpart + ) + if user_display_name is None: + user_display_name = user_id + except StoreError: + user_display_name = user_id + + renewal_token = yield self._get_renewal_token(user_id) + url = "%s_matrix/client/unstable/account_validity/renew?token=%s" % ( + self.hs.config.public_baseurl, + renewal_token, + ) + + template_vars = { + "display_name": user_display_name, + "expiration_ts": expiration_ts, + "url": url, + } + + html_text = self._template_html.render(**template_vars) + html_part = MIMEText(html_text, "html", "utf8") + + plain_text = self._template_text.render(**template_vars) + text_part = MIMEText(plain_text, "plain", "utf8") + + for address in addresses: + raw_to = email.utils.parseaddr(address)[1] + + multipart_msg = MIMEMultipart('alternative') + multipart_msg['Subject'] = self._subject + multipart_msg['From'] = self._from_string + multipart_msg['To'] = address + multipart_msg['Date'] = email.utils.formatdate() + multipart_msg['Message-ID'] = email.utils.make_msgid() + multipart_msg.attach(text_part) + multipart_msg.attach(html_part) + + logger.info("Sending renewal email to %s", address) + + yield make_deferred_yieldable(self.sendmail( + self.hs.config.email_smtp_host, + self._raw_from, raw_to, multipart_msg.as_string().encode('utf8'), + reactor=self.hs.get_reactor(), + port=self.hs.config.email_smtp_port, + requireAuthentication=self.hs.config.email_smtp_user is not None, + username=self.hs.config.email_smtp_user, + password=self.hs.config.email_smtp_pass, + requireTransportSecurity=self.hs.config.require_transport_security + )) + + yield self.store.set_renewal_mail_status( + user_id=user_id, + 
email_sent=True, + ) + + @defer.inlineCallbacks + def _get_email_addresses_for_user(self, user_id): + """Retrieve the list of email addresses attached to a user's account. + + Args: + user_id (str): ID of the user to lookup email addresses for. + + Returns: + defer.Deferred[list[str]]: Email addresses for this account. + """ + threepids = yield self.store.user_get_threepids(user_id) + + addresses = [] + for threepid in threepids: + if threepid["medium"] == "email": + addresses.append(threepid["address"]) + + defer.returnValue(addresses) + + @defer.inlineCallbacks + def _get_renewal_token(self, user_id): + """Generates a 32-byte long random string that will be inserted into the + user's renewal email's unique link, then saves it into the database. + + Args: + user_id (str): ID of the user to generate a string for. + + Returns: + defer.Deferred[str]: The generated string. + + Raises: + StoreError(500): Couldn't generate a unique string after 5 attempts. + """ + attempts = 0 + while attempts < 5: + try: + renewal_token = stringutils.random_string(32) + yield self.store.set_renewal_token_for_user(user_id, renewal_token) + defer.returnValue(renewal_token) + except StoreError: + attempts += 1 + raise StoreError(500, "Couldn't generate a unique string as refresh string.") + + @defer.inlineCallbacks + def renew_account(self, renewal_token): + """Renews the account attached to a given renewal token by pushing back the + expiration date by the current validity period in the server's configuration. + + Args: + renewal_token (str): Token sent with the renewal request. + """ + user_id = yield self.store.get_user_from_renewal_token(renewal_token) + + logger.debug("Renewing an account for user %s", user_id) + + new_expiration_date = self.clock.time_msec() + self._account_validity.period + + yield self.store.renew_account_for_user( + user_id=user_id, + new_expiration_ts=new_expiration_date, + ) diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 1eb5be0957..c269bcf4a4 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -521,11 +521,11 @@ def format_ts_filter(value, format): return time.strftime(format, time.localtime(value / 1000)) -def load_jinja2_templates(config): +def load_jinja2_templates(config, template_html_name, template_text_name): """Load the jinja2 email templates from disk Returns: - (notif_template_html, notif_template_text) + (template_html, template_text) """ logger.info("loading email templates from '%s'", config.email_template_dir) loader = jinja2.FileSystemLoader(config.email_template_dir) @@ -533,14 +533,10 @@ def load_jinja2_templates(config): env.filters["format_ts"] = format_ts_filter env.filters["mxc_to_http"] = _create_mxc_to_http_filter(config) - notif_template_html = env.get_template( - config.email_notif_template_html - ) - notif_template_text = env.get_template( - config.email_notif_template_text - ) + template_html = env.get_template(template_html_name) + template_text = env.get_template(template_text_name) - return notif_template_html, notif_template_text + return template_html, template_text def _create_mxc_to_http_filter(config): diff --git a/synapse/push/pusher.py b/synapse/push/pusher.py index b33f2a357b..14bc7823cf 100644 --- a/synapse/push/pusher.py +++ b/synapse/push/pusher.py @@ -44,7 +44,11 @@ class PusherFactory(object): if hs.config.email_enable_notifs: self.mailers = {} # app_name -> Mailer - templates = load_jinja2_templates(hs.config) + templates = load_jinja2_templates( + config=hs.config, + 
template_html_name=hs.config.email_notif_template_html, + template_text_name=hs.config.email_notif_template_text, + ) self.notif_template_html, self.notif_template_text = templates self.pusher_types["email"] = self._create_email_pusher diff --git a/synapse/res/templates/mail-expiry.css b/synapse/res/templates/mail-expiry.css new file mode 100644 index 0000000000..3dea486467 --- /dev/null +++ b/synapse/res/templates/mail-expiry.css @@ -0,0 +1,4 @@ +.noticetext { + margin-top: 10px; + margin-bottom: 10px; +} diff --git a/synapse/res/templates/notice_expiry.html b/synapse/res/templates/notice_expiry.html new file mode 100644 index 0000000000..f0d7c66e1b --- /dev/null +++ b/synapse/res/templates/notice_expiry.html @@ -0,0 +1,43 @@ + + + + + + + + + + + + +
+ + + + + + + + +
+
Hi {{ display_name }},
+
+
Your account will expire on {{ expiration_ts|format_ts("%d-%m-%Y") }}. This means that you will lose access to your account after this date.
+
To extend the validity of your account, please click on the link below (or copy and paste it into a new browser tab):
+ +
+
+ + diff --git a/synapse/res/templates/notice_expiry.txt b/synapse/res/templates/notice_expiry.txt new file mode 100644 index 0000000000..41f1c4279c --- /dev/null +++ b/synapse/res/templates/notice_expiry.txt @@ -0,0 +1,7 @@ +Hi {{ display_name }}, + +Your account will expire on {{ expiration_ts|format_ts("%d-%m-%Y") }}. This means that you will lose access to your account after this date. + +To extend the validity of your account, please click on the link bellow (or copy and paste it to a new browser tab): + +{{ url }} diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index 91f5247d52..a66885d349 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -33,6 +33,7 @@ from synapse.rest.client.v1 import ( from synapse.rest.client.v2_alpha import ( account, account_data, + account_validity, auth, capabilities, devices, @@ -109,3 +110,4 @@ class ClientRestResource(JsonResource): groups.register_servlets(hs, client_resource) room_upgrade_rest_servlet.register_servlets(hs, client_resource) capabilities.register_servlets(hs, client_resource) + account_validity.register_servlets(hs, client_resource) diff --git a/synapse/rest/client/v2_alpha/account_validity.py b/synapse/rest/client/v2_alpha/account_validity.py new file mode 100644 index 0000000000..1ff6a6b638 --- /dev/null +++ b/synapse/rest/client/v2_alpha/account_validity.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from twisted.internet import defer + +from synapse.api.errors import SynapseError +from synapse.http.server import finish_request +from synapse.http.servlet import RestServlet + +from ._base import client_v2_patterns + +logger = logging.getLogger(__name__) + + +class AccountValidityRenewServlet(RestServlet): + PATTERNS = client_v2_patterns("/account_validity/renew$") + SUCCESS_HTML = b"Your account has been successfully renewed." 
+ + def __init__(self, hs): + """ + Args: + hs (synapse.server.HomeServer): server + """ + super(AccountValidityRenewServlet, self).__init__() + + self.hs = hs + self.account_activity_handler = hs.get_account_validity_handler() + + @defer.inlineCallbacks + def on_GET(self, request): + if b"token" not in request.args: + raise SynapseError(400, "Missing renewal token") + renewal_token = request.args[b"token"][0] + + yield self.account_activity_handler.renew_account(renewal_token.decode('utf8')) + + request.setResponseCode(200) + request.setHeader(b"Content-Type", b"text/html; charset=utf-8") + request.setHeader(b"Content-Length", b"%d" % ( + len(AccountValidityRenewServlet.SUCCESS_HTML), + )) + request.write(AccountValidityRenewServlet.SUCCESS_HTML) + finish_request(request) + defer.returnValue(None) + + +def register_servlets(hs, http_server): + AccountValidityRenewServlet(hs).register(http_server) diff --git a/synapse/server.py b/synapse/server.py index dc8f1ccb8c..8c30ac2fa5 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -47,6 +47,7 @@ from synapse.federation.transport.client import TransportLayerClient from synapse.groups.attestations import GroupAttestationSigning, GroupAttestionRenewer from synapse.groups.groups_server import GroupsServerHandler from synapse.handlers import Handlers +from synapse.handlers.account_validity import AccountValidityHandler from synapse.handlers.acme import AcmeHandler from synapse.handlers.appservice import ApplicationServicesHandler from synapse.handlers.auth import AuthHandler, MacaroonGenerator @@ -183,6 +184,7 @@ class HomeServer(object): 'room_context_handler', 'sendmail', 'registration_handler', + 'account_validity_handler', ] REQUIRED_ON_MASTER_STARTUP = [ @@ -506,6 +508,9 @@ class HomeServer(object): def build_registration_handler(self): return RegistrationHandler(self) + def build_account_validity_handler(self): + return AccountValidityHandler(self) + def remove_pusher(self, app_id, push_key, user_id): return self.get_pusherpool().remove_pusher(app_id, push_key, user_id) diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index eede8ae4d2..a78850259f 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -32,6 +32,7 @@ class RegistrationWorkerStore(SQLBaseStore): super(RegistrationWorkerStore, self).__init__(db_conn, hs) self.config = hs.config + self.clock = hs.get_clock() @cached() def get_user_by_id(self, user_id): @@ -87,25 +88,156 @@ class RegistrationWorkerStore(SQLBaseStore): ) @cachedInlineCallbacks() - def get_expiration_ts_for_user(self, user): + def get_expiration_ts_for_user(self, user_id): """Get the expiration timestamp for the account bearing a given user ID. Args: - user (str): The ID of the user. + user_id (str): The ID of the user. Returns: defer.Deferred: None, if the account has no expiration timestamp, - otherwise int representation of the timestamp (as a number of - milliseconds since epoch). + otherwise int representation of the timestamp (as a number of + milliseconds since epoch). 
""" res = yield self._simple_select_one_onecol( table="account_validity", - keyvalues={"user_id": user.to_string()}, + keyvalues={"user_id": user_id}, retcol="expiration_ts_ms", allow_none=True, - desc="get_expiration_date_for_user", + desc="get_expiration_ts_for_user", ) defer.returnValue(res) + @defer.inlineCallbacks + def renew_account_for_user(self, user_id, new_expiration_ts): + """Updates the account validity table with a new timestamp for a given + user, removes the existing renewal token from this user, and unsets the + flag indicating that an email has been sent for renewing this account. + + Args: + user_id (str): ID of the user whose account validity to renew. + new_expiration_ts: New expiration date, as a timestamp in milliseconds + since epoch. + """ + def renew_account_for_user_txn(txn): + self._simple_update_txn( + txn=txn, + table="account_validity", + keyvalues={"user_id": user_id}, + updatevalues={ + "expiration_ts_ms": new_expiration_ts, + "email_sent": False, + "renewal_token": None, + }, + ) + self._invalidate_cache_and_stream( + txn, self.get_expiration_ts_for_user, (user_id,), + ) + + yield self.runInteraction( + "renew_account_for_user", + renew_account_for_user_txn, + ) + + @defer.inlineCallbacks + def set_renewal_token_for_user(self, user_id, renewal_token): + """Defines a renewal token for a given user. + + Args: + user_id (str): ID of the user to set the renewal token for. + renewal_token (str): Random unique string that will be used to renew the + user's account. + + Raises: + StoreError: The provided token is already set for another user. + """ + yield self._simple_update_one( + table="account_validity", + keyvalues={"user_id": user_id}, + updatevalues={"renewal_token": renewal_token}, + desc="set_renewal_token_for_user", + ) + + @defer.inlineCallbacks + def get_user_from_renewal_token(self, renewal_token): + """Get a user ID from a renewal token. + + Args: + renewal_token (str): The renewal token to perform the lookup with. + + Returns: + defer.Deferred[str]: The ID of the user to which the token belongs. + """ + res = yield self._simple_select_one_onecol( + table="account_validity", + keyvalues={"renewal_token": renewal_token}, + retcol="user_id", + desc="get_user_from_renewal_token", + ) + + defer.returnValue(res) + + @defer.inlineCallbacks + def get_renewal_token_for_user(self, user_id): + """Get the renewal token associated with a given user ID. + + Args: + user_id (str): The user ID to lookup a token for. + + Returns: + defer.Deferred[str]: The renewal token associated with this user ID. + """ + res = yield self._simple_select_one_onecol( + table="account_validity", + keyvalues={"user_id": user_id}, + retcol="renewal_token", + desc="get_renewal_token_for_user", + ) + + defer.returnValue(res) + + @defer.inlineCallbacks + def get_users_expiring_soon(self): + """Selects users whose account will expire in the [now, now + renew_at] time + window (see configuration for account_validity for information on what renew_at + refers to). + + Returns: + Deferred: Resolves to a list[dict[user_id (str), expiration_ts_ms (int)]] + """ + def select_users_txn(txn, now_ms, renew_at): + sql = ( + "SELECT user_id, expiration_ts_ms FROM account_validity" + " WHERE email_sent = ? AND (expiration_ts_ms - ?) <= ?" 
+ ) + values = [False, now_ms, renew_at] + txn.execute(sql, values) + return self.cursor_to_dict(txn) + + res = yield self.runInteraction( + "get_users_expiring_soon", + select_users_txn, + self.clock.time_msec(), self.config.account_validity.renew_at, + ) + + defer.returnValue(res) + + @defer.inlineCallbacks + def set_renewal_mail_status(self, user_id, email_sent): + """Sets or unsets the flag that indicates whether a renewal email has been sent + to the user (and the user hasn't renewed their account yet). + + Args: + user_id (str): ID of the user to set/unset the flag for. + email_sent (bool): Flag which indicates whether a renewal email has been sent + to this user. + """ + yield self._simple_update_one( + table="account_validity", + keyvalues={"user_id": user_id}, + updatevalues={"email_sent": email_sent}, + desc="set_renewal_mail_status", + ) + @defer.inlineCallbacks def is_server_admin(self, user): res = yield self._simple_select_one_onecol( @@ -508,22 +640,24 @@ class RegistrationStore(RegistrationWorkerStore, } ) - if self._account_validity.enabled: - now_ms = self.clock.time_msec() - expiration_ts = now_ms + self._account_validity.period - self._simple_insert_txn( - txn, - "account_validity", - values={ - "user_id": user_id, - "expiration_ts_ms": expiration_ts, - } - ) except self.database_engine.module.IntegrityError: raise StoreError( 400, "User ID already taken.", errcode=Codes.USER_IN_USE ) + if self._account_validity.enabled: + now_ms = self.clock.time_msec() + expiration_ts = now_ms + self._account_validity.period + self._simple_insert_txn( + txn, + "account_validity", + values={ + "user_id": user_id, + "expiration_ts_ms": expiration_ts, + "email_sent": False, + } + ) + if token: # it's possible for this to get a conflict, but only for a single user # since tokens are namespaced based on their user ID diff --git a/synapse/storage/schema/delta/54/account_validity.sql b/synapse/storage/schema/delta/54/account_validity.sql index 57249262d7..2357626000 100644 --- a/synapse/storage/schema/delta/54/account_validity.sql +++ b/synapse/storage/schema/delta/54/account_validity.sql @@ -13,8 +13,15 @@ * limitations under the License. */ +DROP TABLE IF EXISTS account_validity; + -- Track what users are in public rooms. 
CREATE TABLE IF NOT EXISTS account_validity ( user_id TEXT PRIMARY KEY, - expiration_ts_ms BIGINT NOT NULL + expiration_ts_ms BIGINT NOT NULL, + email_sent BOOLEAN NOT NULL, + renewal_token TEXT ); + +CREATE INDEX account_validity_email_sent_idx ON account_validity(email_sent, expiration_ts_ms) +CREATE UNIQUE INDEX account_validity_renewal_string_idx ON account_validity(renewal_token) diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index d3611ed21f..8fb5140a05 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -1,14 +1,22 @@ import datetime import json +import os + +import pkg_resources from synapse.api.constants import LoginType from synapse.api.errors import Codes from synapse.appservice import ApplicationService from synapse.rest.client.v1 import admin, login -from synapse.rest.client.v2_alpha import register, sync +from synapse.rest.client.v2_alpha import account_validity, register, sync from tests import unittest +try: + from synapse.push.mailer import load_jinja2_templates +except ImportError: + load_jinja2_templates = None + class RegisterRestServletTestCase(unittest.HomeserverTestCase): @@ -197,6 +205,7 @@ class AccountValidityTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): config = self.default_config() + # Test for account expiring after a week. config.enable_registration = True config.account_validity.enabled = True config.account_validity.period = 604800000 # Time in ms for 1 week @@ -228,3 +237,92 @@ class AccountValidityTestCase(unittest.HomeserverTestCase): self.assertEquals( channel.json_body["errcode"], Codes.EXPIRED_ACCOUNT, channel.result, ) + + +class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): + + skip = "No Jinja installed" if not load_jinja2_templates else None + servlets = [ + register.register_servlets, + admin.register_servlets, + login.register_servlets, + sync.register_servlets, + account_validity.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + config = self.default_config() + # Test for account expiring after a week and renewal emails being sent 2 + # days before expiry. + config.enable_registration = True + config.account_validity.enabled = True + config.account_validity.renew_by_email_enabled = True + config.account_validity.period = 604800000 # Time in ms for 1 week + config.account_validity.renew_at = 172800000 # Time in ms for 2 days + config.account_validity.renew_email_subject = "Renew your account" + + # Email config. + self.email_attempts = [] + + def sendmail(*args, **kwargs): + self.email_attempts.append((args, kwargs)) + return + + config.email_template_dir = os.path.abspath( + pkg_resources.resource_filename('synapse', 'res/templates') + ) + config.email_expiry_template_html = "notice_expiry.html" + config.email_expiry_template_text = "notice_expiry.txt" + config.email_smtp_host = "127.0.0.1" + config.email_smtp_port = 20 + config.require_transport_security = False + config.email_smtp_user = None + config.email_smtp_pass = None + config.email_notif_from = "test@example.com" + + self.hs = self.setup_test_homeserver(config=config, sendmail=sendmail) + + self.store = self.hs.get_datastore() + + return self.hs + + def test_renewal_email(self): + user_id = self.register_user("kermit", "monkey") + tok = self.login("kermit", "monkey") + # We need to manually add an email address otherwise the handler will do + # nothing. 
+ now = self.hs.clock.time_msec() + self.get_success(self.store.user_add_threepid( + user_id=user_id, medium="email", address="kermit@example.com", + validated_at=now, added_at=now, + )) + + # The specific endpoint doesn't matter, all we need is an authenticated + # endpoint. + request, channel = self.make_request( + b"GET", "/sync", access_token=tok, + ) + self.render(request) + self.assertEquals(channel.result["code"], b"200", channel.result) + + # Move 6 days forward. This should trigger a renewal email to be sent. + self.reactor.advance(datetime.timedelta(days=6).total_seconds()) + self.assertEqual(len(self.email_attempts), 1) + + # Retrieving the URL from the email is too much pain for now, so we + # retrieve the token from the DB. + renewal_token = self.get_success(self.store.get_renewal_token_for_user(user_id)) + url = "/_matrix/client/unstable/account_validity/renew?token=%s" % renewal_token + request, channel = self.make_request(b"GET", url) + self.render(request) + self.assertEquals(channel.result["code"], b"200", channel.result) + + # Move 3 days forward. If the renewal failed, every authed request with + # our access token should be denied from now, otherwise they should + # succeed. + self.reactor.advance(datetime.timedelta(days=3).total_seconds()) + request, channel = self.make_request( + b"GET", "/sync", access_token=tok, + ) + self.render(request) + self.assertEquals(channel.result["code"], b"200", channel.result) From eaf41a943b2cd3f7f32d142c9552d558eb37a074 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 16 Apr 2019 20:13:59 +0100 Subject: [PATCH 036/429] Add management endpoints for account validity --- changelog.d/5073.feature | 1 + docs/admin_api/account_validity.rst | 42 ++++++++ synapse/api/auth.py | 2 +- synapse/handlers/account_validity.py | 33 ++++++- synapse/rest/client/v1/admin.py | 39 ++++++++ .../rest/client/v2_alpha/account_validity.py | 31 +++++- synapse/storage/registration.py | 29 +++--- tests/rest/client/v2_alpha/test_register.py | 95 +++++++++++++++++-- 8 files changed, 246 insertions(+), 26 deletions(-) create mode 100644 changelog.d/5073.feature create mode 100644 docs/admin_api/account_validity.rst diff --git a/changelog.d/5073.feature b/changelog.d/5073.feature new file mode 100644 index 0000000000..12766a82a7 --- /dev/null +++ b/changelog.d/5073.feature @@ -0,0 +1 @@ +Add time-based account expiration. diff --git a/docs/admin_api/account_validity.rst b/docs/admin_api/account_validity.rst new file mode 100644 index 0000000000..980ea23605 --- /dev/null +++ b/docs/admin_api/account_validity.rst @@ -0,0 +1,42 @@ +Account validity API +==================== + +This API allows a server administrator to manage the validity of an account. To +use it, you must enable the account validity feature (under +``account_validity``) in Synapse's configuration. + +Renew account +------------- + +This API extends the validity of an account by as much time as configured in the +``period`` parameter from the ``account_validity`` configuration. + +The API is:: + + POST /_matrix/client/unstable/account_validity/send_mail + +with the following body: + +.. code:: json + + { + "user_id": "", + "expiration_ts": 0, + "enable_renewal_emails": true + } + + +``expiration_ts`` is an optional parameter and overrides the expiration date, +which otherwise defaults to now + validity period. + +``enable_renewal_emails`` is also an optional parameter and enables/disables +sending renewal emails to the user. Defaults to true. 
+ +The API returns with the new expiration date for this account, as a timestamp in +milliseconds since epoch: + +.. code:: json + + { + "expiration_ts": 0 + } diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 4482962510..960e66dbdc 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -232,7 +232,7 @@ class Auth(object): if self._account_validity.enabled: user_id = user.to_string() expiration_ts = yield self.store.get_expiration_ts_for_user(user_id) - if expiration_ts and self.clock.time_msec() >= expiration_ts: + if expiration_ts is not None and self.clock.time_msec() >= expiration_ts: raise AuthError( 403, "User account has expired", diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py index e82049e42d..261446517d 100644 --- a/synapse/handlers/account_validity.py +++ b/synapse/handlers/account_validity.py @@ -90,6 +90,11 @@ class AccountValidityHandler(object): expiration_ts=user["expiration_ts_ms"], ) + @defer.inlineCallbacks + def send_renewal_email_to_user(self, user_id): + expiration_ts = yield self.store.get_expiration_ts_for_user(user_id) + yield self._send_renewal_email(user_id, expiration_ts) + @defer.inlineCallbacks def _send_renewal_email(self, user_id, expiration_ts): """Sends out a renewal email to every email address attached to the given user @@ -217,12 +222,32 @@ class AccountValidityHandler(object): renewal_token (str): Token sent with the renewal request. """ user_id = yield self.store.get_user_from_renewal_token(renewal_token) - logger.debug("Renewing an account for user %s", user_id) + yield self.renew_account_for_user(user_id) - new_expiration_date = self.clock.time_msec() + self._account_validity.period + @defer.inlineCallbacks + def renew_account_for_user(self, user_id, expiration_ts=None, email_sent=False): + """Renews the account attached to a given user by pushing back the + expiration date by the current validity period in the server's + configuration. - yield self.store.renew_account_for_user( + Args: + renewal_token (str): Token sent with the renewal request. + expiration_ts (int): New expiration date. Defaults to now + validity period. + email_sent (bool): Whether an email has been sent for this validity period. + Defaults to False. + + Returns: + defer.Deferred[int]: New expiration date for this account, as a timestamp + in milliseconds since epoch. 
+ """ + if expiration_ts is None: + expiration_ts = self.clock.time_msec() + self._account_validity.period + + yield self.store.set_account_validity_for_user( user_id=user_id, - new_expiration_ts=new_expiration_date, + expiration_ts=expiration_ts, + email_sent=email_sent, ) + + defer.returnValue(expiration_ts) diff --git a/synapse/rest/client/v1/admin.py b/synapse/rest/client/v1/admin.py index e788769639..d27472c538 100644 --- a/synapse/rest/client/v1/admin.py +++ b/synapse/rest/client/v1/admin.py @@ -786,6 +786,44 @@ class SearchUsersRestServlet(ClientV1RestServlet): defer.returnValue((200, ret)) +class AccountValidityRenewServlet(ClientV1RestServlet): + PATTERNS = client_path_patterns("/admin/account_validity/validity$") + + def __init__(self, hs): + """ + Args: + hs (synapse.server.HomeServer): server + """ + super(AccountValidityRenewServlet, self).__init__(hs) + + self.hs = hs + self.account_activity_handler = hs.get_account_validity_handler() + self.auth = hs.get_auth() + + @defer.inlineCallbacks + def on_POST(self, request): + requester = yield self.auth.get_user_by_req(request) + is_admin = yield self.auth.is_server_admin(requester.user) + + if not is_admin: + raise AuthError(403, "You are not a server admin") + + body = parse_json_object_from_request(request) + + if "user_id" not in body: + raise SynapseError(400, "Missing property 'user_id' in the request body") + + expiration_ts = yield self.account_activity_handler.renew_account_for_user( + body["user_id"], body.get("expiration_ts"), + not body.get("enable_renewal_emails", True), + ) + + res = { + "expiration_ts": expiration_ts, + } + defer.returnValue((200, res)) + + def register_servlets(hs, http_server): WhoisRestServlet(hs).register(http_server) PurgeMediaCacheRestServlet(hs).register(http_server) @@ -801,3 +839,4 @@ def register_servlets(hs, http_server): ListMediaInRoom(hs).register(http_server) UserRegisterServlet(hs).register(http_server) VersionServlet(hs).register(http_server) + AccountValidityRenewServlet(hs).register(http_server) diff --git a/synapse/rest/client/v2_alpha/account_validity.py b/synapse/rest/client/v2_alpha/account_validity.py index 1ff6a6b638..fc8dbeb617 100644 --- a/synapse/rest/client/v2_alpha/account_validity.py +++ b/synapse/rest/client/v2_alpha/account_validity.py @@ -17,7 +17,7 @@ import logging from twisted.internet import defer -from synapse.api.errors import SynapseError +from synapse.api.errors import AuthError, SynapseError from synapse.http.server import finish_request from synapse.http.servlet import RestServlet @@ -39,6 +39,7 @@ class AccountValidityRenewServlet(RestServlet): self.hs = hs self.account_activity_handler = hs.get_account_validity_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request): @@ -58,5 +59,33 @@ class AccountValidityRenewServlet(RestServlet): defer.returnValue(None) +class AccountValiditySendMailServlet(RestServlet): + PATTERNS = client_v2_patterns("/account_validity/send_mail$") + + def __init__(self, hs): + """ + Args: + hs (synapse.server.HomeServer): server + """ + super(AccountValiditySendMailServlet, self).__init__() + + self.hs = hs + self.account_activity_handler = hs.get_account_validity_handler() + self.auth = hs.get_auth() + self.account_validity = self.hs.config.account_validity + + @defer.inlineCallbacks + def on_POST(self, request): + if not self.account_validity.renew_by_email_enabled: + raise AuthError(403, "Account renewal via email is disabled on this server.") + + requester = yield self.auth.get_user_by_req(request) 
+ user_id = requester.user.to_string() + yield self.account_activity_handler.send_renewal_email_to_user(user_id) + + defer.returnValue((200, {})) + + def register_servlets(hs, http_server): AccountValidityRenewServlet(hs).register(http_server) + AccountValiditySendMailServlet(hs).register(http_server) diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index a78850259f..dfdb4e7e34 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -108,25 +108,30 @@ class RegistrationWorkerStore(SQLBaseStore): defer.returnValue(res) @defer.inlineCallbacks - def renew_account_for_user(self, user_id, new_expiration_ts): - """Updates the account validity table with a new timestamp for a given - user, removes the existing renewal token from this user, and unsets the - flag indicating that an email has been sent for renewing this account. + def set_account_validity_for_user(self, user_id, expiration_ts, email_sent, + renewal_token=None): + """Updates the account validity properties of the given account, with the + given values. Args: - user_id (str): ID of the user whose account validity to renew. - new_expiration_ts: New expiration date, as a timestamp in milliseconds + user_id (str): ID of the account to update properties for. + expiration_ts (int): New expiration date, as a timestamp in milliseconds since epoch. + email_sent (bool): True means a renewal email has been sent for this + account and there's no need to send another one for the current validity + period. + renewal_token (str): Renewal token the user can use to extend the validity + of their account. Defaults to no token. """ - def renew_account_for_user_txn(txn): + def set_account_validity_for_user_txn(txn): self._simple_update_txn( txn=txn, table="account_validity", keyvalues={"user_id": user_id}, updatevalues={ - "expiration_ts_ms": new_expiration_ts, - "email_sent": False, - "renewal_token": None, + "expiration_ts_ms": expiration_ts, + "email_sent": email_sent, + "renewal_token": renewal_token, }, ) self._invalidate_cache_and_stream( @@ -134,8 +139,8 @@ class RegistrationWorkerStore(SQLBaseStore): ) yield self.runInteraction( - "renew_account_for_user", - renew_account_for_user_txn, + "set_account_validity_for_user", + set_account_validity_for_user_txn, ) @defer.inlineCallbacks diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 8fb5140a05..3d44667489 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -201,6 +201,7 @@ class AccountValidityTestCase(unittest.HomeserverTestCase): admin.register_servlets, login.register_servlets, sync.register_servlets, + account_validity.register_servlets, ] def make_homeserver(self, reactor, clock): @@ -238,6 +239,68 @@ class AccountValidityTestCase(unittest.HomeserverTestCase): channel.json_body["errcode"], Codes.EXPIRED_ACCOUNT, channel.result, ) + def test_manual_renewal(self): + user_id = self.register_user("kermit", "monkey") + tok = self.login("kermit", "monkey") + + self.reactor.advance(datetime.timedelta(weeks=1).total_seconds()) + + # If we register the admin user at the beginning of the test, it will + # expire at the same time as the normal user and the renewal request + # will be denied. 
+ self.register_user("admin", "adminpassword", admin=True) + admin_tok = self.login("admin", "adminpassword") + + url = "/_matrix/client/unstable/admin/account_validity/validity" + params = { + "user_id": user_id, + } + request_data = json.dumps(params) + request, channel = self.make_request( + b"POST", url, request_data, access_token=admin_tok, + ) + self.render(request) + self.assertEquals(channel.result["code"], b"200", channel.result) + + # The specific endpoint doesn't matter, all we need is an authenticated + # endpoint. + request, channel = self.make_request( + b"GET", "/sync", access_token=tok, + ) + self.render(request) + self.assertEquals(channel.result["code"], b"200", channel.result) + + def test_manual_expire(self): + user_id = self.register_user("kermit", "monkey") + tok = self.login("kermit", "monkey") + + self.register_user("admin", "adminpassword", admin=True) + admin_tok = self.login("admin", "adminpassword") + + url = "/_matrix/client/unstable/admin/account_validity/validity" + params = { + "user_id": user_id, + "expiration_ts": 0, + "enable_renewal_emails": False, + } + request_data = json.dumps(params) + request, channel = self.make_request( + b"POST", url, request_data, access_token=admin_tok, + ) + self.render(request) + self.assertEquals(channel.result["code"], b"200", channel.result) + + # The specific endpoint doesn't matter, all we need is an authenticated + # endpoint. + request, channel = self.make_request( + b"GET", "/sync", access_token=tok, + ) + self.render(request) + self.assertEquals(channel.result["code"], b"403", channel.result) + self.assertEquals( + channel.json_body["errcode"], Codes.EXPIRED_ACCOUNT, channel.result, + ) + class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): @@ -287,6 +350,8 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): return self.hs def test_renewal_email(self): + self.email_attempts = [] + user_id = self.register_user("kermit", "monkey") tok = self.login("kermit", "monkey") # We need to manually add an email address otherwise the handler will do @@ -297,14 +362,6 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): validated_at=now, added_at=now, )) - # The specific endpoint doesn't matter, all we need is an authenticated - # endpoint. - request, channel = self.make_request( - b"GET", "/sync", access_token=tok, - ) - self.render(request) - self.assertEquals(channel.result["code"], b"200", channel.result) - # Move 6 days forward. This should trigger a renewal email to be sent. self.reactor.advance(datetime.timedelta(days=6).total_seconds()) self.assertEqual(len(self.email_attempts), 1) @@ -326,3 +383,25 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): ) self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) + + def test_manual_email_send(self): + self.email_attempts = [] + + user_id = self.register_user("kermit", "monkey") + tok = self.login("kermit", "monkey") + # We need to manually add an email address otherwise the handler will do + # nothing. 
+ now = self.hs.clock.time_msec() + self.get_success(self.store.user_add_threepid( + user_id=user_id, medium="email", address="kermit@example.com", + validated_at=now, added_at=now, + )) + + request, channel = self.make_request( + b"POST", "/_matrix/client/unstable/account_validity/send_mail", + access_token=tok, + ) + self.render(request) + self.assertEquals(channel.result["code"], b"200", channel.result) + + self.assertEqual(len(self.email_attempts), 1) From 95c603ae6f0f1639f6299fff62d7a75002221276 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 17 Apr 2019 23:52:00 +0100 Subject: [PATCH 037/429] Update debian install docs for new key and repo (#5074) --- INSTALL.md | 30 +++++++++++++++++++++++++----- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/INSTALL.md b/INSTALL.md index a5c3c6efaa..d55a1f89ac 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -257,18 +257,38 @@ https://github.com/spantaleev/matrix-docker-ansible-deploy #### Matrix.org packages Matrix.org provides Debian/Ubuntu packages of the latest stable version of -Synapse via https://matrix.org/packages/debian/. To use them: +Synapse via https://packages.matrix.org/debian/. To use them: + +For Debian 9 (Stretch), Ubuntu 16.04 (Xenial), and later: ``` -sudo apt install -y lsb-release curl apt-transport-https -echo "deb https://matrix.org/packages/debian `lsb_release -cs` main" | +sudo apt install -y lsb-release wget apt-transport-https +sudo wget -O /usr/share/keyrings/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg +echo "deb [signed-by=/usr/share/keyrings/matrix-org-archive-keyring.gpg] https://packages.matrix.org/debian/ $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/matrix-org.list -curl "https://matrix.org/packages/debian/repo-key.asc" | - sudo apt-key add - sudo apt update sudo apt install matrix-synapse-py3 ``` +For Debian 8 (Jessie): + +``` +sudo apt install -y lsb-release wget apt-transport-https +sudo wget -O /etc/apt/trusted.gpg.d/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg +echo "deb [signed-by=5586CCC0CBBBEFC7A25811ADF473DD4473365DE1] https://packages.matrix.org/debian/ $(lsb_release -cs) main" | + sudo tee /etc/apt/sources.list.d/matrix-org.list +sudo apt update +sudo apt install matrix-synapse-py3 +``` + +**Note**: if you followed a previous version of these instructions which +recommended using `apt-key add` to add an old key from +`https://matrix.org/packages/debian/`, you should note that this key has been +revoked. You should remove the old key with `sudo apt-key remove +C35EB17E1EAE708E6603A9B3AD0592FE47F0DF61`, and follow the above instructions to +update your configuration. + + #### Downstream Debian/Ubuntu packages For `buster` and `sid`, Synapse is available in the Debian repositories and From f8826d31cdc974552351bfa7d14ad40bb225d3e7 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 18 Apr 2019 14:46:08 +0100 Subject: [PATCH 038/429] Don't crash on lack of expiry templates --- changelog.d/5077.bugfix | 1 + synapse/config/emailconfig.py | 8 ++++++-- 2 files changed, 7 insertions(+), 2 deletions(-) create mode 100644 changelog.d/5077.bugfix diff --git a/changelog.d/5077.bugfix b/changelog.d/5077.bugfix new file mode 100644 index 0000000000..f3345a6353 --- /dev/null +++ b/changelog.d/5077.bugfix @@ -0,0 +1 @@ +Don't crash on lack of expiry templates. 
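For completeness, the admin renewal endpoint added by the account-validity patches above (and exercised in `tests/rest/client/v2_alpha/test_register.py`) can be driven with any HTTP client. A hedged sketch using the `requests` library follows; the homeserver URL, admin token and user ID are placeholders, and it assumes `account_validity` is enabled on the server:

```python
import requests

HOMESERVER = "http://localhost:8008"          # placeholder homeserver URL
ADMIN_TOKEN = "ADMIN_ACCESS_TOKEN_GOES_HERE"  # placeholder admin access token

resp = requests.post(
    HOMESERVER + "/_matrix/client/unstable/admin/account_validity/validity",
    headers={"Authorization": "Bearer " + ADMIN_TOKEN},
    json={
        "user_id": "@kermit:example.com",
        # Omitting "expiration_ts" renews for one validity period from now;
        # "enable_renewal_emails" keeps renewal emails switched on for the user.
        "enable_renewal_emails": True,
    },
)
resp.raise_for_status()
print(resp.json())  # e.g. {"expiration_ts": 1556089445000}
```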
diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index 60827be72f..342a6ce5fd 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -71,8 +71,12 @@ class EmailConfig(Config): self.email_notif_from = email_config["notif_from"] self.email_notif_template_html = email_config["notif_template_html"] self.email_notif_template_text = email_config["notif_template_text"] - self.email_expiry_template_html = email_config["expiry_template_html"] - self.email_expiry_template_text = email_config["expiry_template_text"] + self.email_expiry_template_html = email_config.get( + "expiry_template_html", "notice_expiry.html", + ) + self.email_expiry_template_text = email_config.get( + "expiry_template_text", "notice_expiry.txt", + ) template_dir = email_config.get("template_dir") # we need an absolute path, because we change directory after starting (and From 3d26eae14a69c5a3b11c6f25b81c883dcdb2098d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 22 Apr 2019 14:36:51 +0100 Subject: [PATCH 039/429] add gpg key fingerprint --- INSTALL.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/INSTALL.md b/INSTALL.md index d55a1f89ac..1032a2c68e 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -281,6 +281,8 @@ sudo apt update sudo apt install matrix-synapse-py3 ``` +The fingerprint of the repository signing key is AAF9AE843A7584B5A3E4CD2BCF45A512DE2DA058. + **Note**: if you followed a previous version of these instructions which recommended using `apt-key add` to add an old key from `https://matrix.org/packages/debian/`, you should note that this key has been From 7f025eb425bae8a48b25a230d17c25ccb67cbe2d Mon Sep 17 00:00:00 2001 From: Katie Wolfe Date: Mon, 22 Apr 2019 12:59:00 -0400 Subject: [PATCH 040/429] Show heroes if room name or canonical alias are empty Fixes #4194 Signed-off-by: Katie Wolfe --- synapse/handlers/sync.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 153312e39f..f1a436011e 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -583,19 +583,17 @@ class SyncHandler(object): ) # if the room has a name or canonical_alias set, we can skip - # calculating heroes. we assume that if the event has contents, it'll - # be a valid name or canonical_alias - i.e. we're checking that they - # haven't been "deleted" by blatting {} over the top. + # calculating heroes. 
if name_id: name = yield self.store.get_event(name_id, allow_none=True) - if name and name.content: + if name and name.content and name.content.name: defer.returnValue(summary) if canonical_alias_id: canonical_alias = yield self.store.get_event( canonical_alias_id, allow_none=True, ) - if canonical_alias and canonical_alias.content: + if canonical_alias and canonical_alias.content and canonical_alias.content.alias: defer.returnValue(summary) joined_user_ids = [ From 0a4c135f68249c2345449bee6aa7d7557a61179b Mon Sep 17 00:00:00 2001 From: Katie Wolfe Date: Mon, 22 Apr 2019 13:03:59 -0400 Subject: [PATCH 041/429] Add changelog.d/5084.bugfix Signed-off-by: Katie Wolfe --- changelog.d/5084.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5084.bugfix diff --git a/changelog.d/5084.bugfix b/changelog.d/5084.bugfix new file mode 100644 index 0000000000..d5e135cde8 --- /dev/null +++ b/changelog.d/5084.bugfix @@ -0,0 +1 @@ +Fixes client-server API not sending "m.heroes" to lazy-load /sync requests when a rooms name or its canonical alias are empty From 60041eac4b88cad00c1d3da8b90735dab2f0b82b Mon Sep 17 00:00:00 2001 From: Katie Wolfe Date: Mon, 22 Apr 2019 13:10:57 -0400 Subject: [PATCH 042/429] Add full stop to 5084.bugfix Signed-off-by: Katie Wolfe --- changelog.d/5084.bugfix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/5084.bugfix b/changelog.d/5084.bugfix index d5e135cde8..9d8434460c 100644 --- a/changelog.d/5084.bugfix +++ b/changelog.d/5084.bugfix @@ -1 +1 @@ -Fixes client-server API not sending "m.heroes" to lazy-load /sync requests when a rooms name or its canonical alias are empty +Fixes client-server API not sending "m.heroes" to lazy-load /sync requests when a rooms name or its canonical alias are empty. From 5d3ed79944d47f0c9d9c040bf71530eade23e19c Mon Sep 17 00:00:00 2001 From: Katie Wolfe Date: Mon, 22 Apr 2019 12:59:00 -0400 Subject: [PATCH 043/429] Show heroes if room name or canonical alias are empty Fixes #4194 Signed-off-by: Katie Wolfe --- synapse/handlers/sync.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index f1a436011e..b3e6be6dd2 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -583,17 +583,18 @@ class SyncHandler(object): ) # if the room has a name or canonical_alias set, we can skip - # calculating heroes. + # calculating heroes. Empty strings are falsey, so we check + # for the "name" value and default to an empty string. 
if name_id: name = yield self.store.get_event(name_id, allow_none=True) - if name and name.content and name.content.name: + if name and name.content and "name" in name.content and name.content.get("name", ""): defer.returnValue(summary) if canonical_alias_id: canonical_alias = yield self.store.get_event( canonical_alias_id, allow_none=True, ) - if canonical_alias and canonical_alias.content and canonical_alias.content.alias: + if canonical_alias and canonical_alias.content and canonical_alias.content.get("alias", ""): defer.returnValue(summary) joined_user_ids = [ From b3e5db402dd96271b5a81d7f4c3191c48ac81e6c Mon Sep 17 00:00:00 2001 From: Katie Wolfe Date: Wed, 24 Apr 2019 12:04:16 -0400 Subject: [PATCH 044/429] Clean up code Signed-off-by: Katie Wolfe --- synapse/handlers/sync.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index b3e6be6dd2..9f93a7f2da 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -587,14 +587,15 @@ class SyncHandler(object): # for the "name" value and default to an empty string. if name_id: name = yield self.store.get_event(name_id, allow_none=True) - if name and name.content and "name" in name.content and name.content.get("name", ""): + if name and name.content and name.content.get("name", ""): defer.returnValue(summary) if canonical_alias_id: canonical_alias = yield self.store.get_event( canonical_alias_id, allow_none=True, ) - if canonical_alias and canonical_alias.content and canonical_alias.content.get("alias", ""): + if (canonical_alias and canonical_alias.content + and canonical_alias.content.get("alias", "")): defer.returnValue(summary) joined_user_ids = [ From 6b2b9a58c44c9c2f7efc45e8b588cde17fcdb0d4 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 25 Apr 2019 02:37:33 +1000 Subject: [PATCH 045/429] Prevent "producer not unregistered" message (#5009) --- changelog.d/5009.bugfix | 1 + synapse/rest/media/v1/_base.py | 4 ++++ 2 files changed, 5 insertions(+) create mode 100644 changelog.d/5009.bugfix diff --git a/changelog.d/5009.bugfix b/changelog.d/5009.bugfix new file mode 100644 index 0000000000..e66d3dd1aa --- /dev/null +++ b/changelog.d/5009.bugfix @@ -0,0 +1 @@ +Clients timing out/disappearing while downloading from the media repository will now no longer log a spurious "Producer was not unregistered" message. \ No newline at end of file diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py index e2b5df701d..2dcc8f74d6 100644 --- a/synapse/rest/media/v1/_base.py +++ b/synapse/rest/media/v1/_base.py @@ -191,6 +191,10 @@ def respond_with_responder(request, responder, media_type, file_size, upload_nam # in that case. logger.warning("Failed to write to consumer: %s %s", type(e), e) + # Unregister the producer, if it has one, so Twisted doesn't complain + if request.producer: + request.unregisterProducer() + finish_request(request) From 7e07dc429fb6b0207c1725ec0770d5f859623201 Mon Sep 17 00:00:00 2001 From: Katie Wolfe Date: Wed, 24 Apr 2019 12:43:18 -0400 Subject: [PATCH 046/429] Lint I probably should've just run autopep8 in the first place... 
Signed-off-by: Katie Wolfe --- synapse/handlers/sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 9f93a7f2da..7cf757f65a 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -595,7 +595,7 @@ class SyncHandler(object): canonical_alias_id, allow_none=True, ) if (canonical_alias and canonical_alias.content - and canonical_alias.content.get("alias", "")): + and canonical_alias.content.get("alias", "")): defer.returnValue(summary) joined_user_ids = [ From 788163e2048b560e62005ad3da5ba274ba2726e9 Mon Sep 17 00:00:00 2001 From: Michael Kaye <1917473+michaelkaye@users.noreply.github.com> Date: Wed, 24 Apr 2019 17:44:06 +0100 Subject: [PATCH 047/429] Remove log error for .well-known/matrix/client (#4972) --- changelog.d/4972.misc | 1 + synapse/rest/well_known.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/4972.misc diff --git a/changelog.d/4972.misc b/changelog.d/4972.misc new file mode 100644 index 0000000000..e104a9bb34 --- /dev/null +++ b/changelog.d/4972.misc @@ -0,0 +1 @@ +Reduce log level of .well-known/matrix/client responses. diff --git a/synapse/rest/well_known.py b/synapse/rest/well_known.py index ab901e63f2..a7fa4f39af 100644 --- a/synapse/rest/well_known.py +++ b/synapse/rest/well_known.py @@ -68,6 +68,6 @@ class WellKnownResource(Resource): request.setHeader(b"Content-Type", b"text/plain") return b'.well-known not available' - logger.error("returning: %s", r) + logger.debug("returning: %s", r) request.setHeader(b"Content-Type", b"application/json") return json.dumps(r).encode("utf-8") From 6824ddd93df1cfc347e4c8f423d54fab5bb732fb Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 25 Apr 2019 06:22:49 -0700 Subject: [PATCH 048/429] Config option for verifying federation certificates (MSC 1711) (#4967) --- changelog.d/4967.feature | 1 + docs/MSC1711_certificates_FAQ.md | 1 - docs/sample_config.yaml | 34 +++++++ synapse/config/server.py | 6 +- synapse/config/tls.py | 95 +++++++++++++++++-- synapse/crypto/context_factory.py | 33 +++++-- .../federation/matrix_federation_agent.py | 2 +- .../test_matrix_federation_agent.py | 3 +- 8 files changed, 158 insertions(+), 17 deletions(-) create mode 100644 changelog.d/4967.feature diff --git a/changelog.d/4967.feature b/changelog.d/4967.feature new file mode 100644 index 0000000000..7f9f81f849 --- /dev/null +++ b/changelog.d/4967.feature @@ -0,0 +1 @@ +Implementation of [MSC1711](https://github.com/matrix-org/matrix-doc/pull/1711) including config options for requiring valid TLS certificates for federation traffic, the ability to disable TLS validation for specific domains, and the ability to specify your own list of CA certificates. diff --git a/docs/MSC1711_certificates_FAQ.md b/docs/MSC1711_certificates_FAQ.md index 8eb22656db..ebfb20f5c8 100644 --- a/docs/MSC1711_certificates_FAQ.md +++ b/docs/MSC1711_certificates_FAQ.md @@ -177,7 +177,6 @@ You can do this with a `.well-known` file as follows: on `customer.example.net:8000` it correctly handles HTTP requests with Host header set to `customer.example.net:8000`. - ## FAQ ### Synapse 0.99.0 has just been released, what do I need to do right now? 
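The sample config and code below introduce `federation_verify_certificates`, a certificate-verification whitelist that accepts globs, and a custom CA list. As a rough illustration of how a glob whitelist can be compiled and consulted (this sketch uses `fnmatch.translate` rather than Synapse's own `glob_to_regex`, so treat it as an approximation of the idea only):

```python
import fnmatch
import re

def compile_whitelist(globs):
    # Each entry such as "*.onion" becomes a compiled regex.
    return [re.compile(fnmatch.translate(g)) for g in globs]

def should_verify(host, whitelist):
    # Verify certificates by default; skip only for whitelisted hosts.
    return not any(pattern.match(host) for pattern in whitelist)

whitelist = compile_whitelist(["lon.example.com", "*.onion"])
assert should_verify("matrix.org", whitelist)
assert not should_verify("abcdefghijklmnop.onion", whitelist)
```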
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index ab02e8f20e..a7f6bf31ac 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -260,6 +260,40 @@ listeners: # #tls_private_key_path: "CONFDIR/SERVERNAME.tls.key" +# Whether to verify TLS certificates when sending federation traffic. +# +# This currently defaults to `false`, however this will change in +# Synapse 1.0 when valid federation certificates will be required. +# +#federation_verify_certificates: true + +# Skip federation certificate verification on the following whitelist +# of domains. +# +# This setting should only be used in very specific cases, such as +# federation over Tor hidden services and similar. For private networks +# of homeservers, you likely want to use a private CA instead. +# +# Only effective if federation_verify_certicates is `true`. +# +#federation_certificate_verification_whitelist: +# - lon.example.com +# - *.domain.com +# - *.onion + +# List of custom certificate authorities for federation traffic. +# +# This setting should only normally be used within a private network of +# homeservers. +# +# Note that this list will replace those that are provided by your +# operating environment. Certificates must be in PEM format. +# +#federation_custom_ca_list: +# - myCA1.pem +# - myCA2.pem +# - myCA3.pem + # ACME support: This will configure Synapse to request a valid TLS certificate # for your configured `server_name` via Let's Encrypt. # diff --git a/synapse/config/server.py b/synapse/config/server.py index c5e5679d52..cdf1e4d286 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -114,11 +114,13 @@ class ServerConfig(Config): # FIXME: federation_domain_whitelist needs sytests self.federation_domain_whitelist = None federation_domain_whitelist = config.get( - "federation_domain_whitelist", None + "federation_domain_whitelist", None, ) - # turn the whitelist into a hash for speed of lookup + if federation_domain_whitelist is not None: + # turn the whitelist into a hash for speed of lookup self.federation_domain_whitelist = {} + for domain in federation_domain_whitelist: self.federation_domain_whitelist[domain] = True diff --git a/synapse/config/tls.py b/synapse/config/tls.py index f0014902da..72dd5926f9 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -24,8 +24,10 @@ import six from unpaddedbase64 import encode_base64 from OpenSSL import crypto +from twisted.internet._sslverify import Certificate, trustRootFromCertificates from synapse.config._base import Config, ConfigError +from synapse.util import glob_to_regex logger = logging.getLogger(__name__) @@ -70,6 +72,53 @@ class TlsConfig(Config): self.tls_fingerprints = list(self._original_tls_fingerprints) + # Whether to verify certificates on outbound federation traffic + self.federation_verify_certificates = config.get( + "federation_verify_certificates", False, + ) + + # Whitelist of domains to not verify certificates for + fed_whitelist_entries = config.get( + "federation_certificate_verification_whitelist", [], + ) + + # Support globs (*) in whitelist values + self.federation_certificate_verification_whitelist = [] + for entry in fed_whitelist_entries: + # Convert globs to regex + entry_regex = glob_to_regex(entry) + self.federation_certificate_verification_whitelist.append(entry_regex) + + # List of custom certificate authorities for federation traffic validation + custom_ca_list = config.get( + "federation_custom_ca_list", None, + ) + + # Read in and parse custom CA certificates + 
self.federation_ca_trust_root = None + if custom_ca_list is not None: + if len(custom_ca_list) == 0: + # A trustroot cannot be generated without any CA certificates. + # Raise an error if this option has been specified without any + # corresponding certificates. + raise ConfigError("federation_custom_ca_list specified without " + "any certificate files") + + certs = [] + for ca_file in custom_ca_list: + logger.debug("Reading custom CA certificate file: %s", ca_file) + content = self.read_file(ca_file) + + # Parse the CA certificates + try: + cert_base = Certificate.loadPEM(content) + certs.append(cert_base) + except Exception as e: + raise ConfigError("Error parsing custom CA certificate file %s: %s" + % (ca_file, e)) + + self.federation_ca_trust_root = trustRootFromCertificates(certs) + # This config option applies to non-federation HTTP clients # (e.g. for talking to recaptcha, identity servers, and such) # It should never be used in production, and is intended for @@ -99,15 +148,15 @@ class TlsConfig(Config): try: with open(self.tls_certificate_file, 'rb') as f: cert_pem = f.read() - except Exception: - logger.exception("Failed to read existing certificate off disk!") - raise + except Exception as e: + raise ConfigError("Failed to read existing certificate file %s: %s" + % (self.tls_certificate_file, e)) try: tls_certificate = crypto.load_certificate(crypto.FILETYPE_PEM, cert_pem) - except Exception: - logger.exception("Failed to parse existing certificate off disk!") - raise + except Exception as e: + raise ConfigError("Failed to parse existing certificate file %s: %s" + % (self.tls_certificate_file, e)) if not allow_self_signed: if tls_certificate.get_subject() == tls_certificate.get_issuer(): @@ -192,6 +241,40 @@ class TlsConfig(Config): # #tls_private_key_path: "%(tls_private_key_path)s" + # Whether to verify TLS certificates when sending federation traffic. + # + # This currently defaults to `false`, however this will change in + # Synapse 1.0 when valid federation certificates will be required. + # + #federation_verify_certificates: true + + # Skip federation certificate verification on the following whitelist + # of domains. + # + # This setting should only be used in very specific cases, such as + # federation over Tor hidden services and similar. For private networks + # of homeservers, you likely want to use a private CA instead. + # + # Only effective if federation_verify_certicates is `true`. + # + #federation_certificate_verification_whitelist: + # - lon.example.com + # - *.domain.com + # - *.onion + + # List of custom certificate authorities for federation traffic. + # + # This setting should only normally be used within a private network of + # homeservers. + # + # Note that this list will replace those that are provided by your + # operating environment. Certificates must be in PEM format. + # + #federation_custom_ca_list: + # - myCA1.pem + # - myCA2.pem + # - myCA3.pem + # ACME support: This will configure Synapse to request a valid TLS certificate # for your configured `server_name` via Let's Encrypt. 
# diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py index 49cbc7098f..59ea087e66 100644 --- a/synapse/crypto/context_factory.py +++ b/synapse/crypto/context_factory.py @@ -18,10 +18,10 @@ import logging from zope.interface import implementer from OpenSSL import SSL, crypto -from twisted.internet._sslverify import _defaultCurveName +from twisted.internet._sslverify import ClientTLSOptions, _defaultCurveName from twisted.internet.abstract import isIPAddress, isIPv6Address from twisted.internet.interfaces import IOpenSSLClientConnectionCreator -from twisted.internet.ssl import CertificateOptions, ContextFactory +from twisted.internet.ssl import CertificateOptions, ContextFactory, platformTrust from twisted.python.failure import Failure logger = logging.getLogger(__name__) @@ -90,7 +90,7 @@ def _tolerateErrors(wrapped): @implementer(IOpenSSLClientConnectionCreator) -class ClientTLSOptions(object): +class ClientTLSOptionsNoVerify(object): """ Client creator for TLS without certificate identity verification. This is a copy of twisted.internet._sslverify.ClientTLSOptions with the identity @@ -127,9 +127,30 @@ class ClientTLSOptionsFactory(object): to remote servers for federation.""" def __init__(self, config): - # We don't use config options yet - self._options = CertificateOptions(verify=False) + self._config = config + self._options_noverify = CertificateOptions() + + # Check if we're using a custom list of a CA certificates + trust_root = config.federation_ca_trust_root + if trust_root is None: + # Use CA root certs provided by OpenSSL + trust_root = platformTrust() + + self._options_verify = CertificateOptions(trustRoot=trust_root) def get_options(self, host): # Use _makeContext so that we get a fresh OpenSSL CTX each time. 
- return ClientTLSOptions(host, self._options._makeContext()) + + # Check if certificate verification has been enabled + should_verify = self._config.federation_verify_certificates + + # Check if we've disabled certificate verification for this host + if should_verify: + for regex in self._config.federation_certificate_verification_whitelist: + if regex.match(host): + should_verify = False + break + + if should_verify: + return ClientTLSOptions(host, self._options_verify._makeContext()) + return ClientTLSOptionsNoVerify(host, self._options_noverify._makeContext()) diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py index 1334c630cc..b4cbe97b41 100644 --- a/synapse/http/federation/matrix_federation_agent.py +++ b/synapse/http/federation/matrix_federation_agent.py @@ -149,7 +149,7 @@ class MatrixFederationAgent(object): tls_options = None else: tls_options = self._tls_client_options_factory.get_options( - res.tls_server_name.decode("ascii") + res.tls_server_name.decode("ascii"), ) # make sure that the Host header is set correctly diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index dcf184d3cf..e9eb662c4c 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -39,6 +39,7 @@ from synapse.util.logcontext import LoggingContext from tests.http import ServerTLSContext from tests.server import FakeTransport, ThreadedMemoryReactorClock from tests.unittest import TestCase +from tests.utils import default_config logger = logging.getLogger(__name__) @@ -53,7 +54,7 @@ class MatrixFederationAgentTests(TestCase): self.agent = MatrixFederationAgent( reactor=self.reactor, - tls_client_options_factory=ClientTLSOptionsFactory(None), + tls_client_options_factory=ClientTLSOptionsFactory(default_config("test")), _well_known_tls_policy=TrustingTLSPolicyForHTTPS(), _srv_resolver=self.mock_resolver, _well_known_cache=self.well_known_cache, From 4a9a118a94748ce287741f207f2077c6595ff178 Mon Sep 17 00:00:00 2001 From: *=0=1=4=* <21157384+f35f0ef9d0e827dae86552d3899f78fc@users.noreply.github.com> Date: Thu, 25 Apr 2019 08:47:22 -0500 Subject: [PATCH 049/429] Fix handling of SYNAPSE_NO_TLS in docker image (#5005) --- changelog.d/5005.misc | 1 + docker/README.md | 5 +++-- docker/start.py | 12 ++++++++++++ 3 files changed, 16 insertions(+), 2 deletions(-) create mode 100644 changelog.d/5005.misc diff --git a/changelog.d/5005.misc b/changelog.d/5005.misc new file mode 100644 index 0000000000..f74147b352 --- /dev/null +++ b/changelog.d/5005.misc @@ -0,0 +1 @@ +Convert SYNAPSE_NO_TLS Docker variable to boolean for user friendliness. Contributed by Gabriel Eckerson. diff --git a/docker/README.md b/docker/README.md index b48d74e09c..b27a692d5b 100644 --- a/docker/README.md +++ b/docker/README.md @@ -102,8 +102,9 @@ when ``SYNAPSE_CONFIG_PATH`` is not set. * ``SYNAPSE_SERVER_NAME`` (mandatory), the server public hostname. * ``SYNAPSE_REPORT_STATS``, (mandatory, ``yes`` or ``no``), enable anonymous statistics reporting back to the Matrix project which helps us to get funding. -* ``SYNAPSE_NO_TLS``, set this variable to disable TLS in Synapse (use this if - you run your own TLS-capable reverse proxy). +* `SYNAPSE_NO_TLS`, (accepts `true`, `false`, `on`, `off`, `1`, `0`, `yes`, `no`]): disable + TLS in Synapse (use this if you run your own TLS-capable reverse proxy). 
Defaults + to `false` (ie, TLS is enabled by default). * ``SYNAPSE_ENABLE_REGISTRATION``, set this variable to enable registration on the Synapse instance. * ``SYNAPSE_ALLOW_GUEST``, set this variable to allow guest joining this server. diff --git a/docker/start.py b/docker/start.py index 941d9996a8..2da555272a 100755 --- a/docker/start.py +++ b/docker/start.py @@ -59,6 +59,18 @@ else: if not os.path.exists("/compiled"): os.mkdir("/compiled") config_path = "/compiled/homeserver.yaml" + + # Convert SYNAPSE_NO_TLS to boolean if exists + if "SYNAPSE_NO_TLS" in environ: + tlsanswerstring = str.lower(environ["SYNAPSE_NO_TLS"]) + if tlsanswerstring in ("true", "on", "1", "yes"): + environ["SYNAPSE_NO_TLS"] = True + else: + if tlsanswerstring in ("false", "off", "0", "no"): + environ["SYNAPSE_NO_TLS"] = False + else: + print("Environment variable \"SYNAPSE_NO_TLS\" found but value \"" + tlsanswerstring + "\" unrecognized; exiting.") + sys.exit(2) convert("/conf/homeserver.yaml", config_path, environ) convert("/conf/log.config", "/compiled/log.config", environ) From 00714e5102523829eb20129567152667df154d5b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 25 Apr 2019 14:52:19 +0100 Subject: [PATCH 050/429] set PIP_USE_PEP517 = False for tests pip 19.1 otherwise complains about "editable mode is not supported for pyproject.toml-style projects" --- tox.ini | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tox.ini b/tox.ini index ef543890f9..d0e519ce46 100644 --- a/tox.ini +++ b/tox.ini @@ -24,6 +24,11 @@ deps = pip>=10 setenv = + # we have a pyproject.toml, but don't want pip to use it for building. + # (otherwise we get an error about 'editable mode is not supported for + # pyproject.toml-style projects'). + PIP_USE_PEP517 = false + PYTHONDONTWRITEBYTECODE = no_byte_code COVERAGE_PROCESS_START = {toxinidir}/.coveragerc From afe560b07235ef002461893dc467c80260e85c56 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 25 Apr 2019 14:54:54 +0100 Subject: [PATCH 051/429] Add --no-pep-517 to README instructions --- README.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.rst b/README.rst index 24afb93d7d..5409f0c563 100644 --- a/README.rst +++ b/README.rst @@ -173,7 +173,7 @@ Synapse offers two database engines: * `PostgreSQL `_ By default Synapse uses SQLite in and doing so trades performance for convenience. -SQLite is only recommended in Synapse for testing purposes or for servers with +SQLite is only recommended in Synapse for testing purposes or for servers with light workloads. Almost all installations should opt to use PostreSQL. Advantages include: @@ -272,7 +272,7 @@ to install using pip and a virtualenv:: virtualenv -p python3 env source env/bin/activate - python -m pip install -e .[all] + python -m pip install --no-pep-517 -e .[all] This will run a process of downloading and installing all the needed dependencies into a virtual env. From e86d74d748af614c3a7e4d8e9eb7968c50e37aa8 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 25 Apr 2019 14:55:33 +0100 Subject: [PATCH 052/429] Changelog --- changelog.d/5098.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5098.misc diff --git a/changelog.d/5098.misc b/changelog.d/5098.misc new file mode 100644 index 0000000000..9cd83bf226 --- /dev/null +++ b/changelog.d/5098.misc @@ -0,0 +1 @@ +Add workarounds for pep-517 install errors. 
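For reference, the `SYNAPSE_NO_TLS` handling added to `docker/start.py` in the Docker patch above boils down to the following parsing rule. This is a standalone sketch of the accepted values listed in the Docker README, not the entrypoint code itself:

```python
def parse_synapse_no_tls(value):
    """Interpret the values the Docker README documents for SYNAPSE_NO_TLS."""
    value = str(value).lower()
    if value in ("true", "on", "1", "yes"):
        return True
    if value in ("false", "off", "0", "no"):
        return False
    raise ValueError("unrecognised SYNAPSE_NO_TLS value: %r" % (value,))

assert parse_synapse_no_tls("Yes") is True
assert parse_synapse_no_tls("off") is False
```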
From 7ca638c76135d7a0f86f6aa7981554bbe0b7a335 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 25 Apr 2019 20:53:10 +0100 Subject: [PATCH 053/429] Clarify logging when PDU signature checking fails --- synapse/crypto/keyring.py | 4 +--- synapse/federation/federation_base.py | 19 +++++++++++++++++++ 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index ed2e994437..b6d1b4cf0b 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -120,11 +120,9 @@ class Keyring(object): key_ids = signature_ids(json_object, server_name) if not key_ids: - logger.warn("Request from %s: no supported signature keys", - server_name) deferred = defer.fail(SynapseError( 400, - "Not signed with a supported algorithm", + "Not signed by %s" % (server_name, ), Codes.UNAUTHORIZED, )) else: diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index dfe6b4aa5c..1d641337da 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -269,7 +269,17 @@ def _check_sigs_on_pdus(keyring, room_version, pdus): for p in pdus_to_check_sender ]) + def sender_err(e, pdu_to_check): + logger.warning( + "event id %s: unable to verify signature for sender %s: %s", + pdu_to_check.pdu.event_id, + pdu_to_check.sender_domain, + e, + ) + return e + for p, d in zip(pdus_to_check_sender, more_deferreds): + d.addErrback(sender_err, p) p.deferreds.append(d) # now let's look for events where the sender's domain is different to the @@ -291,7 +301,16 @@ def _check_sigs_on_pdus(keyring, room_version, pdus): for p in pdus_to_check_event_id ]) + def event_err(e, pdu_to_check): + logger.warning( + "event id %s: unable to verify signature for event id domain: %s", + pdu_to_check.pdu.event_id, + e, + ) + return e + for p, d in zip(pdus_to_check_event_id, more_deferreds): + d.addErrback(event_err, p) p.deferreds.append(d) # replace lists of deferreds with single Deferreds From fd8fb32bdd70b0ad68666e0f39a95ac90f9b1c27 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 25 Apr 2019 21:08:12 +0100 Subject: [PATCH 054/429] remove extraneous exception logging --- synapse/crypto/keyring.py | 48 ++++++++++++++++++--------- synapse/federation/federation_base.py | 4 +-- 2 files changed, 34 insertions(+), 18 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index b6d1b4cf0b..d8ba870cca 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -114,38 +114,54 @@ class Keyring(object): server_name. The deferreds run their callbacks in the sentinel logcontext. """ + # a list of VerifyKeyRequests verify_requests = [] + handle = preserve_fn(_handle_key_deferred) - for server_name, json_object in server_and_json: + def process(server_name, json_object): + """Process an entry in the request list + Given a (server_name, json_object) pair from the request list, + adds a key request to verify_requests, and returns a deferred which will + complete or fail (in the sentinel context) when verification completes. 
+ """ key_ids = signature_ids(json_object, server_name) + if not key_ids: - deferred = defer.fail(SynapseError( - 400, - "Not signed by %s" % (server_name, ), - Codes.UNAUTHORIZED, - )) - else: - deferred = defer.Deferred() + return defer.fail( + SynapseError( + 400, + "Not signed by %s" % (server_name,), + Codes.UNAUTHORIZED, + ) + ) logger.debug("Verifying for %s with key_ids %s", server_name, key_ids) + # add the key request to the queue, but don't start it off yet. verify_request = VerifyKeyRequest( - server_name, key_ids, json_object, deferred + server_name, key_ids, json_object, defer.Deferred(), ) - verify_requests.append(verify_request) - run_in_background(self._start_key_lookups, verify_requests) + # now run _handle_key_deferred, which will wait for the key request + # to complete and then do the verification. + # + # We want _handle_key_request to log to the right context, so we + # wrap it with preserve_fn (aka run_in_background) + return handle(verify_request) - # Pass those keys to handle_key_deferred so that the json object - # signatures can be verified - handle = preserve_fn(_handle_key_deferred) - return [ - handle(rq) for rq in verify_requests + results = [ + process(server_name, json_object) + for server_name, json_object in server_and_json ] + if verify_requests: + run_in_background(self._start_key_lookups, verify_requests) + + return results + @defer.inlineCallbacks def _start_key_lookups(self, verify_requests): """Sets off the key fetches for each verify request diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 1d641337da..832e2bdb9b 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -274,7 +274,7 @@ def _check_sigs_on_pdus(keyring, room_version, pdus): "event id %s: unable to verify signature for sender %s: %s", pdu_to_check.pdu.event_id, pdu_to_check.sender_domain, - e, + e.getErrorMessage(), ) return e @@ -305,7 +305,7 @@ def _check_sigs_on_pdus(keyring, room_version, pdus): logger.warning( "event id %s: unable to verify signature for event id domain: %s", pdu_to_check.pdu.event_id, - e, + e.getErrorMessage(), ) return e From 837d7f85a9d0a479487cd6205a3982f3981e4276 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 25 Apr 2019 22:17:59 +0100 Subject: [PATCH 055/429] more logging improvements --- synapse/federation/federation_base.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 832e2bdb9b..cffa831d80 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -270,13 +270,14 @@ def _check_sigs_on_pdus(keyring, room_version, pdus): ]) def sender_err(e, pdu_to_check): - logger.warning( - "event id %s: unable to verify signature for sender %s: %s", + errmsg = "event id %s: unable to verify signature for sender %s: %s" % ( pdu_to_check.pdu.event_id, pdu_to_check.sender_domain, e.getErrorMessage(), ) - return e + # XX not really sure if these are the right codes, but they are what + # we've done for ages + raise SynapseError(400, errmsg, Codes.UNAUTHORIZED) for p, d in zip(pdus_to_check_sender, more_deferreds): d.addErrback(sender_err, p) @@ -302,12 +303,14 @@ def _check_sigs_on_pdus(keyring, room_version, pdus): ]) def event_err(e, pdu_to_check): - logger.warning( - "event id %s: unable to verify signature for event id domain: %s", - pdu_to_check.pdu.event_id, - e.getErrorMessage(), + errmsg = ( + "event 
id %s: unable to verify signature for event id domain: %s" % ( + pdu_to_check.pdu.event_id, + e.getErrorMessage(), + ) ) - return e + # XX as above: not really sure if these are the right codes + raise SynapseError(400, errmsg, Codes.UNAUTHORIZED) for p, d in zip(pdus_to_check_event_id, more_deferreds): d.addErrback(event_err, p) From 0962d3cdffd91dec7472661d81b21a35800eb8b5 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 25 Apr 2019 23:05:06 +0100 Subject: [PATCH 056/429] changelog --- changelog.d/5100.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5100.misc diff --git a/changelog.d/5100.misc b/changelog.d/5100.misc new file mode 100644 index 0000000000..db5eb1b156 --- /dev/null +++ b/changelog.d/5100.misc @@ -0,0 +1 @@ +Improve logging when event-signature checks fail. From bd0d45ca69587f4f258b738dfa3a55704838081e Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 26 Apr 2019 11:13:16 +0100 Subject: [PATCH 057/429] Fix infinite loop in presence handler Fixes #5102 --- changelog.d/5103.bugfix | 1 + synapse/handlers/presence.py | 5 +++++ synapse/storage/state_deltas.py | 18 ++++++++++++++++++ 3 files changed, 24 insertions(+) create mode 100644 changelog.d/5103.bugfix diff --git a/changelog.d/5103.bugfix b/changelog.d/5103.bugfix new file mode 100644 index 0000000000..590d80d58f --- /dev/null +++ b/changelog.d/5103.bugfix @@ -0,0 +1 @@ +Fix bug where presence updates were sent to all servers in a room when a new server joined, rather than to just the new server. diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index bd1285b15c..59d53f1050 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -828,6 +828,11 @@ class PresenceHandler(object): if typ != EventTypes.Member: continue + if event_id is None: + # state has been deleted, so this is not a join. We only care about + # joins. + continue + event = yield self.store.get_event(event_id) if event.content.get("membership") != Membership.JOIN: # We only care about joins diff --git a/synapse/storage/state_deltas.py b/synapse/storage/state_deltas.py index 56e42f583d..31a0279b18 100644 --- a/synapse/storage/state_deltas.py +++ b/synapse/storage/state_deltas.py @@ -22,6 +22,24 @@ logger = logging.getLogger(__name__) class StateDeltasStore(SQLBaseStore): def get_current_state_deltas(self, prev_stream_id): + """Fetch a list of room state changes since the given stream id + + Each entry in the result contains the following fields: + - stream_id (int) + - room_id (str) + - type (str): event type + - state_key (str): + - event_id (str|None): new event_id for this state key. None if the + state has been deleted. + - prev_event_id (str|None): previous event_id for this state key. None + if it's new state. + + Args: + prev_stream_id (int): point to get changes since (exclusive) + + Returns: + Deferred[list[dict]]: results + """ prev_stream_id = int(prev_stream_id) if not self._curr_state_delta_stream_cache.has_any_entity_changed( prev_stream_id From 28a81ed62fdf489001385514e666729d85deb4bc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 26 Apr 2019 18:06:25 +0100 Subject: [PATCH 058/429] Ratelimit 3pid invites We do ratelimit sending the 3PID invite events, but that happens after spamming the identity server. 
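The point of the change below is ordering: the rate limit must be checked before the outbound identity-server lookup, otherwise a client that is already over the limit still generates identity-server traffic. A toy illustration of that ordering; the limiter and the two stubs are hypothetical stand-ins, not Synapse APIs:

```python
import time

class SimpleRateLimiter(object):
    """Toy fixed-window limiter, purely illustrative (not Synapse's Ratelimiter)."""

    def __init__(self, max_actions, window_seconds):
        self.max_actions = max_actions
        self.window = window_seconds
        self.history = {}

    def check(self, key):
        now = time.time()
        recent = [t for t in self.history.get(key, []) if now - t < self.window]
        if len(recent) >= self.max_actions:
            raise RuntimeError("rate limit exceeded for %s" % (key,))
        recent.append(now)
        self.history[key] = recent

def lookup_3pid(id_server, medium, address):
    # Hypothetical stand-in for the outbound identity-server request.
    return "@invitee:example.com"

def send_invite_event(requester, invitee):
    # Hypothetical stand-in for building and sending the invite event.
    pass

limiter = SimpleRateLimiter(max_actions=5, window_seconds=60)

def invite_3pid(requester, id_server, medium, address):
    # Check the limit *before* talking to the identity server, so a request
    # that is over the limit generates no identity-server traffic at all.
    limiter.check(requester)
    invitee = lookup_3pid(id_server, medium, address)
    send_invite_event(requester, invitee)
```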
--- synapse/handlers/room_member.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index e432740832..53e0103b5b 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -33,6 +33,8 @@ from synapse.types import RoomID, UserID from synapse.util.async_helpers import Linearizer from synapse.util.distributor import user_joined_room, user_left_room +from ._base import BaseHandler + logger = logging.getLogger(__name__) id_server_scheme = "https://" @@ -71,6 +73,11 @@ class RoomMemberHandler(object): self.spam_checker = hs.get_spam_checker() self._server_notices_mxid = self.config.server_notices_mxid + # This is only used to get at ratelimit function, and + # maybe_kick_guest_users. It's fine there are multiple of these as + # it doesn't store state. + self.base_handler = BaseHandler(hs) + @abc.abstractmethod def _remote_join(self, requester, remote_room_hosts, room_id, user, content): """Try and join a room that this server is not in @@ -702,6 +709,9 @@ class RoomMemberHandler(object): Codes.FORBIDDEN, ) + # Check whether we'll be ratelimited + yield self.base_handler.ratelimit(requester, update=False) + invitee = yield self._lookup_3pid( id_server, medium, address ) From 19f0722b2cd4e1cc09602c04721f143c878ed974 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 26 Apr 2019 18:08:33 +0100 Subject: [PATCH 059/429] Newsfile --- changelog.d/5104.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5104.bugfix diff --git a/changelog.d/5104.bugfix b/changelog.d/5104.bugfix new file mode 100644 index 0000000000..f88aca8a40 --- /dev/null +++ b/changelog.d/5104.bugfix @@ -0,0 +1 @@ +Fix the ratelimting on third party invites. From 8c5b1e30d454f87fba22d16b59b7ff0a76cb4ca4 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Mon, 29 Apr 2019 15:40:31 -0600 Subject: [PATCH 060/429] Add a default .m.rule.tombstone push rule (#4867) * Add a default .m.rule.tombstone push rule In support of MSC1930: https://github.com/matrix-org/matrix-doc/pull/1930 * changelog * Appease the changelog linter --- changelog.d/4867.feature | 1 + synapse/push/baserules.py | 17 +++++++++++++++++ 2 files changed, 18 insertions(+) create mode 100644 changelog.d/4867.feature diff --git a/changelog.d/4867.feature b/changelog.d/4867.feature new file mode 100644 index 0000000000..f5f9030e22 --- /dev/null +++ b/changelog.d/4867.feature @@ -0,0 +1 @@ +Add a default .m.rule.tombstone push rule. 
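The rule added below fires on `m.room.tombstone` state events, which are sent into the old room when it is upgraded and replaced. Its only condition is an `event_match` on the event type, so any event shaped like the following illustrative example (field names as in the Matrix spec for tombstones) would trigger the highlight:

```python
tombstone_event = {
    "type": "m.room.tombstone",
    "state_key": "",
    "content": {
        "body": "This room has been replaced",
        "replacement_room": "!newroom:example.org",
    },
}

def matches_tombstone_rule(event):
    # Mirrors the rule's single event_match condition on "type".
    return event.get("type") == "m.room.tombstone"

assert matches_tombstone_rule(tombstone_event)
```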
diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py index 8f0682c948..3523a40108 100644 --- a/synapse/push/baserules.py +++ b/synapse/push/baserules.py @@ -261,6 +261,23 @@ BASE_APPEND_OVERRIDE_RULES = [ 'value': True, } ] + }, + { + 'rule_id': 'global/override/.m.rule.tombstone', + 'conditions': [ + { + 'kind': 'event_match', + 'key': 'type', + 'pattern': 'm.room.tombstone', + '_id': '_tombstone', + } + ], + 'actions': [ + 'notify', { + 'set_tweak': 'highlight', + 'value': True, + } + ] } ] From d8e357b7cfc80de824ab10167c7472098506549f Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 1 May 2019 11:34:22 +0100 Subject: [PATCH 061/429] Fix typo in account validity admin route --- docs/admin_api/account_validity.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/admin_api/account_validity.rst b/docs/admin_api/account_validity.rst index 980ea23605..47439460fb 100644 --- a/docs/admin_api/account_validity.rst +++ b/docs/admin_api/account_validity.rst @@ -13,7 +13,7 @@ This API extends the validity of an account by as much time as configured in the The API is:: - POST /_matrix/client/unstable/account_validity/send_mail + POST /_matrix/client/unstable/account_validity/validity with the following body: From 031919dafbc1c4e8a4060e498c4585ee95b91397 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 1 May 2019 11:38:27 +0100 Subject: [PATCH 062/429] Fix whole path for admin route --- docs/admin_api/account_validity.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/admin_api/account_validity.rst b/docs/admin_api/account_validity.rst index 47439460fb..c26353752f 100644 --- a/docs/admin_api/account_validity.rst +++ b/docs/admin_api/account_validity.rst @@ -13,7 +13,7 @@ This API extends the validity of an account by as much time as configured in the The API is:: - POST /_matrix/client/unstable/account_validity/validity + POST /_matrix/client/unstable/admin/account_validity/validity with the following body: From 803a28fd1d2a72a7757a88e80d8c588fb65bba9e Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 1 May 2019 11:43:31 +0100 Subject: [PATCH 063/429] Add changelog --- changelog.d/5116.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5116.misc diff --git a/changelog.d/5116.misc b/changelog.d/5116.misc new file mode 100644 index 0000000000..dcbf7c1fb8 --- /dev/null +++ b/changelog.d/5116.misc @@ -0,0 +1 @@ + Add time-based account expiration. 
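With the route corrected above, a hedged example of driving the admin endpoint from a script; `requests` is assumed to be available, the homeserver URL, token and user ID are placeholders, and the caller must be a server admin:

```python
import json

import requests

resp = requests.post(
    "https://homeserver.example.com/_matrix/client/unstable/admin/account_validity/validity",
    headers={"Authorization": "Bearer <admin_access_token>"},
    data=json.dumps({
        "user_id": "@kermit:example.com",
        # Optional fields, as exercised by the account validity tests earlier
        # in this series:
        # "expiration_ts": 0,
        # "enable_renewal_emails": False,
    }),
)
resp.raise_for_status()
print(resp.json())
```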
From 6aad81ec0c0c411978980eadd2f8a2ebc0f75cb1 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 1 May 2019 11:50:15 +0100 Subject: [PATCH 064/429] Rename changelog file --- changelog.d/{5116.misc => 5116.feature} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename changelog.d/{5116.misc => 5116.feature} (100%) diff --git a/changelog.d/5116.misc b/changelog.d/5116.feature similarity index 100% rename from changelog.d/5116.misc rename to changelog.d/5116.feature From 579b637b6c52991f59bbcf7d836587218e42e744 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 1 May 2019 10:40:33 +0100 Subject: [PATCH 065/429] Move admin API away from ClientV1RestServlet --- synapse/rest/client/v1/admin.py | 66 ++++++++++++++++----------------- 1 file changed, 33 insertions(+), 33 deletions(-) diff --git a/synapse/rest/client/v1/admin.py b/synapse/rest/client/v1/admin.py index 0a1e233b23..8efa457d15 100644 --- a/synapse/rest/client/v1/admin.py +++ b/synapse/rest/client/v1/admin.py @@ -28,6 +28,7 @@ import synapse from synapse.api.constants import Membership, UserTypes from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError from synapse.http.servlet import ( + RestServlet, assert_params_in_dict, parse_integer, parse_json_object_from_request, @@ -36,16 +37,17 @@ from synapse.http.servlet import ( from synapse.types import UserID, create_requester from synapse.util.versionstring import get_version_string -from .base import ClientV1RestServlet, client_path_patterns +from .base import client_path_patterns logger = logging.getLogger(__name__) -class UsersRestServlet(ClientV1RestServlet): +class UsersRestServlet(RestServlet): PATTERNS = client_path_patterns("/admin/users/(?P[^/]*)") def __init__(self, hs): - super(UsersRestServlet, self).__init__(hs) + self.hs = hs + self.auth = hs.get_auth() self.handlers = hs.get_handlers() @defer.inlineCallbacks @@ -69,9 +71,12 @@ class UsersRestServlet(ClientV1RestServlet): defer.returnValue((200, ret)) -class VersionServlet(ClientV1RestServlet): +class VersionServlet(RestServlet): PATTERNS = client_path_patterns("/admin/server_version") + def __init__(self, hs): + self.auth = hs.get_auth() + @defer.inlineCallbacks def on_GET(self, request): requester = yield self.auth.get_user_by_req(request) @@ -88,7 +93,7 @@ class VersionServlet(ClientV1RestServlet): defer.returnValue((200, ret)) -class UserRegisterServlet(ClientV1RestServlet): +class UserRegisterServlet(RestServlet): """ Attributes: NONCE_TIMEOUT (int): Seconds until a generated nonce won't be accepted @@ -99,7 +104,6 @@ class UserRegisterServlet(ClientV1RestServlet): NONCE_TIMEOUT = 60 def __init__(self, hs): - super(UserRegisterServlet, self).__init__(hs) self.handlers = hs.get_handlers() self.reactor = hs.get_reactor() self.nonces = {} @@ -226,11 +230,12 @@ class UserRegisterServlet(ClientV1RestServlet): defer.returnValue((200, result)) -class WhoisRestServlet(ClientV1RestServlet): +class WhoisRestServlet(RestServlet): PATTERNS = client_path_patterns("/admin/whois/(?P[^/]*)") def __init__(self, hs): - super(WhoisRestServlet, self).__init__(hs) + self.hs = hs + self.auth = hs.get_auth() self.handlers = hs.get_handlers() @defer.inlineCallbacks @@ -251,12 +256,12 @@ class WhoisRestServlet(ClientV1RestServlet): defer.returnValue((200, ret)) -class PurgeMediaCacheRestServlet(ClientV1RestServlet): +class PurgeMediaCacheRestServlet(RestServlet): PATTERNS = client_path_patterns("/admin/purge_media_cache") def __init__(self, hs): self.media_repository = hs.get_media_repository() - 
super(PurgeMediaCacheRestServlet, self).__init__(hs) + self.auth = hs.get_auth() @defer.inlineCallbacks def on_POST(self, request): @@ -274,7 +279,7 @@ class PurgeMediaCacheRestServlet(ClientV1RestServlet): defer.returnValue((200, ret)) -class PurgeHistoryRestServlet(ClientV1RestServlet): +class PurgeHistoryRestServlet(RestServlet): PATTERNS = client_path_patterns( "/admin/purge_history/(?P[^/]*)(/(?P[^/]+))?" ) @@ -285,9 +290,9 @@ class PurgeHistoryRestServlet(ClientV1RestServlet): Args: hs (synapse.server.HomeServer) """ - super(PurgeHistoryRestServlet, self).__init__(hs) self.pagination_handler = hs.get_pagination_handler() self.store = hs.get_datastore() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_POST(self, request, room_id, event_id): @@ -371,7 +376,7 @@ class PurgeHistoryRestServlet(ClientV1RestServlet): })) -class PurgeHistoryStatusRestServlet(ClientV1RestServlet): +class PurgeHistoryStatusRestServlet(RestServlet): PATTERNS = client_path_patterns( "/admin/purge_history_status/(?P[^/]+)" ) @@ -382,8 +387,8 @@ class PurgeHistoryStatusRestServlet(ClientV1RestServlet): Args: hs (synapse.server.HomeServer) """ - super(PurgeHistoryStatusRestServlet, self).__init__(hs) self.pagination_handler = hs.get_pagination_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, purge_id): @@ -400,12 +405,12 @@ class PurgeHistoryStatusRestServlet(ClientV1RestServlet): defer.returnValue((200, purge_status.asdict())) -class DeactivateAccountRestServlet(ClientV1RestServlet): +class DeactivateAccountRestServlet(RestServlet): PATTERNS = client_path_patterns("/admin/deactivate/(?P[^/]*)") def __init__(self, hs): - super(DeactivateAccountRestServlet, self).__init__(hs) self._deactivate_account_handler = hs.get_deactivate_account_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_POST(self, request, target_user_id): @@ -438,7 +443,7 @@ class DeactivateAccountRestServlet(ClientV1RestServlet): })) -class ShutdownRoomRestServlet(ClientV1RestServlet): +class ShutdownRoomRestServlet(RestServlet): """Shuts down a room by removing all local users from the room and blocking all future invites and joins to the room. Any local aliases will be repointed to a new room created by `new_room_user_id` and kicked users will be auto @@ -452,12 +457,13 @@ class ShutdownRoomRestServlet(ClientV1RestServlet): ) def __init__(self, hs): - super(ShutdownRoomRestServlet, self).__init__(hs) + self.hs = hs self.store = hs.get_datastore() self.state = hs.get_state_handler() self._room_creation_handler = hs.get_room_creation_handler() self.event_creation_handler = hs.get_event_creation_handler() self.room_member_handler = hs.get_room_member_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_POST(self, request, room_id): @@ -564,15 +570,15 @@ class ShutdownRoomRestServlet(ClientV1RestServlet): })) -class QuarantineMediaInRoom(ClientV1RestServlet): +class QuarantineMediaInRoom(RestServlet): """Quarantines all media in a room so that no one can download it via this server. 
""" PATTERNS = client_path_patterns("/admin/quarantine_media/(?P[^/]+)") def __init__(self, hs): - super(QuarantineMediaInRoom, self).__init__(hs) self.store = hs.get_datastore() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_POST(self, request, room_id): @@ -588,13 +594,12 @@ class QuarantineMediaInRoom(ClientV1RestServlet): defer.returnValue((200, {"num_quarantined": num_quarantined})) -class ListMediaInRoom(ClientV1RestServlet): +class ListMediaInRoom(RestServlet): """Lists all of the media in a given room. """ PATTERNS = client_path_patterns("/admin/room/(?P[^/]+)/media") def __init__(self, hs): - super(ListMediaInRoom, self).__init__(hs) self.store = hs.get_datastore() @defer.inlineCallbacks @@ -609,7 +614,7 @@ class ListMediaInRoom(ClientV1RestServlet): defer.returnValue((200, {"local": local_mxcs, "remote": remote_mxcs})) -class ResetPasswordRestServlet(ClientV1RestServlet): +class ResetPasswordRestServlet(RestServlet): """Post request to allow an administrator reset password for a user. This needs user to have administrator access in Synapse. Example: @@ -626,7 +631,6 @@ class ResetPasswordRestServlet(ClientV1RestServlet): def __init__(self, hs): self.store = hs.get_datastore() - super(ResetPasswordRestServlet, self).__init__(hs) self.hs = hs self.auth = hs.get_auth() self._set_password_handler = hs.get_set_password_handler() @@ -653,7 +657,7 @@ class ResetPasswordRestServlet(ClientV1RestServlet): defer.returnValue((200, {})) -class GetUsersPaginatedRestServlet(ClientV1RestServlet): +class GetUsersPaginatedRestServlet(RestServlet): """Get request to get specific number of users from Synapse. This needs user to have administrator access in Synapse. Example: @@ -666,7 +670,6 @@ class GetUsersPaginatedRestServlet(ClientV1RestServlet): def __init__(self, hs): self.store = hs.get_datastore() - super(GetUsersPaginatedRestServlet, self).__init__(hs) self.hs = hs self.auth = hs.get_auth() self.handlers = hs.get_handlers() @@ -736,7 +739,7 @@ class GetUsersPaginatedRestServlet(ClientV1RestServlet): defer.returnValue((200, ret)) -class SearchUsersRestServlet(ClientV1RestServlet): +class SearchUsersRestServlet(RestServlet): """Get request to search user table for specific users according to search term. This needs user to have administrator access in Synapse. 
@@ -750,7 +753,6 @@ class SearchUsersRestServlet(ClientV1RestServlet): def __init__(self, hs): self.store = hs.get_datastore() - super(SearchUsersRestServlet, self).__init__(hs) self.hs = hs self.auth = hs.get_auth() self.handlers = hs.get_handlers() @@ -784,15 +786,15 @@ class SearchUsersRestServlet(ClientV1RestServlet): defer.returnValue((200, ret)) -class DeleteGroupAdminRestServlet(ClientV1RestServlet): +class DeleteGroupAdminRestServlet(RestServlet): """Allows deleting of local groups """ PATTERNS = client_path_patterns("/admin/delete_group/(?P[^/]*)") def __init__(self, hs): - super(DeleteGroupAdminRestServlet, self).__init__(hs) self.group_server = hs.get_groups_server_handler() self.is_mine_id = hs.is_mine_id + self.auth = hs.get_auth() @defer.inlineCallbacks def on_POST(self, request, group_id): @@ -809,7 +811,7 @@ class DeleteGroupAdminRestServlet(ClientV1RestServlet): defer.returnValue((200, {})) -class AccountValidityRenewServlet(ClientV1RestServlet): +class AccountValidityRenewServlet(RestServlet): PATTERNS = client_path_patterns("/admin/account_validity/validity$") def __init__(self, hs): @@ -817,8 +819,6 @@ class AccountValidityRenewServlet(ClientV1RestServlet): Args: hs (synapse.server.HomeServer): server """ - super(AccountValidityRenewServlet, self).__init__(hs) - self.hs = hs self.account_activity_handler = hs.get_account_validity_handler() self.auth = hs.get_auth() From 8e9ca8353730949290460f9183c5ee48941d1f75 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 1 May 2019 15:18:58 +0100 Subject: [PATCH 066/429] Move admin API to a new prefix --- docs/admin_api/account_validity.rst | 2 +- docs/admin_api/delete_group.md | 2 +- docs/admin_api/media_admin_api.md | 2 +- docs/admin_api/purge_history_api.rst | 4 +- docs/admin_api/purge_remote_media.rst | 2 +- docs/admin_api/register_api.rst | 4 +- docs/admin_api/user_admin_api.rst | 6 +-- docs/admin_api/version_api.rst | 2 +- synapse/app/homeserver.py | 2 + synapse/config/server.py | 4 +- synapse/rest/__init__.py | 12 ++++- synapse/rest/client/v1/admin.py | 70 +++++++++++++++++++-------- 12 files changed, 76 insertions(+), 36 deletions(-) diff --git a/docs/admin_api/account_validity.rst b/docs/admin_api/account_validity.rst index c26353752f..7559de4c57 100644 --- a/docs/admin_api/account_validity.rst +++ b/docs/admin_api/account_validity.rst @@ -13,7 +13,7 @@ This API extends the validity of an account by as much time as configured in the The API is:: - POST /_matrix/client/unstable/admin/account_validity/validity + POST /_synapse/admin/v1/account_validity/validity with the following body: diff --git a/docs/admin_api/delete_group.md b/docs/admin_api/delete_group.md index d703d108b0..1710488ea8 100644 --- a/docs/admin_api/delete_group.md +++ b/docs/admin_api/delete_group.md @@ -8,7 +8,7 @@ being deleted. The API is: ``` -POST /_matrix/client/r0/admin/delete_group/ +POST /_synapse/admin/v1/delete_group/ ``` including an `access_token` of a server admin. diff --git a/docs/admin_api/media_admin_api.md b/docs/admin_api/media_admin_api.md index abdbc1ea86..5e9f8e5d84 100644 --- a/docs/admin_api/media_admin_api.md +++ b/docs/admin_api/media_admin_api.md @@ -4,7 +4,7 @@ This API gets a list of known media in a room. The API is: ``` -GET /_matrix/client/r0/admin/room//media +GET /_synapse/admin/v1/room//media ``` including an `access_token` of a server admin. 
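A short usage sketch for the relocated media-listing endpoint documented just above. `requests` is assumed and the values are placeholders; the response shape, `local` and `remote` lists of MXC URIs, matches the `ListMediaInRoom` servlet shown earlier:

```python
import requests
from urllib.parse import quote

room_id = "!someroom:example.com"
resp = requests.get(
    "https://homeserver.example.com/_synapse/admin/v1/room/%s/media" % quote(room_id),
    headers={"Authorization": "Bearer <admin_access_token>"},
)
resp.raise_for_status()
media = resp.json()
print("local media:", media["local"])
print("remote media:", media["remote"])
```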
diff --git a/docs/admin_api/purge_history_api.rst b/docs/admin_api/purge_history_api.rst index a5c3dc8149..f7be226fd9 100644 --- a/docs/admin_api/purge_history_api.rst +++ b/docs/admin_api/purge_history_api.rst @@ -10,7 +10,7 @@ paginate further back in the room from the point being purged from. The API is: -``POST /_matrix/client/r0/admin/purge_history/[/]`` +``POST /_synapse/admin/v1/purge_history/[/]`` including an ``access_token`` of a server admin. @@ -49,7 +49,7 @@ Purge status query It is possible to poll for updates on recent purges with a second API; -``GET /_matrix/client/r0/admin/purge_history_status/`` +``GET /_synapse/admin/v1/purge_history_status/`` (again, with a suitable ``access_token``). This API returns a JSON body like the following: diff --git a/docs/admin_api/purge_remote_media.rst b/docs/admin_api/purge_remote_media.rst index 5deb02a3df..dacd5bc8fb 100644 --- a/docs/admin_api/purge_remote_media.rst +++ b/docs/admin_api/purge_remote_media.rst @@ -6,7 +6,7 @@ media. The API is:: - POST /_matrix/client/r0/admin/purge_media_cache?before_ts=&access_token= + POST /_synapse/admin/v1/purge_media_cache?before_ts=&access_token= {} diff --git a/docs/admin_api/register_api.rst b/docs/admin_api/register_api.rst index 084e74ebf5..3a63109aa0 100644 --- a/docs/admin_api/register_api.rst +++ b/docs/admin_api/register_api.rst @@ -12,7 +12,7 @@ is not enabled. To fetch the nonce, you need to request one from the API:: - > GET /_matrix/client/r0/admin/register + > GET /_synapse/admin/v1/register < {"nonce": "thisisanonce"} @@ -22,7 +22,7 @@ body containing the nonce, username, password, whether they are an admin As an example:: - > POST /_matrix/client/r0/admin/register + > POST /_synapse/admin/v1/register > { "nonce": "thisisanonce", "username": "pepper_roni", diff --git a/docs/admin_api/user_admin_api.rst b/docs/admin_api/user_admin_api.rst index d17121a188..8aca4f158d 100644 --- a/docs/admin_api/user_admin_api.rst +++ b/docs/admin_api/user_admin_api.rst @@ -5,7 +5,7 @@ This API returns information about a specific user account. The api is:: - GET /_matrix/client/r0/admin/whois/ + GET /_synapse/admin/v1/whois/ including an ``access_token`` of a server admin. @@ -50,7 +50,7 @@ references to it). The api is:: - POST /_matrix/client/r0/admin/deactivate/ + POST /_synapse/admin/v1/deactivate/ with a body of: @@ -73,7 +73,7 @@ Changes the password of another user. The api is:: - POST /_matrix/client/r0/admin/reset_password/ + POST /_synapse/admin/v1/reset_password/ with a body of: diff --git a/docs/admin_api/version_api.rst b/docs/admin_api/version_api.rst index 30a91b5f43..6d66543b96 100644 --- a/docs/admin_api/version_api.rst +++ b/docs/admin_api/version_api.rst @@ -8,7 +8,7 @@ contains Synapse version information). The api is:: - GET /_matrix/client/r0/admin/server_version + GET /_synapse/admin/v1/server_version including an ``access_token`` of a server admin. 
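The nonce flow above pairs with a MAC computed from the homeserver's `registration_shared_secret`. A sketch of a complete request against the new prefix, following the HMAC-SHA1 scheme documented in `register_api.rst` (not reproduced in full in the excerpt above); the secret, URL and credentials are placeholders:

```python
import hashlib
import hmac

import requests

base = "https://homeserver.example.com/_synapse/admin/v1/register"
shared_secret = b"<registration_shared_secret from homeserver.yaml>"
username, password, admin = "pepper_roni", "pizza", True

nonce = requests.get(base).json()["nonce"]

# HMAC-SHA1 over the NUL-separated nonce, username, password and admin flag.
mac = hmac.new(shared_secret, digestmod=hashlib.sha1)
mac.update(nonce.encode("utf8"))
mac.update(b"\x00")
mac.update(username.encode("utf8"))
mac.update(b"\x00")
mac.update(password.encode("utf8"))
mac.update(b"\x00")
mac.update(b"admin" if admin else b"notadmin")

resp = requests.post(base, json={
    "nonce": nonce,
    "username": username,
    "password": password,
    "admin": admin,
    "mac": mac.hexdigest(),
})
resp.raise_for_status()
```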
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 79be977ea6..3d7db61d14 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -62,6 +62,7 @@ from synapse.python_dependencies import check_requirements from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory from synapse.rest import ClientRestResource +from synapse.rest.client.v1.admin import AdminRestResource from synapse.rest.key.v2 import KeyApiV2Resource from synapse.rest.media.v0.content_repository import ContentRepoResource from synapse.rest.well_known import WellKnownResource @@ -180,6 +181,7 @@ class SynapseHomeServer(HomeServer): "/_matrix/client/v2_alpha": client_resource, "/_matrix/client/versions": client_resource, "/.well-known/matrix/client": WellKnownResource(self), + "/_synapse/admin": AdminRestResource(self), }) if self.get_config().saml2_enabled: diff --git a/synapse/config/server.py b/synapse/config/server.py index cdf1e4d286..cc232422b9 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -388,8 +388,8 @@ class ServerConfig(Config): # # Valid resource names are: # - # client: the client-server API (/_matrix/client). Also implies 'media' and - # 'static'. + # client: the client-server API (/_matrix/client), and the synapse admin + # API (/_synapse/admin). Also implies 'media' and 'static'. # # consent: user consent forms (/_matrix/consent). See # docs/consent_tracking.md. diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index a66885d349..6bc50f78e1 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -58,8 +58,14 @@ from synapse.rest.client.v2_alpha import ( class ClientRestResource(JsonResource): - """A resource for version 1 of the matrix client API.""" + """Matrix Client API REST resource. 
+ This gets mounted at various points under /_matrix/client, including: + * /_matrix/client/r0 + * /_matrix/client/api/v1 + * /_matrix/client/unstable + * etc + """ def __init__(self, hs): JsonResource.__init__(self, hs, canonical_json=False) self.register_servlets(self, hs) @@ -82,7 +88,6 @@ class ClientRestResource(JsonResource): presence.register_servlets(hs, client_resource) directory.register_servlets(hs, client_resource) voip.register_servlets(hs, client_resource) - admin.register_servlets(hs, client_resource) pusher.register_servlets(hs, client_resource) push_rule.register_servlets(hs, client_resource) logout.register_servlets(hs, client_resource) @@ -111,3 +116,6 @@ class ClientRestResource(JsonResource): room_upgrade_rest_servlet.register_servlets(hs, client_resource) capabilities.register_servlets(hs, client_resource) account_validity.register_servlets(hs, client_resource) + + # moving to /_synapse/admin + admin.register_servlets(hs, client_resource) diff --git a/synapse/rest/client/v1/admin.py b/synapse/rest/client/v1/admin.py index 8efa457d15..d4f83e4ae8 100644 --- a/synapse/rest/client/v1/admin.py +++ b/synapse/rest/client/v1/admin.py @@ -18,6 +18,7 @@ import hashlib import hmac import logging import platform +import re from six import text_type from six.moves import http_client @@ -27,6 +28,7 @@ from twisted.internet import defer import synapse from synapse.api.constants import Membership, UserTypes from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError +from synapse.http.server import JsonResource from synapse.http.servlet import ( RestServlet, assert_params_in_dict, @@ -37,13 +39,33 @@ from synapse.http.servlet import ( from synapse.types import UserID, create_requester from synapse.util.versionstring import get_version_string -from .base import client_path_patterns - logger = logging.getLogger(__name__) +def historical_admin_path_patterns(path_regex): + """Returns the list of patterns for an admin endpoint, including historical ones + + This is a backwards-compatibility hack. Previously, the Admin API was exposed at + various paths under /_matrix/client. This function returns a list of patterns + matching those paths (as well as the new one), so that existing scripts which rely + on the endpoints being available there are not broken. + + Note that this should only be used for existing endpoints: new ones should just + register for the /_synapse/admin path. + """ + return list( + re.compile(prefix + path_regex) + for prefix in ( + "^/_synapse/admin/v1", + "^/_matrix/client/api/v1/admin", + "^/_matrix/client/unstable/admin", + "^/_matrix/client/r0/admin" + ) + ) + + class UsersRestServlet(RestServlet): - PATTERNS = client_path_patterns("/admin/users/(?P[^/]*)") + PATTERNS = historical_admin_path_patterns("/users/(?P[^/]*)") def __init__(self, hs): self.hs = hs @@ -72,7 +94,7 @@ class UsersRestServlet(RestServlet): class VersionServlet(RestServlet): - PATTERNS = client_path_patterns("/admin/server_version") + PATTERNS = historical_admin_path_patterns("/server_version") def __init__(self, hs): self.auth = hs.get_auth() @@ -100,7 +122,7 @@ class UserRegisterServlet(RestServlet): nonces (dict[str, int]): The nonces that we will accept. A dict of nonce to the time it was generated, in int seconds. 
""" - PATTERNS = client_path_patterns("/admin/register") + PATTERNS = historical_admin_path_patterns("/register") NONCE_TIMEOUT = 60 def __init__(self, hs): @@ -231,7 +253,7 @@ class UserRegisterServlet(RestServlet): class WhoisRestServlet(RestServlet): - PATTERNS = client_path_patterns("/admin/whois/(?P[^/]*)") + PATTERNS = historical_admin_path_patterns("/whois/(?P[^/]*)") def __init__(self, hs): self.hs = hs @@ -257,7 +279,7 @@ class WhoisRestServlet(RestServlet): class PurgeMediaCacheRestServlet(RestServlet): - PATTERNS = client_path_patterns("/admin/purge_media_cache") + PATTERNS = historical_admin_path_patterns("/purge_media_cache") def __init__(self, hs): self.media_repository = hs.get_media_repository() @@ -280,8 +302,8 @@ class PurgeMediaCacheRestServlet(RestServlet): class PurgeHistoryRestServlet(RestServlet): - PATTERNS = client_path_patterns( - "/admin/purge_history/(?P[^/]*)(/(?P[^/]+))?" + PATTERNS = historical_admin_path_patterns( + "/purge_history/(?P[^/]*)(/(?P[^/]+))?" ) def __init__(self, hs): @@ -377,8 +399,8 @@ class PurgeHistoryRestServlet(RestServlet): class PurgeHistoryStatusRestServlet(RestServlet): - PATTERNS = client_path_patterns( - "/admin/purge_history_status/(?P[^/]+)" + PATTERNS = historical_admin_path_patterns( + "/purge_history_status/(?P[^/]+)" ) def __init__(self, hs): @@ -406,7 +428,7 @@ class PurgeHistoryStatusRestServlet(RestServlet): class DeactivateAccountRestServlet(RestServlet): - PATTERNS = client_path_patterns("/admin/deactivate/(?P[^/]*)") + PATTERNS = historical_admin_path_patterns("/deactivate/(?P[^/]*)") def __init__(self, hs): self._deactivate_account_handler = hs.get_deactivate_account_handler() @@ -449,7 +471,7 @@ class ShutdownRoomRestServlet(RestServlet): to a new room created by `new_room_user_id` and kicked users will be auto joined to the new room. """ - PATTERNS = client_path_patterns("/admin/shutdown_room/(?P[^/]+)") + PATTERNS = historical_admin_path_patterns("/shutdown_room/(?P[^/]+)") DEFAULT_MESSAGE = ( "Sharing illegal content on this server is not permitted and rooms in" @@ -574,7 +596,7 @@ class QuarantineMediaInRoom(RestServlet): """Quarantines all media in a room so that no one can download it via this server. """ - PATTERNS = client_path_patterns("/admin/quarantine_media/(?P[^/]+)") + PATTERNS = historical_admin_path_patterns("/quarantine_media/(?P[^/]+)") def __init__(self, hs): self.store = hs.get_datastore() @@ -597,7 +619,7 @@ class QuarantineMediaInRoom(RestServlet): class ListMediaInRoom(RestServlet): """Lists all of the media in a given room. """ - PATTERNS = client_path_patterns("/admin/room/(?P[^/]+)/media") + PATTERNS = historical_admin_path_patterns("/room/(?P[^/]+)/media") def __init__(self, hs): self.store = hs.get_datastore() @@ -627,7 +649,7 @@ class ResetPasswordRestServlet(RestServlet): Returns: 200 OK with empty object if success otherwise an error. """ - PATTERNS = client_path_patterns("/admin/reset_password/(?P[^/]*)") + PATTERNS = historical_admin_path_patterns("/reset_password/(?P[^/]*)") def __init__(self, hs): self.store = hs.get_datastore() @@ -666,7 +688,7 @@ class GetUsersPaginatedRestServlet(RestServlet): Returns: 200 OK with json object {list[dict[str, Any]], count} or empty object. 
""" - PATTERNS = client_path_patterns("/admin/users_paginate/(?P[^/]*)") + PATTERNS = historical_admin_path_patterns("/users_paginate/(?P[^/]*)") def __init__(self, hs): self.store = hs.get_datastore() @@ -749,7 +771,7 @@ class SearchUsersRestServlet(RestServlet): Returns: 200 OK with json object {list[dict[str, Any]], count} or empty object. """ - PATTERNS = client_path_patterns("/admin/search_users/(?P[^/]*)") + PATTERNS = historical_admin_path_patterns("/search_users/(?P[^/]*)") def __init__(self, hs): self.store = hs.get_datastore() @@ -789,7 +811,7 @@ class SearchUsersRestServlet(RestServlet): class DeleteGroupAdminRestServlet(RestServlet): """Allows deleting of local groups """ - PATTERNS = client_path_patterns("/admin/delete_group/(?P[^/]*)") + PATTERNS = historical_admin_path_patterns("/delete_group/(?P[^/]*)") def __init__(self, hs): self.group_server = hs.get_groups_server_handler() @@ -812,7 +834,7 @@ class DeleteGroupAdminRestServlet(RestServlet): class AccountValidityRenewServlet(RestServlet): - PATTERNS = client_path_patterns("/admin/account_validity/validity$") + PATTERNS = historical_admin_path_patterns("/account_validity/validity$") def __init__(self, hs): """ @@ -847,6 +869,14 @@ class AccountValidityRenewServlet(RestServlet): defer.returnValue((200, res)) +class AdminRestResource(JsonResource): + """The REST resource which gets mounted at /_synapse/admin""" + + def __init__(self, hs): + JsonResource.__init__(self, hs, canonical_json=False) + register_servlets(hs, self) + + def register_servlets(hs, http_server): WhoisRestServlet(hs).register(http_server) PurgeMediaCacheRestServlet(hs).register(http_server) From 40e576e29cf6f06d6b5244c5d1df34cf33b1f556 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 1 May 2019 15:32:38 +0100 Subject: [PATCH 067/429] Move admin api impl to its own package It doesn't really belong under rest/client/v1 any more. 
--- synapse/app/homeserver.py | 2 +- synapse/rest/__init__.py | 5 ++--- .../rest/{client/v1/admin.py => admin/__init__.py} | 2 +- tests/handlers/test_user_directory.py | 7 ++++--- tests/push/test_email.py | 5 +++-- tests/push/test_http.py | 5 +++-- tests/rest/admin/__init__.py | 14 ++++++++++++++ tests/rest/{client/v1 => admin}/test_admin.py | 11 ++++++----- tests/rest/client/test_consent.py | 5 +++-- tests/rest/client/test_identity.py | 5 +++-- tests/rest/client/v1/test_events.py | 5 +++-- tests/rest/client/v1/test_login.py | 5 +++-- tests/rest/client/v1/test_rooms.py | 5 +++-- tests/rest/client/v2_alpha/test_auth.py | 4 ++-- tests/rest/client/v2_alpha/test_capabilities.py | 6 +++--- tests/rest/client/v2_alpha/test_register.py | 7 ++++--- tests/rest/client/v2_alpha/test_sync.py | 5 +++-- tests/server_notices/test_consent.py | 6 +++--- tests/storage/test_client_ips.py | 5 +++-- 19 files changed, 67 insertions(+), 42 deletions(-) rename synapse/rest/{client/v1/admin.py => admin/__init__.py} (99%) create mode 100644 tests/rest/admin/__init__.py rename tests/rest/{client/v1 => admin}/test_admin.py (98%) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 3d7db61d14..1045d28949 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -62,7 +62,7 @@ from synapse.python_dependencies import check_requirements from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory from synapse.rest import ClientRestResource -from synapse.rest.client.v1.admin import AdminRestResource +from synapse.rest.admin import AdminRestResource from synapse.rest.key.v2 import KeyApiV2Resource from synapse.rest.media.v0.content_repository import ContentRepoResource from synapse.rest.well_known import WellKnownResource diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index 6bc50f78e1..e8e1bcddea 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -13,11 +13,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +import synapse.rest.admin from synapse.http.server import JsonResource from synapse.rest.client import versions from synapse.rest.client.v1 import ( - admin, directory, events, initial_sync, @@ -118,4 +117,4 @@ class ClientRestResource(JsonResource): account_validity.register_servlets(hs, client_resource) # moving to /_synapse/admin - admin.register_servlets(hs, client_resource) + synapse.rest.admin.register_servlets(hs, client_resource) diff --git a/synapse/rest/client/v1/admin.py b/synapse/rest/admin/__init__.py similarity index 99% rename from synapse/rest/client/v1/admin.py rename to synapse/rest/admin/__init__.py index d4f83e4ae8..ddf9ced1a3 100644 --- a/synapse/rest/client/v1/admin.py +++ b/synapse/rest/admin/__init__.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd -# Copyright 2018 New Vector Ltd +# Copyright 2018-2019 New Vector Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index f1d0aa42b6..32ef83e9c0 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -14,8 +14,9 @@ # limitations under the License. 
from mock import Mock +import synapse.rest.admin from synapse.api.constants import UserTypes -from synapse.rest.client.v1 import admin, login, room +from synapse.rest.client.v1 import login, room from synapse.rest.client.v2_alpha import user_directory from synapse.storage.roommember import ProfileInfo @@ -29,7 +30,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): servlets = [ login.register_servlets, - admin.register_servlets, + synapse.rest.admin.register_servlets, room.register_servlets, ] @@ -327,7 +328,7 @@ class TestUserDirSearchDisabled(unittest.HomeserverTestCase): user_directory.register_servlets, room.register_servlets, login.register_servlets, - admin.register_servlets, + synapse.rest.admin.register_servlets, ] def make_homeserver(self, reactor, clock): diff --git a/tests/push/test_email.py b/tests/push/test_email.py index be3fed8de3..e29bd18ad7 100644 --- a/tests/push/test_email.py +++ b/tests/push/test_email.py @@ -19,7 +19,8 @@ import pkg_resources from twisted.internet.defer import Deferred -from synapse.rest.client.v1 import admin, login, room +import synapse.rest.admin +from synapse.rest.client.v1 import login, room from tests.unittest import HomeserverTestCase @@ -33,7 +34,7 @@ class EmailPusherTests(HomeserverTestCase): skip = "No Jinja installed" if not load_jinja2_templates else None servlets = [ - admin.register_servlets, + synapse.rest.admin.register_servlets, room.register_servlets, login.register_servlets, ] diff --git a/tests/push/test_http.py b/tests/push/test_http.py index 6dc45e8506..3f9f56bb79 100644 --- a/tests/push/test_http.py +++ b/tests/push/test_http.py @@ -17,7 +17,8 @@ from mock import Mock from twisted.internet.defer import Deferred -from synapse.rest.client.v1 import admin, login, room +import synapse.rest.admin +from synapse.rest.client.v1 import login, room from synapse.util.logcontext import make_deferred_yieldable from tests.unittest import HomeserverTestCase @@ -32,7 +33,7 @@ class HTTPPusherTests(HomeserverTestCase): skip = "No Jinja installed" if not load_jinja2_templates else None servlets = [ - admin.register_servlets, + synapse.rest.admin.register_servlets, room.register_servlets, login.register_servlets, ] diff --git a/tests/rest/admin/__init__.py b/tests/rest/admin/__init__.py new file mode 100644 index 0000000000..1453d04571 --- /dev/null +++ b/tests/rest/admin/__init__.py @@ -0,0 +1,14 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
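The recurring change in the test suite is just the entry in `servlets`. A minimal sketch of a test case under the new layout is shown below; it assumes the usual `HomeserverTestCase` helpers (`register_user`, `login`, `make_request`, `render`) and is illustrative only, not part of the patch.

```python
# Illustrative test sketch: admin servlets are now registered from
# synapse.rest.admin rather than synapse.rest.client.v1.admin.
import synapse.rest.admin
from synapse.rest.client.v1 import login

from tests import unittest


class ServerVersionExampleTestCase(unittest.HomeserverTestCase):
    servlets = [
        synapse.rest.admin.register_servlets,
        login.register_servlets,
    ]

    def test_version_as_admin(self):
        self.register_user("admin", "pass", admin=True)
        admin_token = self.login("admin", "pass")

        request, channel = self.make_request(
            "GET", "/_synapse/admin/v1/server_version", access_token=admin_token
        )
        self.render(request)

        self.assertEqual(200, int(channel.result["code"]))
        self.assertIn("server_version", channel.json_body)
```
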
diff --git a/tests/rest/client/v1/test_admin.py b/tests/rest/admin/test_admin.py similarity index 98% rename from tests/rest/client/v1/test_admin.py rename to tests/rest/admin/test_admin.py index c00ef21d75..42858b5fea 100644 --- a/tests/rest/client/v1/test_admin.py +++ b/tests/rest/admin/test_admin.py @@ -19,8 +19,9 @@ import json from mock import Mock +import synapse.rest.admin from synapse.api.constants import UserTypes -from synapse.rest.client.v1 import admin, events, login, room +from synapse.rest.client.v1 import events, login, room from synapse.rest.client.v2_alpha import groups from tests import unittest @@ -29,7 +30,7 @@ from tests import unittest class VersionTestCase(unittest.HomeserverTestCase): servlets = [ - admin.register_servlets, + synapse.rest.admin.register_servlets, login.register_servlets, ] @@ -62,7 +63,7 @@ class VersionTestCase(unittest.HomeserverTestCase): class UserRegisterTestCase(unittest.HomeserverTestCase): - servlets = [admin.register_servlets] + servlets = [synapse.rest.admin.register_servlets] def make_homeserver(self, reactor, clock): @@ -358,7 +359,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): class ShutdownRoomTestCase(unittest.HomeserverTestCase): servlets = [ - admin.register_servlets, + synapse.rest.admin.register_servlets, login.register_servlets, events.register_servlets, room.register_servlets, @@ -495,7 +496,7 @@ class ShutdownRoomTestCase(unittest.HomeserverTestCase): class DeleteGroupTestCase(unittest.HomeserverTestCase): servlets = [ - admin.register_servlets, + synapse.rest.admin.register_servlets, login.register_servlets, groups.register_servlets, ] diff --git a/tests/rest/client/test_consent.py b/tests/rest/client/test_consent.py index 4294bbec2a..36e6c1c67d 100644 --- a/tests/rest/client/test_consent.py +++ b/tests/rest/client/test_consent.py @@ -15,8 +15,9 @@ import os +import synapse.rest.admin from synapse.api.urls import ConsentURIBuilder -from synapse.rest.client.v1 import admin, login, room +from synapse.rest.client.v1 import login, room from synapse.rest.consent import consent_resource from tests import unittest @@ -31,7 +32,7 @@ except Exception: class ConsentResourceTestCase(unittest.HomeserverTestCase): skip = "No Jinja installed" if not load_jinja2_templates else None servlets = [ - admin.register_servlets, + synapse.rest.admin.register_servlets, room.register_servlets, login.register_servlets, ] diff --git a/tests/rest/client/test_identity.py b/tests/rest/client/test_identity.py index ca63b2e6ed..d4fe0aee7d 100644 --- a/tests/rest/client/test_identity.py +++ b/tests/rest/client/test_identity.py @@ -15,7 +15,8 @@ import json -from synapse.rest.client.v1 import admin, login, room +import synapse.rest.admin +from synapse.rest.client.v1 import login, room from tests import unittest @@ -23,7 +24,7 @@ from tests import unittest class IdentityTestCase(unittest.HomeserverTestCase): servlets = [ - admin.register_servlets, + synapse.rest.admin.register_servlets, room.register_servlets, login.register_servlets, ] diff --git a/tests/rest/client/v1/test_events.py b/tests/rest/client/v1/test_events.py index 36d8547275..5cb1c1ae9f 100644 --- a/tests/rest/client/v1/test_events.py +++ b/tests/rest/client/v1/test_events.py @@ -17,7 +17,8 @@ from mock import Mock, NonCallableMock -from synapse.rest.client.v1 import admin, events, login, room +import synapse.rest.admin +from synapse.rest.client.v1 import events, login, room from tests import unittest @@ -28,7 +29,7 @@ class 
EventStreamPermissionsTestCase(unittest.HomeserverTestCase): servlets = [ events.register_servlets, room.register_servlets, - admin.register_servlets, + synapse.rest.admin.register_servlets, login.register_servlets, ] diff --git a/tests/rest/client/v1/test_login.py b/tests/rest/client/v1/test_login.py index 86312f1096..8d9ef877f6 100644 --- a/tests/rest/client/v1/test_login.py +++ b/tests/rest/client/v1/test_login.py @@ -1,6 +1,7 @@ import json -from synapse.rest.client.v1 import admin, login +import synapse.rest.admin +from synapse.rest.client.v1 import login from tests import unittest @@ -10,7 +11,7 @@ LOGIN_URL = b"/_matrix/client/r0/login" class LoginRestServletTestCase(unittest.HomeserverTestCase): servlets = [ - admin.register_servlets, + synapse.rest.admin.register_servlets, login.register_servlets, ] diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index 015c144248..1a34924f3e 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -22,8 +22,9 @@ from six.moves.urllib import parse as urlparse from twisted.internet import defer +import synapse.rest.admin from synapse.api.constants import Membership -from synapse.rest.client.v1 import admin, login, room +from synapse.rest.client.v1 import login, room from tests import unittest @@ -803,7 +804,7 @@ class RoomMessageListTestCase(RoomBase): class RoomSearchTestCase(unittest.HomeserverTestCase): servlets = [ - admin.register_servlets, + synapse.rest.admin.register_servlets, room.register_servlets, login.register_servlets, ] diff --git a/tests/rest/client/v2_alpha/test_auth.py b/tests/rest/client/v2_alpha/test_auth.py index 7fa120a10f..67021185d0 100644 --- a/tests/rest/client/v2_alpha/test_auth.py +++ b/tests/rest/client/v2_alpha/test_auth.py @@ -16,8 +16,8 @@ from twisted.internet.defer import succeed +import synapse.rest.admin from synapse.api.constants import LoginType -from synapse.rest.client.v1 import admin from synapse.rest.client.v2_alpha import auth, register from tests import unittest @@ -27,7 +27,7 @@ class FallbackAuthTests(unittest.HomeserverTestCase): servlets = [ auth.register_servlets, - admin.register_servlets, + synapse.rest.admin.register_servlets, register.register_servlets, ] hijack_auth = False diff --git a/tests/rest/client/v2_alpha/test_capabilities.py b/tests/rest/client/v2_alpha/test_capabilities.py index bbfc77e829..8134163e20 100644 --- a/tests/rest/client/v2_alpha/test_capabilities.py +++ b/tests/rest/client/v2_alpha/test_capabilities.py @@ -12,9 +12,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- +import synapse.rest.admin from synapse.api.room_versions import DEFAULT_ROOM_VERSION, KNOWN_ROOM_VERSIONS -from synapse.rest.client.v1 import admin, login +from synapse.rest.client.v1 import login from synapse.rest.client.v2_alpha import capabilities from tests import unittest @@ -23,7 +23,7 @@ from tests import unittest class CapabilitiesTestCase(unittest.HomeserverTestCase): servlets = [ - admin.register_servlets, + synapse.rest.admin.register_servlets, capabilities.register_servlets, login.register_servlets, ] diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 3d44667489..4d698af03a 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -4,10 +4,11 @@ import os import pkg_resources +import synapse.rest.admin from synapse.api.constants import LoginType from synapse.api.errors import Codes from synapse.appservice import ApplicationService -from synapse.rest.client.v1 import admin, login +from synapse.rest.client.v1 import login from synapse.rest.client.v2_alpha import account_validity, register, sync from tests import unittest @@ -198,7 +199,7 @@ class AccountValidityTestCase(unittest.HomeserverTestCase): servlets = [ register.register_servlets, - admin.register_servlets, + synapse.rest.admin.register_servlets, login.register_servlets, sync.register_servlets, account_validity.register_servlets, @@ -307,7 +308,7 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): skip = "No Jinja installed" if not load_jinja2_templates else None servlets = [ register.register_servlets, - admin.register_servlets, + synapse.rest.admin.register_servlets, login.register_servlets, sync.register_servlets, account_validity.register_servlets, diff --git a/tests/rest/client/v2_alpha/test_sync.py b/tests/rest/client/v2_alpha/test_sync.py index 99b716f00a..65fac1d5ce 100644 --- a/tests/rest/client/v2_alpha/test_sync.py +++ b/tests/rest/client/v2_alpha/test_sync.py @@ -15,7 +15,8 @@ from mock import Mock -from synapse.rest.client.v1 import admin, login, room +import synapse.rest.admin +from synapse.rest.client.v1 import login, room from synapse.rest.client.v2_alpha import sync from tests import unittest @@ -72,7 +73,7 @@ class FilterTestCase(unittest.HomeserverTestCase): class SyncTypingTests(unittest.HomeserverTestCase): servlets = [ - admin.register_servlets, + synapse.rest.admin.register_servlets, room.register_servlets, login.register_servlets, sync.register_servlets, diff --git a/tests/server_notices/test_consent.py b/tests/server_notices/test_consent.py index 95badc985e..e8b8ac5725 100644 --- a/tests/server_notices/test_consent.py +++ b/tests/server_notices/test_consent.py @@ -12,8 +12,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- -from synapse.rest.client.v1 import admin, login, room +import synapse.rest.admin +from synapse.rest.client.v1 import login, room from synapse.rest.client.v2_alpha import sync from tests import unittest @@ -23,7 +23,7 @@ class ConsentNoticesTests(unittest.HomeserverTestCase): servlets = [ sync.register_servlets, - admin.register_servlets, + synapse.rest.admin.register_servlets, login.register_servlets, room.register_servlets, ] diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py index 858efe4992..b0f6fd34d8 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py @@ -18,8 +18,9 @@ from mock import Mock from twisted.internet import defer +import synapse.rest.admin from synapse.http.site import XForwardedForRequest -from synapse.rest.client.v1 import admin, login +from synapse.rest.client.v1 import login from tests import unittest @@ -205,7 +206,7 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): class ClientIpAuthTestCase(unittest.HomeserverTestCase): - servlets = [admin.register_servlets, login.register_servlets] + servlets = [synapse.rest.admin.register_servlets, login.register_servlets] def make_homeserver(self, reactor, clock): hs = self.setup_test_homeserver() From 03ad6bd4832ce23ac8d2f5d1308cb2caefc651e2 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 1 May 2019 15:34:38 +0100 Subject: [PATCH 068/429] changelog --- changelog.d/5119.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5119.feature diff --git a/changelog.d/5119.feature b/changelog.d/5119.feature new file mode 100644 index 0000000000..a3a73f09d3 --- /dev/null +++ b/changelog.d/5119.feature @@ -0,0 +1 @@ +Move admin APIs to `/_synapse/admin/v1`. (The old paths are retained for backwards-compatibility, for now). \ No newline at end of file From cc4bd762df35dfceb8250d85f400f9f691477eff Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 1 May 2019 16:48:23 +0100 Subject: [PATCH 069/429] Fix sample config --- docs/sample_config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index a7f6bf31ac..61be7502bc 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -136,8 +136,8 @@ pid_file: DATADIR/homeserver.pid # # Valid resource names are: # -# client: the client-server API (/_matrix/client). Also implies 'media' and -# 'static'. +# client: the client-server API (/_matrix/client), and the synapse admin +# API (/_synapse/admin). Also implies 'media' and 'static'. # # consent: user consent forms (/_matrix/consent). See # docs/consent_tracking.md. From f203c98794aa5344c1cbc26e526bd334fd80a1c2 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 1 May 2019 17:49:11 +0100 Subject: [PATCH 070/429] fix examples --- synapse/rest/admin/__init__.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index ddf9ced1a3..afac8b1724 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -640,7 +640,7 @@ class ResetPasswordRestServlet(RestServlet): """Post request to allow an administrator reset password for a user. This needs user to have administrator access in Synapse. 
Example: - http://localhost:8008/_matrix/client/api/v1/admin/reset_password/ + http://localhost:8008/_synapse/admin/v1/reset_password/ @user:to_reset_password?access_token=admin_access_token JsonBodyToSend: { @@ -683,7 +683,7 @@ class GetUsersPaginatedRestServlet(RestServlet): """Get request to get specific number of users from Synapse. This needs user to have administrator access in Synapse. Example: - http://localhost:8008/_matrix/client/api/v1/admin/users_paginate/ + http://localhost:8008/_synapse/admin/v1/users_paginate/ @admin:user?access_token=admin_access_token&start=0&limit=10 Returns: 200 OK with json object {list[dict[str, Any]], count} or empty object. @@ -731,7 +731,7 @@ class GetUsersPaginatedRestServlet(RestServlet): """Post request to get specific number of users from Synapse.. This needs user to have administrator access in Synapse. Example: - http://localhost:8008/_matrix/client/api/v1/admin/users_paginate/ + http://localhost:8008/_synapse/admin/v1/users_paginate/ @admin:user?access_token=admin_access_token JsonBodyToSend: { @@ -766,7 +766,7 @@ class SearchUsersRestServlet(RestServlet): search term. This needs user to have administrator access in Synapse. Example: - http://localhost:8008/_matrix/client/api/v1/admin/search_users/ + http://localhost:8008/_synapse/admin/v1/search_users/ @admin:user?access_token=admin_access_token&term=alice Returns: 200 OK with json object {list[dict[str, Any]], count} or empty object. From 0836cbb9f5aaf9260161fa998b2fe4c0ea7e692b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 2 May 2019 10:45:52 +0100 Subject: [PATCH 071/429] Factor out an "assert_requester_is_admin" function (#5120) Rather than copying-and-pasting the same four lines hundreds of times --- changelog.d/5120.misc | 1 + synapse/api/auth.py | 2 +- synapse/rest/admin/__init__.py | 95 +++++++--------------------------- synapse/rest/admin/_base.py | 59 +++++++++++++++++++++ 4 files changed, 81 insertions(+), 76 deletions(-) create mode 100644 changelog.d/5120.misc create mode 100644 synapse/rest/admin/_base.py diff --git a/changelog.d/5120.misc b/changelog.d/5120.misc new file mode 100644 index 0000000000..6575f29322 --- /dev/null +++ b/changelog.d/5120.misc @@ -0,0 +1 @@ +Factor out an "assert_requester_is_admin" function. diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 960e66dbdc..0c6c93a87b 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -556,7 +556,7 @@ class Auth(object): """ Check if the given user is a local server admin. 
Args: - user (str): mxid of user to check + user (UserID): user to check Returns: bool: True if the user is an admin diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index afac8b1724..d02f5198b8 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -36,6 +36,7 @@ from synapse.http.servlet import ( parse_json_object_from_request, parse_string, ) +from synapse.rest.admin._base import assert_requester_is_admin, assert_user_is_admin from synapse.types import UserID, create_requester from synapse.util.versionstring import get_version_string @@ -75,15 +76,7 @@ class UsersRestServlet(RestServlet): @defer.inlineCallbacks def on_GET(self, request, user_id): target_user = UserID.from_string(user_id) - requester = yield self.auth.get_user_by_req(request) - is_admin = yield self.auth.is_server_admin(requester.user) - - if not is_admin: - raise AuthError(403, "You are not a server admin") - - # To allow all users to get the users list - # if not is_admin and target_user != auth_user: - # raise AuthError(403, "You are not a server admin") + yield assert_requester_is_admin(self.auth, request) if not self.hs.is_mine(target_user): raise SynapseError(400, "Can only users a local user") @@ -101,11 +94,7 @@ class VersionServlet(RestServlet): @defer.inlineCallbacks def on_GET(self, request): - requester = yield self.auth.get_user_by_req(request) - is_admin = yield self.auth.is_server_admin(requester.user) - - if not is_admin: - raise AuthError(403, "You are not a server admin") + yield assert_requester_is_admin(self.auth, request) ret = { 'server_version': get_version_string(synapse), @@ -265,10 +254,9 @@ class WhoisRestServlet(RestServlet): target_user = UserID.from_string(user_id) requester = yield self.auth.get_user_by_req(request) auth_user = requester.user - is_admin = yield self.auth.is_server_admin(requester.user) - if not is_admin and target_user != auth_user: - raise AuthError(403, "You are not a server admin") + if target_user != auth_user: + yield assert_user_is_admin(self.auth, auth_user) if not self.hs.is_mine(target_user): raise SynapseError(400, "Can only whois a local user") @@ -287,11 +275,7 @@ class PurgeMediaCacheRestServlet(RestServlet): @defer.inlineCallbacks def on_POST(self, request): - requester = yield self.auth.get_user_by_req(request) - is_admin = yield self.auth.is_server_admin(requester.user) - - if not is_admin: - raise AuthError(403, "You are not a server admin") + yield assert_requester_is_admin(self.auth, request) before_ts = parse_integer(request, "before_ts", required=True) logger.info("before_ts: %r", before_ts) @@ -318,11 +302,7 @@ class PurgeHistoryRestServlet(RestServlet): @defer.inlineCallbacks def on_POST(self, request, room_id, event_id): - requester = yield self.auth.get_user_by_req(request) - is_admin = yield self.auth.is_server_admin(requester.user) - - if not is_admin: - raise AuthError(403, "You are not a server admin") + yield assert_requester_is_admin(self.auth, request) body = parse_json_object_from_request(request, allow_empty_body=True) @@ -414,11 +394,7 @@ class PurgeHistoryStatusRestServlet(RestServlet): @defer.inlineCallbacks def on_GET(self, request, purge_id): - requester = yield self.auth.get_user_by_req(request) - is_admin = yield self.auth.is_server_admin(requester.user) - - if not is_admin: - raise AuthError(403, "You are not a server admin") + yield assert_requester_is_admin(self.auth, request) purge_status = self.pagination_handler.get_purge_status(purge_id) if purge_status is None: @@ 
-436,6 +412,7 @@ class DeactivateAccountRestServlet(RestServlet): @defer.inlineCallbacks def on_POST(self, request, target_user_id): + yield assert_requester_is_admin(self.auth, request) body = parse_json_object_from_request(request, allow_empty_body=True) erase = body.get("erase", False) if not isinstance(erase, bool): @@ -446,11 +423,6 @@ class DeactivateAccountRestServlet(RestServlet): ) UserID.from_string(target_user_id) - requester = yield self.auth.get_user_by_req(request) - is_admin = yield self.auth.is_server_admin(requester.user) - - if not is_admin: - raise AuthError(403, "You are not a server admin") result = yield self._deactivate_account_handler.deactivate_account( target_user_id, erase, @@ -490,9 +462,7 @@ class ShutdownRoomRestServlet(RestServlet): @defer.inlineCallbacks def on_POST(self, request, room_id): requester = yield self.auth.get_user_by_req(request) - is_admin = yield self.auth.is_server_admin(requester.user) - if not is_admin: - raise AuthError(403, "You are not a server admin") + yield assert_user_is_admin(self.auth, requester.user) content = parse_json_object_from_request(request) assert_params_in_dict(content, ["new_room_user_id"]) @@ -605,9 +575,7 @@ class QuarantineMediaInRoom(RestServlet): @defer.inlineCallbacks def on_POST(self, request, room_id): requester = yield self.auth.get_user_by_req(request) - is_admin = yield self.auth.is_server_admin(requester.user) - if not is_admin: - raise AuthError(403, "You are not a server admin") + yield assert_user_is_admin(self.auth, requester.user) num_quarantined = yield self.store.quarantine_media_ids_in_room( room_id, requester.user.to_string(), @@ -662,12 +630,10 @@ class ResetPasswordRestServlet(RestServlet): """Post request to allow an administrator reset password for a user. This needs user to have administrator access in Synapse. """ - UserID.from_string(target_user_id) requester = yield self.auth.get_user_by_req(request) - is_admin = yield self.auth.is_server_admin(requester.user) + yield assert_user_is_admin(self.auth, requester.user) - if not is_admin: - raise AuthError(403, "You are not a server admin") + UserID.from_string(target_user_id) params = parse_json_object_from_request(request) assert_params_in_dict(params, ["new_password"]) @@ -701,16 +667,9 @@ class GetUsersPaginatedRestServlet(RestServlet): """Get request to get specific number of users from Synapse. This needs user to have administrator access in Synapse. """ + yield assert_requester_is_admin(self.auth, request) + target_user = UserID.from_string(target_user_id) - requester = yield self.auth.get_user_by_req(request) - is_admin = yield self.auth.is_server_admin(requester.user) - - if not is_admin: - raise AuthError(403, "You are not a server admin") - - # To allow all users to get the users list - # if not is_admin and target_user != auth_user: - # raise AuthError(403, "You are not a server admin") if not self.hs.is_mine(target_user): raise SynapseError(400, "Can only users a local user") @@ -741,12 +700,8 @@ class GetUsersPaginatedRestServlet(RestServlet): Returns: 200 OK with json object {list[dict[str, Any]], count} or empty object. 
""" + yield assert_requester_is_admin(self.auth, request) UserID.from_string(target_user_id) - requester = yield self.auth.get_user_by_req(request) - is_admin = yield self.auth.is_server_admin(requester.user) - - if not is_admin: - raise AuthError(403, "You are not a server admin") order = "name" # order by name in user table params = parse_json_object_from_request(request) @@ -785,12 +740,9 @@ class SearchUsersRestServlet(RestServlet): search term. This needs user to have a administrator access in Synapse. """ - target_user = UserID.from_string(target_user_id) - requester = yield self.auth.get_user_by_req(request) - is_admin = yield self.auth.is_server_admin(requester.user) + yield assert_requester_is_admin(self.auth, request) - if not is_admin: - raise AuthError(403, "You are not a server admin") + target_user = UserID.from_string(target_user_id) # To allow all users to get the users list # if not is_admin and target_user != auth_user: @@ -821,10 +773,7 @@ class DeleteGroupAdminRestServlet(RestServlet): @defer.inlineCallbacks def on_POST(self, request, group_id): requester = yield self.auth.get_user_by_req(request) - is_admin = yield self.auth.is_server_admin(requester.user) - - if not is_admin: - raise AuthError(403, "You are not a server admin") + yield assert_user_is_admin(self.auth, requester.user) if not self.is_mine_id(group_id): raise SynapseError(400, "Can only delete local groups") @@ -847,11 +796,7 @@ class AccountValidityRenewServlet(RestServlet): @defer.inlineCallbacks def on_POST(self, request): - requester = yield self.auth.get_user_by_req(request) - is_admin = yield self.auth.is_server_admin(requester.user) - - if not is_admin: - raise AuthError(403, "You are not a server admin") + yield assert_requester_is_admin(self.auth, request) body = parse_json_object_from_request(request) diff --git a/synapse/rest/admin/_base.py b/synapse/rest/admin/_base.py new file mode 100644 index 0000000000..881d67b89c --- /dev/null +++ b/synapse/rest/admin/_base.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from twisted.internet import defer + +from synapse.api.errors import AuthError + + +@defer.inlineCallbacks +def assert_requester_is_admin(auth, request): + """Verify that the requester is an admin user + + WARNING: MAKE SURE YOU YIELD ON THE RESULT! + + Args: + auth (synapse.api.auth.Auth): + request (twisted.web.server.Request): incoming request + + Returns: + Deferred + + Raises: + AuthError if the requester is not an admin + """ + requester = yield auth.get_user_by_req(request) + yield assert_user_is_admin(auth, requester.user) + + +@defer.inlineCallbacks +def assert_user_is_admin(auth, user_id): + """Verify that the given user is an admin user + + WARNING: MAKE SURE YOU YIELD ON THE RESULT! 
+ + Args: + auth (synapse.api.auth.Auth): + user_id (UserID): + + Returns: + Deferred + + Raises: + AuthError if the user is not an admin + """ + + is_admin = yield auth.is_server_admin(user_id) + if not is_admin: + raise AuthError(403, "You are not a server admin") From 84196cb2310ca55100f32cfecbc62810085418e5 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 2 May 2019 09:21:29 +0100 Subject: [PATCH 072/429] Add some limitations to alias creation --- changelog.d/5124.bugfix | 1 + docs/sample_config.yaml | 5 +++++ synapse/config/server.py | 11 +++++++++++ synapse/handlers/directory.py | 22 +++++++++++++++++++++- synapse/handlers/room.py | 3 ++- 5 files changed, 40 insertions(+), 2 deletions(-) create mode 100644 changelog.d/5124.bugfix diff --git a/changelog.d/5124.bugfix b/changelog.d/5124.bugfix new file mode 100644 index 0000000000..46df1e9fd5 --- /dev/null +++ b/changelog.d/5124.bugfix @@ -0,0 +1 @@ +Add some missing limitations to room alias creation. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index a7f6bf31ac..ec18516b5c 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -239,6 +239,11 @@ listeners: # Used by phonehome stats to group together related servers. #server_context: context +# Whether to require a user to be in the room to add an alias to it. +# Defaults to 'true'. +# +#require_membership_for_aliases: false + ## TLS ## diff --git a/synapse/config/server.py b/synapse/config/server.py index cdf1e4d286..b1c9d25c71 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -134,6 +134,12 @@ class ServerConfig(Config): # sending out any replication updates. self.replication_torture_level = config.get("replication_torture_level") + # Whether to require a user to be in the room to add an alias to it. + # Defaults to True. + self.require_membership_for_aliases = config.get( + "require_membership_for_aliases", True, + ) + self.listeners = [] for listener in config.get("listeners", []): if not isinstance(listener.get("port", None), int): @@ -490,6 +496,11 @@ class ServerConfig(Config): # Used by phonehome stats to group together related servers. #server_context: context + + # Whether to require a user to be in the room to add an alias to it. + # Defaults to 'true'. 
+ # + #require_membership_for_aliases: false """ % locals() def read_arguments(self, args): diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 27bd06df5d..50c587aa61 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -36,6 +36,7 @@ logger = logging.getLogger(__name__) class DirectoryHandler(BaseHandler): + MAX_ALIAS_LENGTH = 255 def __init__(self, hs): super(DirectoryHandler, self).__init__(hs) @@ -43,8 +44,10 @@ class DirectoryHandler(BaseHandler): self.state = hs.get_state_handler() self.appservice_handler = hs.get_application_service_handler() self.event_creation_handler = hs.get_event_creation_handler() + self.store = hs.get_datastore() self.config = hs.config self.enable_room_list_search = hs.config.enable_room_list_search + self.require_membership = hs.config.require_membership_for_aliases self.federation = hs.get_federation_client() hs.get_federation_registry().register_query_handler( @@ -83,7 +86,7 @@ class DirectoryHandler(BaseHandler): @defer.inlineCallbacks def create_association(self, requester, room_alias, room_id, servers=None, - send_event=True): + send_event=True, check_membership=True): """Attempt to create a new alias Args: @@ -93,6 +96,8 @@ class DirectoryHandler(BaseHandler): servers (list[str]|None): List of servers that others servers should try and join via send_event (bool): Whether to send an updated m.room.aliases event + check_membership (bool): Whether to check if the user is in the room + before the alias can be set (if the server's config requires it). Returns: Deferred @@ -100,6 +105,13 @@ class DirectoryHandler(BaseHandler): user_id = requester.user.to_string() + if len(room_alias.to_string()) > self.MAX_ALIAS_LENGTH: + raise SynapseError( + 400, + "Can't create aliases longer than %s characters" % self.MAX_ALIAS_LENGTH, + Codes.INVALID_PARAM, + ) + service = requester.app_service if service: if not service.is_interested_in_alias(room_alias.to_string()): @@ -108,6 +120,14 @@ class DirectoryHandler(BaseHandler): " this kind of alias.", errcode=Codes.EXCLUSIVE ) else: + if self.require_membership and check_membership: + rooms_for_user = yield self.store.get_rooms_for_user(user_id) + if room_id not in rooms_for_user: + raise AuthError( + 403, + "You must be in the room to create an alias for it", + ) + if not self.spam_checker.user_may_create_room_alias(user_id, room_alias): raise AuthError( 403, "This user is not permitted to create this alias", diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 17628e2684..e37ae96899 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -402,7 +402,7 @@ class RoomCreationHandler(BaseHandler): yield directory_handler.create_association( requester, RoomAlias.from_string(alias), new_room_id, servers=(self.hs.hostname, ), - send_event=False, + send_event=False, check_membership=False, ) logger.info("Moved alias %s to new room", alias) except SynapseError as e: @@ -538,6 +538,7 @@ class RoomCreationHandler(BaseHandler): room_alias=room_alias, servers=[self.hs.hostname], send_event=False, + check_membership=False, ) preset_config = config.get( From 12f9d51e826058998cb11759e068de8977ddd3d5 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 2 May 2019 11:59:16 +0100 Subject: [PATCH 073/429] Add admin api for sending server_notices (#5121) --- changelog.d/5121.feature | 1 + docs/admin_api/server_notices.md | 48 +++++++++ docs/server_notices.md | 25 ++--- synapse/rest/__init__.py | 4 
+- synapse/rest/admin/__init__.py | 17 ++- synapse/rest/admin/server_notice_servlet.py | 100 ++++++++++++++++++ tests/handlers/test_user_directory.py | 4 +- tests/push/test_email.py | 2 +- tests/push/test_http.py | 2 +- tests/rest/admin/test_admin.py | 8 +- tests/rest/client/test_consent.py | 2 +- tests/rest/client/test_identity.py | 2 +- tests/rest/client/v1/test_events.py | 2 +- tests/rest/client/v1/test_login.py | 2 +- tests/rest/client/v1/test_rooms.py | 2 +- tests/rest/client/v2_alpha/test_auth.py | 2 +- .../rest/client/v2_alpha/test_capabilities.py | 2 +- tests/rest/client/v2_alpha/test_register.py | 4 +- tests/rest/client/v2_alpha/test_sync.py | 2 +- tests/server_notices/test_consent.py | 2 +- tests/storage/test_client_ips.py | 5 +- 21 files changed, 196 insertions(+), 42 deletions(-) create mode 100644 changelog.d/5121.feature create mode 100644 docs/admin_api/server_notices.md create mode 100644 synapse/rest/admin/server_notice_servlet.py diff --git a/changelog.d/5121.feature b/changelog.d/5121.feature new file mode 100644 index 0000000000..54b228680d --- /dev/null +++ b/changelog.d/5121.feature @@ -0,0 +1 @@ +Implement an admin API for sending server notices. Many thanks to @krombel who provided a foundation for this work. diff --git a/docs/admin_api/server_notices.md b/docs/admin_api/server_notices.md new file mode 100644 index 0000000000..5ddd21cfb2 --- /dev/null +++ b/docs/admin_api/server_notices.md @@ -0,0 +1,48 @@ +# Server Notices + +The API to send notices is as follows: + +``` +POST /_synapse/admin/v1/send_server_notice +``` + +or: + +``` +PUT /_synapse/admin/v1/send_server_notice/{txnId} +``` + +You will need to authenticate with an access token for an admin user. + +When using the `PUT` form, retransmissions with the same transaction ID will be +ignored in the same way as with `PUT +/_matrix/client/r0/rooms/{roomId}/send/{eventType}/{txnId}`. + +The request body should look something like the following: + +```json +{ + "user_id": "@target_user:server_name", + "content": { + "msgtype": "m.text", + "body": "This is my message" + } +} +``` + +You can optionally include the following additional parameters: + +* `type`: the type of event. Defaults to `m.room.message`. +* `state_key`: Setting this will result in a state event being sent. + + +Once the notice has been sent, the APU will return the following response: + +```json +{ + "event_id": "" +} +``` + +Note that server notices must be enabled in `homeserver.yaml` before this API +can be used. See [server_notices.md](../server_notices.md) for more information. diff --git a/docs/server_notices.md b/docs/server_notices.md index 58f8776319..950a6608e9 100644 --- a/docs/server_notices.md +++ b/docs/server_notices.md @@ -1,5 +1,4 @@ -Server Notices -============== +# Server Notices 'Server Notices' are a new feature introduced in Synapse 0.30. They provide a channel whereby server administrators can send messages to users on the server. @@ -11,8 +10,7 @@ they may also find a use for features such as "Message of the day". This is a feature specific to Synapse, but it uses standard Matrix communication mechanisms, so should work with any Matrix client. -User experience ---------------- +## User experience When the user is first sent a server notice, they will get an invitation to a room (typically called 'Server Notices', though this is configurable in @@ -29,8 +27,7 @@ levels. Having joined the room, the user can leave the room if they want. Subsequent server notices will then cause a new room to be created. 
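For completeness, a sketch of what sending a notice through the API documented above looks like from the admin's side, using `requests`; the homeserver URL, access token and target user ID are placeholders, and server notices must be enabled in `homeserver.yaml` for the request to succeed.

```python
# Sketch: send a server notice via the new admin endpoint.
# HOMESERVER, ADMIN_TOKEN and the target user ID are hypothetical.
import requests

HOMESERVER = "http://localhost:8008"
ADMIN_TOKEN = "admin_access_token"

resp = requests.post(
    HOMESERVER + "/_synapse/admin/v1/send_server_notice",
    params={"access_token": ADMIN_TOKEN},
    json={
        "user_id": "@target_user:example.com",
        "content": {"msgtype": "m.text", "body": "This is my message"},
    },
)
resp.raise_for_status()
print(resp.json())  # {"event_id": "..."}
```
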
-Synapse configuration ---------------------- +## Synapse configuration Server notices come from a specific user id on the server. Server administrators are free to choose the user id - something like `server` is @@ -58,17 +55,7 @@ room which will be created. `system_mxid_display_name` and `system_mxid_avatar_url` can be used to set the displayname and avatar of the Server Notices user. -Sending notices ---------------- +## Sending notices -As of the current version of synapse, there is no convenient interface for -sending notices (other than the automated ones sent as part of consent -tracking). - -In the meantime, it is possible to test this feature using the manhole. Having -gone into the manhole as described in [manhole.md](manhole.md), a notice can be -sent with something like: - -``` ->>> hs.get_server_notices_manager().send_notice('@user:server.com', {'msgtype':'m.text', 'body':'foo'}) -``` +To send server notices to users you can use the +[admin_api](admin_api/server_notices.md). diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index e8e1bcddea..3a24d31d1b 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -117,4 +117,6 @@ class ClientRestResource(JsonResource): account_validity.register_servlets(hs, client_resource) # moving to /_synapse/admin - synapse.rest.admin.register_servlets(hs, client_resource) + synapse.rest.admin.register_servlets_for_client_rest_resource( + hs, client_resource + ) diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index d02f5198b8..0ce89741f0 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -37,6 +37,7 @@ from synapse.http.servlet import ( parse_string, ) from synapse.rest.admin._base import assert_requester_is_admin, assert_user_is_admin +from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet from synapse.types import UserID, create_requester from synapse.util.versionstring import get_version_string @@ -813,16 +814,26 @@ class AccountValidityRenewServlet(RestServlet): } defer.returnValue((200, res)) +######################################################################################## +# +# please don't add more servlets here: this file is already long and unwieldy. Put +# them in separate files within the 'admin' package. +# +######################################################################################## + class AdminRestResource(JsonResource): """The REST resource which gets mounted at /_synapse/admin""" def __init__(self, hs): JsonResource.__init__(self, hs, canonical_json=False) - register_servlets(hs, self) + + register_servlets_for_client_rest_resource(hs, self) + SendServerNoticeServlet(hs).register(self) -def register_servlets(hs, http_server): +def register_servlets_for_client_rest_resource(hs, http_server): + """Register only the servlets which need to be exposed on /_matrix/client/xxx""" WhoisRestServlet(hs).register(http_server) PurgeMediaCacheRestServlet(hs).register(http_server) PurgeHistoryStatusRestServlet(hs).register(http_server) @@ -839,3 +850,5 @@ def register_servlets(hs, http_server): VersionServlet(hs).register(http_server) DeleteGroupAdminRestServlet(hs).register(http_server) AccountValidityRenewServlet(hs).register(http_server) + # don't add more things here: new servlets should only be exposed on + # /_synapse/admin so should not go here. Instead register them in AdminRestResource. 
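Because the servlet implemented below routes `PUT` requests through `HttpTransactionCache`, a retransmission with the same transaction ID should be deduplicated rather than producing a second notice. A hedged sketch of a retry-safe call, with the same placeholder homeserver and token as above:

```python
# Sketch: idempotent retry using the PUT form with a caller-chosen txn ID.
# A retransmission with the same txn_id should return the same event_id.
import requests

HOMESERVER = "http://localhost:8008"
ADMIN_TOKEN = "admin_access_token"
txn_id = "notice-2019-05-02-001"  # hypothetical, chosen by the caller

body = {
    "user_id": "@target_user:example.com",
    "content": {"msgtype": "m.text", "body": "Scheduled maintenance at 02:00 UTC"},
}

for _ in range(2):  # the second attempt simulates a retry after a timeout
    resp = requests.put(
        "%s/_synapse/admin/v1/send_server_notice/%s" % (HOMESERVER, txn_id),
        params={"access_token": ADMIN_TOKEN},
        json=body,
    )
    print(resp.json())  # both attempts should report the same event_id
```
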
diff --git a/synapse/rest/admin/server_notice_servlet.py b/synapse/rest/admin/server_notice_servlet.py new file mode 100644 index 0000000000..ae5aca9dac --- /dev/null +++ b/synapse/rest/admin/server_notice_servlet.py @@ -0,0 +1,100 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import re + +from twisted.internet import defer + +from synapse.api.constants import EventTypes +from synapse.api.errors import SynapseError +from synapse.http.servlet import ( + RestServlet, + assert_params_in_dict, + parse_json_object_from_request, +) +from synapse.rest.admin import assert_requester_is_admin +from synapse.rest.client.transactions import HttpTransactionCache +from synapse.types import UserID + + +class SendServerNoticeServlet(RestServlet): + """Servlet which will send a server notice to a given user + + POST /_synapse/admin/v1/send_server_notice + { + "user_id": "@target_user:server_name", + "content": { + "msgtype": "m.text", + "body": "This is my message" + } + } + + returns: + + { + "event_id": "$1895723857jgskldgujpious" + } + """ + def __init__(self, hs): + """ + Args: + hs (synapse.server.HomeServer): server + """ + self.hs = hs + self.auth = hs.get_auth() + self.txns = HttpTransactionCache(hs) + self.snm = hs.get_server_notices_manager() + + def register(self, json_resource): + PATTERN = "^/_synapse/admin/v1/send_server_notice" + json_resource.register_paths( + "POST", + (re.compile(PATTERN + "$"), ), + self.on_POST, + ) + json_resource.register_paths( + "PUT", + (re.compile(PATTERN + "/(?P[^/]*)$",), ), + self.on_PUT, + ) + + @defer.inlineCallbacks + def on_POST(self, request, txn_id=None): + yield assert_requester_is_admin(self.auth, request) + body = parse_json_object_from_request(request) + assert_params_in_dict(body, ("user_id", "content")) + event_type = body.get("type", EventTypes.Message) + state_key = body.get("state_key") + + if not self.snm.is_enabled(): + raise SynapseError(400, "Server notices are not enabled on this server") + + user_id = body["user_id"] + UserID.from_string(user_id) + if not self.hs.is_mine_id(user_id): + raise SynapseError(400, "Server notices can only be sent to local users") + + event = yield self.snm.send_notice( + user_id=body["user_id"], + type=event_type, + state_key=state_key, + event_content=body["content"], + ) + + defer.returnValue((200, {"event_id": event.event_id})) + + def on_PUT(self, request, txn_id): + return self.txns.fetch_or_execute_request( + request, self.on_POST, request, txn_id, + ) diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index 32ef83e9c0..7dd1a1daf8 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -30,7 +30,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): servlets = [ login.register_servlets, - synapse.rest.admin.register_servlets, + synapse.rest.admin.register_servlets_for_client_rest_resource, room.register_servlets, ] @@ -328,7 +328,7 @@ class 
TestUserDirSearchDisabled(unittest.HomeserverTestCase): user_directory.register_servlets, room.register_servlets, login.register_servlets, - synapse.rest.admin.register_servlets, + synapse.rest.admin.register_servlets_for_client_rest_resource, ] def make_homeserver(self, reactor, clock): diff --git a/tests/push/test_email.py b/tests/push/test_email.py index e29bd18ad7..325ea449ae 100644 --- a/tests/push/test_email.py +++ b/tests/push/test_email.py @@ -34,7 +34,7 @@ class EmailPusherTests(HomeserverTestCase): skip = "No Jinja installed" if not load_jinja2_templates else None servlets = [ - synapse.rest.admin.register_servlets, + synapse.rest.admin.register_servlets_for_client_rest_resource, room.register_servlets, login.register_servlets, ] diff --git a/tests/push/test_http.py b/tests/push/test_http.py index 3f9f56bb79..13bd2c8688 100644 --- a/tests/push/test_http.py +++ b/tests/push/test_http.py @@ -33,7 +33,7 @@ class HTTPPusherTests(HomeserverTestCase): skip = "No Jinja installed" if not load_jinja2_templates else None servlets = [ - synapse.rest.admin.register_servlets, + synapse.rest.admin.register_servlets_for_client_rest_resource, room.register_servlets, login.register_servlets, ] diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py index 42858b5fea..db4cfd8550 100644 --- a/tests/rest/admin/test_admin.py +++ b/tests/rest/admin/test_admin.py @@ -30,7 +30,7 @@ from tests import unittest class VersionTestCase(unittest.HomeserverTestCase): servlets = [ - synapse.rest.admin.register_servlets, + synapse.rest.admin.register_servlets_for_client_rest_resource, login.register_servlets, ] @@ -63,7 +63,7 @@ class VersionTestCase(unittest.HomeserverTestCase): class UserRegisterTestCase(unittest.HomeserverTestCase): - servlets = [synapse.rest.admin.register_servlets] + servlets = [synapse.rest.admin.register_servlets_for_client_rest_resource] def make_homeserver(self, reactor, clock): @@ -359,7 +359,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): class ShutdownRoomTestCase(unittest.HomeserverTestCase): servlets = [ - synapse.rest.admin.register_servlets, + synapse.rest.admin.register_servlets_for_client_rest_resource, login.register_servlets, events.register_servlets, room.register_servlets, @@ -496,7 +496,7 @@ class ShutdownRoomTestCase(unittest.HomeserverTestCase): class DeleteGroupTestCase(unittest.HomeserverTestCase): servlets = [ - synapse.rest.admin.register_servlets, + synapse.rest.admin.register_servlets_for_client_rest_resource, login.register_servlets, groups.register_servlets, ] diff --git a/tests/rest/client/test_consent.py b/tests/rest/client/test_consent.py index 36e6c1c67d..5528971190 100644 --- a/tests/rest/client/test_consent.py +++ b/tests/rest/client/test_consent.py @@ -32,7 +32,7 @@ except Exception: class ConsentResourceTestCase(unittest.HomeserverTestCase): skip = "No Jinja installed" if not load_jinja2_templates else None servlets = [ - synapse.rest.admin.register_servlets, + synapse.rest.admin.register_servlets_for_client_rest_resource, room.register_servlets, login.register_servlets, ] diff --git a/tests/rest/client/test_identity.py b/tests/rest/client/test_identity.py index d4fe0aee7d..2e51ffa418 100644 --- a/tests/rest/client/test_identity.py +++ b/tests/rest/client/test_identity.py @@ -24,7 +24,7 @@ from tests import unittest class IdentityTestCase(unittest.HomeserverTestCase): servlets = [ - synapse.rest.admin.register_servlets, + synapse.rest.admin.register_servlets_for_client_rest_resource, room.register_servlets, 
login.register_servlets, ] diff --git a/tests/rest/client/v1/test_events.py b/tests/rest/client/v1/test_events.py index 5cb1c1ae9f..8a9a55a527 100644 --- a/tests/rest/client/v1/test_events.py +++ b/tests/rest/client/v1/test_events.py @@ -29,7 +29,7 @@ class EventStreamPermissionsTestCase(unittest.HomeserverTestCase): servlets = [ events.register_servlets, room.register_servlets, - synapse.rest.admin.register_servlets, + synapse.rest.admin.register_servlets_for_client_rest_resource, login.register_servlets, ] diff --git a/tests/rest/client/v1/test_login.py b/tests/rest/client/v1/test_login.py index 8d9ef877f6..9ebd91f678 100644 --- a/tests/rest/client/v1/test_login.py +++ b/tests/rest/client/v1/test_login.py @@ -11,7 +11,7 @@ LOGIN_URL = b"/_matrix/client/r0/login" class LoginRestServletTestCase(unittest.HomeserverTestCase): servlets = [ - synapse.rest.admin.register_servlets, + synapse.rest.admin.register_servlets_for_client_rest_resource, login.register_servlets, ] diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index 1a34924f3e..521ac80f9a 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -804,7 +804,7 @@ class RoomMessageListTestCase(RoomBase): class RoomSearchTestCase(unittest.HomeserverTestCase): servlets = [ - synapse.rest.admin.register_servlets, + synapse.rest.admin.register_servlets_for_client_rest_resource, room.register_servlets, login.register_servlets, ] diff --git a/tests/rest/client/v2_alpha/test_auth.py b/tests/rest/client/v2_alpha/test_auth.py index 67021185d0..0ca3c4657b 100644 --- a/tests/rest/client/v2_alpha/test_auth.py +++ b/tests/rest/client/v2_alpha/test_auth.py @@ -27,7 +27,7 @@ class FallbackAuthTests(unittest.HomeserverTestCase): servlets = [ auth.register_servlets, - synapse.rest.admin.register_servlets, + synapse.rest.admin.register_servlets_for_client_rest_resource, register.register_servlets, ] hijack_auth = False diff --git a/tests/rest/client/v2_alpha/test_capabilities.py b/tests/rest/client/v2_alpha/test_capabilities.py index 8134163e20..f3ef977404 100644 --- a/tests/rest/client/v2_alpha/test_capabilities.py +++ b/tests/rest/client/v2_alpha/test_capabilities.py @@ -23,7 +23,7 @@ from tests import unittest class CapabilitiesTestCase(unittest.HomeserverTestCase): servlets = [ - synapse.rest.admin.register_servlets, + synapse.rest.admin.register_servlets_for_client_rest_resource, capabilities.register_servlets, login.register_servlets, ] diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 4d698af03a..1c3a621d26 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -199,7 +199,7 @@ class AccountValidityTestCase(unittest.HomeserverTestCase): servlets = [ register.register_servlets, - synapse.rest.admin.register_servlets, + synapse.rest.admin.register_servlets_for_client_rest_resource, login.register_servlets, sync.register_servlets, account_validity.register_servlets, @@ -308,7 +308,7 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): skip = "No Jinja installed" if not load_jinja2_templates else None servlets = [ register.register_servlets, - synapse.rest.admin.register_servlets, + synapse.rest.admin.register_servlets_for_client_rest_resource, login.register_servlets, sync.register_servlets, account_validity.register_servlets, diff --git a/tests/rest/client/v2_alpha/test_sync.py b/tests/rest/client/v2_alpha/test_sync.py index 
65fac1d5ce..71895094bd 100644 --- a/tests/rest/client/v2_alpha/test_sync.py +++ b/tests/rest/client/v2_alpha/test_sync.py @@ -73,7 +73,7 @@ class FilterTestCase(unittest.HomeserverTestCase): class SyncTypingTests(unittest.HomeserverTestCase): servlets = [ - synapse.rest.admin.register_servlets, + synapse.rest.admin.register_servlets_for_client_rest_resource, room.register_servlets, login.register_servlets, sync.register_servlets, diff --git a/tests/server_notices/test_consent.py b/tests/server_notices/test_consent.py index e8b8ac5725..e0b4e0eb63 100644 --- a/tests/server_notices/test_consent.py +++ b/tests/server_notices/test_consent.py @@ -23,7 +23,7 @@ class ConsentNoticesTests(unittest.HomeserverTestCase): servlets = [ sync.register_servlets, - synapse.rest.admin.register_servlets, + synapse.rest.admin.register_servlets_for_client_rest_resource, login.register_servlets, room.register_servlets, ] diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py index b0f6fd34d8..b62eae7abc 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py @@ -206,7 +206,10 @@ class ClientIpStoreTestCase(unittest.HomeserverTestCase): class ClientIpAuthTestCase(unittest.HomeserverTestCase): - servlets = [synapse.rest.admin.register_servlets, login.register_servlets] + servlets = [ + synapse.rest.admin.register_servlets_for_client_rest_resource, + login.register_servlets, + ] def make_homeserver(self, reactor, clock): hs = self.setup_test_homeserver() From 176f31c2e3e048353c4382cf5d1a34a1359f48b1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 2 May 2019 15:23:08 +0100 Subject: [PATCH 074/429] Rate limit early --- synapse/handlers/room_member.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index ad3df7cc7d..3e86b9c690 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -710,8 +710,9 @@ class RoomMemberHandler(object): Codes.FORBIDDEN, ) - # Check whether we'll be ratelimited - yield self.base_handler.ratelimit(requester, update=False) + # We need to rate limit *before* we send out any 3PID invites, so we + # can't just rely on the standard ratelimiting of events. + yield self.base_handler.ratelimit(requester) invitee = yield self._lookup_3pid( id_server, medium, address From 247dc1bd0bd9ee2b9525495c0dbd819baf10ec1f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 3 May 2019 12:38:03 +0100 Subject: [PATCH 075/429] Use SystemRandom for token generation --- changelog.d/5133.bugfix | 1 + synapse/util/stringutils.py | 9 +++++++-- 2 files changed, 8 insertions(+), 2 deletions(-) create mode 100644 changelog.d/5133.bugfix diff --git a/changelog.d/5133.bugfix b/changelog.d/5133.bugfix new file mode 100644 index 0000000000..12a32a906b --- /dev/null +++ b/changelog.d/5133.bugfix @@ -0,0 +1 @@ +Switch to using a cryptographically-secure random number generator for token strings, ensuring they cannot be predicted by an attacker. Thanks to @opnsec for for identifying and responsibly disclosing this issue! diff --git a/synapse/util/stringutils.py b/synapse/util/stringutils.py index fdcb375f95..69dffd8244 100644 --- a/synapse/util/stringutils.py +++ b/synapse/util/stringutils.py @@ -24,14 +24,19 @@ _string_with_symbols = ( string.digits + string.ascii_letters + ".,;:^&*-_+=#~@" ) +# random_string and random_string_with_symbols are used for a range of things, +# some cryptographically important, some less so. 
We use SystemRandom to make sure +# we get cryptographically-secure randoms. +rand = random.SystemRandom() + def random_string(length): - return ''.join(random.choice(string.ascii_letters) for _ in range(length)) + return ''.join(rand.choice(string.ascii_letters) for _ in range(length)) def random_string_with_symbols(length): return ''.join( - random.choice(_string_with_symbols) for _ in range(length) + rand.choice(_string_with_symbols) for _ in range(length) ) From 60c3635f0507699ccb63115ff974f54d45c90c83 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Fri, 3 May 2019 14:40:15 +0100 Subject: [PATCH 076/429] typo --- changelog.d/5133.bugfix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/5133.bugfix b/changelog.d/5133.bugfix index 12a32a906b..be6474a692 100644 --- a/changelog.d/5133.bugfix +++ b/changelog.d/5133.bugfix @@ -1 +1 @@ -Switch to using a cryptographically-secure random number generator for token strings, ensuring they cannot be predicted by an attacker. Thanks to @opnsec for for identifying and responsibly disclosing this issue! +Switch to using a cryptographically-secure random number generator for token strings, ensuring they cannot be predicted by an attacker. Thanks to @opnsec for identifying and responsibly disclosing this issue! From 1a7104fde3abc5392b90ca084efa896d46e24f91 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 3 May 2019 13:46:50 +0100 Subject: [PATCH 077/429] Blacklist 0.0.0.0 and :: by default for URL previews --- changelog.d/5134.bugfix | 1 + docs/sample_config.yaml | 14 +++++++++----- synapse/config/repository.py | 28 ++++++++++++++++++---------- 3 files changed, 28 insertions(+), 15 deletions(-) create mode 100644 changelog.d/5134.bugfix diff --git a/changelog.d/5134.bugfix b/changelog.d/5134.bugfix new file mode 100644 index 0000000000..684d48c53a --- /dev/null +++ b/changelog.d/5134.bugfix @@ -0,0 +1 @@ +Blacklist 0.0.0.0 and :: by default for URL previews. Thanks to @opnsec for identifying and responsibly disclosing this issue too! diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 4ada0fba0e..0589734b8a 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -506,11 +506,12 @@ uploads_path: "DATADIR/uploads" # height: 600 # method: scale -# Is the preview URL API enabled? If enabled, you *must* specify -# an explicit url_preview_ip_range_blacklist of IPs that the spider is -# denied from accessing. +# Is the preview URL API enabled? # -#url_preview_enabled: false +# 'false' by default: uncomment the following to enable it (and specify a +# url_preview_ip_range_blacklist blacklist). +# +#url_preview_enabled: true # List of IP address CIDR ranges that the URL preview spider is denied # from accessing. There are no defaults: you must explicitly @@ -520,6 +521,9 @@ uploads_path: "DATADIR/uploads" # synapse to issue arbitrary GET requests to your internal services, # causing serious security issues. # +# This must be specified if url_preview_enabled. It is recommended that you +# uncomment the following list as a starting point. +# #url_preview_ip_range_blacklist: # - '127.0.0.0/8' # - '10.0.0.0/8' @@ -530,7 +534,7 @@ uploads_path: "DATADIR/uploads" # - '::1/128' # - 'fe80::/64' # - 'fc00::/7' -# + # List of IP address CIDR ranges that the URL preview spider is allowed # to access even if they are specified in url_preview_ip_range_blacklist. 
# This is useful for specifying exceptions to wide-ranging blacklisted diff --git a/synapse/config/repository.py b/synapse/config/repository.py index 3f34ad9b2a..d155d69d8a 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -186,17 +186,21 @@ class ContentRepositoryConfig(Config): except ImportError: raise ConfigError(MISSING_NETADDR) - if "url_preview_ip_range_blacklist" in config: - self.url_preview_ip_range_blacklist = IPSet( - config["url_preview_ip_range_blacklist"] - ) - else: + if "url_preview_ip_range_blacklist" not in config: raise ConfigError( "For security, you must specify an explicit target IP address " "blacklist in url_preview_ip_range_blacklist for url previewing " "to work" ) + self.url_preview_ip_range_blacklist = IPSet( + config["url_preview_ip_range_blacklist"] + ) + + # we always blacklist '0.0.0.0' and '::', which are supposed to be + # unroutable addresses. + self.url_preview_ip_range_blacklist.update(['0.0.0.0', '::']) + self.url_preview_ip_range_whitelist = IPSet( config.get("url_preview_ip_range_whitelist", ()) ) @@ -260,11 +264,12 @@ class ContentRepositoryConfig(Config): #thumbnail_sizes: %(formatted_thumbnail_sizes)s - # Is the preview URL API enabled? If enabled, you *must* specify - # an explicit url_preview_ip_range_blacklist of IPs that the spider is - # denied from accessing. + # Is the preview URL API enabled? # - #url_preview_enabled: false + # 'false' by default: uncomment the following to enable it (and specify a + # url_preview_ip_range_blacklist blacklist). + # + #url_preview_enabled: true # List of IP address CIDR ranges that the URL preview spider is denied # from accessing. There are no defaults: you must explicitly @@ -274,6 +279,9 @@ class ContentRepositoryConfig(Config): # synapse to issue arbitrary GET requests to your internal services, # causing serious security issues. # + # This must be specified if url_preview_enabled. It is recommended that you + # uncomment the following list as a starting point. + # #url_preview_ip_range_blacklist: # - '127.0.0.0/8' # - '10.0.0.0/8' @@ -284,7 +292,7 @@ class ContentRepositoryConfig(Config): # - '::1/128' # - 'fe80::/64' # - 'fc00::/7' - # + # List of IP address CIDR ranges that the URL preview spider is allowed # to access even if they are specified in url_preview_ip_range_blacklist. # This is useful for specifying exceptions to wide-ranging blacklisted From 1565ebec2c7aa9f6f2a8b60227b405cae12e7170 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 3 May 2019 15:50:59 +0100 Subject: [PATCH 078/429] more config comment updates --- docs/sample_config.yaml | 7 +++++-- synapse/config/repository.py | 7 +++++-- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 0589734b8a..6ed75ff764 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -521,8 +521,11 @@ uploads_path: "DATADIR/uploads" # synapse to issue arbitrary GET requests to your internal services, # causing serious security issues. # -# This must be specified if url_preview_enabled. It is recommended that you -# uncomment the following list as a starting point. +# (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly +# listed here, since they correspond to unroutable addresses.) +# +# This must be specified if url_preview_enabled is set. It is recommended that +# you uncomment the following list as a starting point. 
# #url_preview_ip_range_blacklist: # - '127.0.0.0/8' diff --git a/synapse/config/repository.py b/synapse/config/repository.py index d155d69d8a..fbfcecc240 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -279,8 +279,11 @@ class ContentRepositoryConfig(Config): # synapse to issue arbitrary GET requests to your internal services, # causing serious security issues. # - # This must be specified if url_preview_enabled. It is recommended that you - # uncomment the following list as a starting point. + # (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly + # listed here, since they correspond to unroutable addresses.) + # + # This must be specified if url_preview_enabled is set. It is recommended that + # you uncomment the following list as a starting point. # #url_preview_ip_range_blacklist: # - '127.0.0.0/8' From 863ec0962210fcb946e68caa7431f69583814c73 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 3 May 2019 16:03:24 +0100 Subject: [PATCH 079/429] 0.99.3.1 --- CHANGES.md | 9 +++++++++ changelog.d/5133.bugfix | 1 - changelog.d/5134.bugfix | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 5 files changed, 16 insertions(+), 3 deletions(-) delete mode 100644 changelog.d/5133.bugfix delete mode 100644 changelog.d/5134.bugfix diff --git a/CHANGES.md b/CHANGES.md index 490c2021e0..d8eba2ec60 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 0.99.3.1 (2019-05-03) +============================= + +Bugfixes +-------- + +- Switch to using a cryptographically-secure random number generator for token strings, ensuring they cannot be predicted by an attacker. Thanks to @opnsec for identifying and responsibly disclosing this issue! ([\#5133](https://github.com/matrix-org/synapse/issues/5133)) +- Blacklist 0.0.0.0 and :: by default for URL previews. Thanks to @opnsec for identifying and responsibly disclosing this issue too! ([\#5134](https://github.com/matrix-org/synapse/issues/5134)) + Synapse 0.99.3 (2019-04-01) =========================== diff --git a/changelog.d/5133.bugfix b/changelog.d/5133.bugfix deleted file mode 100644 index be6474a692..0000000000 --- a/changelog.d/5133.bugfix +++ /dev/null @@ -1 +0,0 @@ -Switch to using a cryptographically-secure random number generator for token strings, ensuring they cannot be predicted by an attacker. Thanks to @opnsec for identifying and responsibly disclosing this issue! diff --git a/changelog.d/5134.bugfix b/changelog.d/5134.bugfix deleted file mode 100644 index 684d48c53a..0000000000 --- a/changelog.d/5134.bugfix +++ /dev/null @@ -1 +0,0 @@ -Blacklist 0.0.0.0 and :: by default for URL previews. Thanks to @opnsec for identifying and responsibly disclosing this issue too! diff --git a/debian/changelog b/debian/changelog index 03df2e1c00..ea712f0dab 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (0.99.3.1) stable; urgency=medium + + * New synapse release 0.99.3.1. 
+ + -- Synapse Packaging team Fri, 03 May 2019 16:02:43 +0100 + matrix-synapse-py3 (0.99.3) stable; urgency=medium [ Richard van der Hoff ] diff --git a/synapse/__init__.py b/synapse/__init__.py index 6bb5a8b24d..8959312cb0 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "0.99.3" +__version__ = "0.99.3.1" From f73f18fe7baba7e6f442e308112eeba91e4b8a61 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 3 May 2019 16:09:34 +0100 Subject: [PATCH 080/429] changelog tweaks --- CHANGES.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index d8eba2ec60..4b84e20823 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,8 +1,10 @@ Synapse 0.99.3.1 (2019-05-03) ============================= -Bugfixes --------- +Security update +--------------- + +This release includes two security fixes: - Switch to using a cryptographically-secure random number generator for token strings, ensuring they cannot be predicted by an attacker. Thanks to @opnsec for identifying and responsibly disclosing this issue! ([\#5133](https://github.com/matrix-org/synapse/issues/5133)) - Blacklist 0.0.0.0 and :: by default for URL previews. Thanks to @opnsec for identifying and responsibly disclosing this issue too! ([\#5134](https://github.com/matrix-org/synapse/issues/5134)) From e3281d7d261ee533818ae1638014295bbb4b98af Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 3 May 2019 18:30:13 +0100 Subject: [PATCH 081/429] pin urllib3 to <1.25 --- synapse/python_dependencies.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index f71e21ff4d..c75119a030 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -69,6 +69,14 @@ REQUIREMENTS = [ "attrs>=17.4.0", "netaddr>=0.7.18", + + # requests is a transitive dep of treq, and urlib3 is a transitive dep + # of requests, as well as of sentry-sdk. + # + # As of requests 2.21, requests does not yet support urllib3 1.25. + # (If we do not pin it here, pip will give us the latest urllib3 + # due to the dep via sentry-sdk.) + "urllib3<1.25", ] CONDITIONAL_REQUIREMENTS = { From ecc09673156c340e390c9c8a19af80133622fd4f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 3 May 2019 18:30:35 +0100 Subject: [PATCH 082/429] Debian: we now need libpq-dev. psycopg 2.8 is now out, which means that the C library gets built from source, so we now need libpq-dev when building. Turns out the need for this package is already documented in docs/postgres.rst. 
--- docker/Dockerfile-dhvirtualenv | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docker/Dockerfile-dhvirtualenv b/docker/Dockerfile-dhvirtualenv index 224c92352d..3de032cf8c 100644 --- a/docker/Dockerfile-dhvirtualenv +++ b/docker/Dockerfile-dhvirtualenv @@ -55,7 +55,8 @@ RUN apt-get update -qq -o Acquire::Languages=none \ python3-pip \ python3-setuptools \ python3-venv \ - sqlite3 + sqlite3 \ + libpq-dev COPY --from=builder /dh-virtualenv_1.1-1_all.deb / From 5485852b43a9eb5e8dbe892ee66790237c7e4db9 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 3 May 2019 18:36:55 +0100 Subject: [PATCH 083/429] changelog --- changelog.d/5135.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5135.misc diff --git a/changelog.d/5135.misc b/changelog.d/5135.misc new file mode 100644 index 0000000000..eab06dd91d --- /dev/null +++ b/changelog.d/5135.misc @@ -0,0 +1 @@ +Ensure that we have `urllib3` <1.25, to resolve incompatibility with `requests`. \ No newline at end of file From 0b5cf9560775297f31fb4afa384fc37be74d7490 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 3 May 2019 18:44:14 +0100 Subject: [PATCH 084/429] include disco in deb build target list --- scripts-dev/build_debian_packages | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts-dev/build_debian_packages b/scripts-dev/build_debian_packages index 6b9be99060..93305ee9b1 100755 --- a/scripts-dev/build_debian_packages +++ b/scripts-dev/build_debian_packages @@ -24,6 +24,7 @@ DISTS = ( "ubuntu:xenial", "ubuntu:bionic", "ubuntu:cosmic", + "ubuntu:disco", ) DESC = '''\ From fa21455e08d4115cd14f6c4ca34a1b432d924b4f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 3 May 2019 18:56:24 +0100 Subject: [PATCH 085/429] 0.99.3.2 --- CHANGES.md | 9 +++++++++ changelog.d/5135.misc | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/5135.misc diff --git a/CHANGES.md b/CHANGES.md index 4b84e20823..d8cfbbebef 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 0.99.3.2 (2019-05-03) +============================= + +Internal Changes +---------------- + +- Ensure that we have `urllib3` <1.25, to resolve incompatibility with `requests`. ([\#5135](https://github.com/matrix-org/synapse/issues/5135)) + + Synapse 0.99.3.1 (2019-05-03) ============================= diff --git a/changelog.d/5135.misc b/changelog.d/5135.misc deleted file mode 100644 index eab06dd91d..0000000000 --- a/changelog.d/5135.misc +++ /dev/null @@ -1 +0,0 @@ -Ensure that we have `urllib3` <1.25, to resolve incompatibility with `requests`. \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index ea712f0dab..c25425bf26 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (0.99.3.2) stable; urgency=medium + + * New synapse release 0.99.3.2. + + -- Synapse Packaging team Fri, 03 May 2019 18:56:20 +0100 + matrix-synapse-py3 (0.99.3.1) stable; urgency=medium * New synapse release 0.99.3.1. diff --git a/synapse/__init__.py b/synapse/__init__.py index 8959312cb0..315fa96551 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "0.99.3.1" +__version__ = "0.99.3.2" From 4804206dbe292ca4887fe972fe15e972d53a4c73 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 6 May 2019 22:13:35 +0100 Subject: [PATCH 086/429] Fix sample config ... after it got broken in 1565ebec2c. 
--- docs/sample_config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 84337a7c72..b6b9da6e41 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -566,7 +566,7 @@ uploads_path: "DATADIR/uploads" # (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly # listed here, since they correspond to unroutable addresses.) # -# This must be specified if url_preview_enabled is set. It is recommended that +# This must be specified if url_preview_enabled is set. It is recommended that # you uncomment the following list as a starting point. # #url_preview_ip_range_blacklist: From 3fdff1420790a29331608ca33ce2ace67dbaa4cc Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Mon, 6 May 2019 15:15:02 -0600 Subject: [PATCH 087/429] Fix spelling in server notices admin API docs (#5142) --- changelog.d/5142.feature | 1 + docs/admin_api/server_notices.md | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/5142.feature diff --git a/changelog.d/5142.feature b/changelog.d/5142.feature new file mode 100644 index 0000000000..54b228680d --- /dev/null +++ b/changelog.d/5142.feature @@ -0,0 +1 @@ +Implement an admin API for sending server notices. Many thanks to @krombel who provided a foundation for this work. diff --git a/docs/admin_api/server_notices.md b/docs/admin_api/server_notices.md index 5ddd21cfb2..858b052b84 100644 --- a/docs/admin_api/server_notices.md +++ b/docs/admin_api/server_notices.md @@ -36,7 +36,7 @@ You can optionally include the following additional parameters: * `state_key`: Setting this will result in a state event being sent. -Once the notice has been sent, the APU will return the following response: +Once the notice has been sent, the API will return the following response: ```json { From 59e2d2694deec13aaa0062e04b5460f978967dc1 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 7 May 2019 09:29:30 +0100 Subject: [PATCH 088/429] Remove the requirement to authenticate for /admin/server_version. (#5122) This endpoint isn't much use for its intended purpose if you first need to get yourself an admin's auth token. I've restricted it to the `/_synapse/admin` path to make it a bit easier to lock down for those concerned about exposing this information. I don't imagine anyone is using it in anger currently. --- changelog.d/5122.misc | 1 + docs/admin_api/version_api.rst | 2 -- synapse/rest/admin/__init__.py | 15 +++++---------- tests/rest/admin/test_admin.py | 30 ++++++++---------------------- tests/unittest.py | 22 ++++++++++++++++++---- 5 files changed, 32 insertions(+), 38 deletions(-) create mode 100644 changelog.d/5122.misc diff --git a/changelog.d/5122.misc b/changelog.d/5122.misc new file mode 100644 index 0000000000..e1be8a6210 --- /dev/null +++ b/changelog.d/5122.misc @@ -0,0 +1 @@ +Remove the requirement to authenticate for /admin/server_version. diff --git a/docs/admin_api/version_api.rst b/docs/admin_api/version_api.rst index 6d66543b96..833d9028be 100644 --- a/docs/admin_api/version_api.rst +++ b/docs/admin_api/version_api.rst @@ -10,8 +10,6 @@ The api is:: GET /_synapse/admin/v1/server_version -including an ``access_token`` of a server admin. - It returns a JSON body like the following: .. 
code:: json diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 0ce89741f0..744d85594f 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -88,21 +88,16 @@ class UsersRestServlet(RestServlet): class VersionServlet(RestServlet): - PATTERNS = historical_admin_path_patterns("/server_version") + PATTERNS = (re.compile("^/_synapse/admin/v1/server_version$"), ) def __init__(self, hs): - self.auth = hs.get_auth() - - @defer.inlineCallbacks - def on_GET(self, request): - yield assert_requester_is_admin(self.auth, request) - - ret = { + self.res = { 'server_version': get_version_string(synapse), 'python_version': platform.python_version(), } - defer.returnValue((200, ret)) + def on_GET(self, request): + return 200, self.res class UserRegisterServlet(RestServlet): @@ -830,6 +825,7 @@ class AdminRestResource(JsonResource): register_servlets_for_client_rest_resource(hs, self) SendServerNoticeServlet(hs).register(self) + VersionServlet(hs).register(self) def register_servlets_for_client_rest_resource(hs, http_server): @@ -847,7 +843,6 @@ def register_servlets_for_client_rest_resource(hs, http_server): QuarantineMediaInRoom(hs).register(http_server) ListMediaInRoom(hs).register(http_server) UserRegisterServlet(hs).register(http_server) - VersionServlet(hs).register(http_server) DeleteGroupAdminRestServlet(hs).register(http_server) AccountValidityRenewServlet(hs).register(http_server) # don't add more things here: new servlets should only be exposed on diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py index db4cfd8550..da19a83918 100644 --- a/tests/rest/admin/test_admin.py +++ b/tests/rest/admin/test_admin.py @@ -21,6 +21,8 @@ from mock import Mock import synapse.rest.admin from synapse.api.constants import UserTypes +from synapse.http.server import JsonResource +from synapse.rest.admin import VersionServlet from synapse.rest.client.v1 import events, login, room from synapse.rest.client.v2_alpha import groups @@ -28,20 +30,15 @@ from tests import unittest class VersionTestCase(unittest.HomeserverTestCase): + url = '/_synapse/admin/v1/server_version' - servlets = [ - synapse.rest.admin.register_servlets_for_client_rest_resource, - login.register_servlets, - ] - - url = '/_matrix/client/r0/admin/server_version' + def create_test_json_resource(self): + resource = JsonResource(self.hs) + VersionServlet(self.hs).register(resource) + return resource def test_version_string(self): - self.register_user("admin", "pass", admin=True) - self.admin_token = self.login("admin", "pass") - - request, channel = self.make_request("GET", self.url, - access_token=self.admin_token) + request, channel = self.make_request("GET", self.url, shorthand=False) self.render(request) self.assertEqual(200, int(channel.result["code"]), @@ -49,17 +46,6 @@ class VersionTestCase(unittest.HomeserverTestCase): self.assertEqual({'server_version', 'python_version'}, set(channel.json_body.keys())) - def test_inaccessible_to_non_admins(self): - self.register_user("unprivileged-user", "pass", admin=False) - user_token = self.login("unprivileged-user", "pass") - - request, channel = self.make_request("GET", self.url, - access_token=user_token) - self.render(request) - - self.assertEqual(403, int(channel.result['code']), - msg=channel.result['body']) - class UserRegisterTestCase(unittest.HomeserverTestCase): diff --git a/tests/unittest.py b/tests/unittest.py index 8c65736a51..029a88d770 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -181,10 +181,7 @@ 
class HomeserverTestCase(TestCase): raise Exception("A homeserver wasn't returned, but %r" % (self.hs,)) # Register the resources - self.resource = JsonResource(self.hs) - - for servlet in self.servlets: - servlet(self.hs, self.resource) + self.resource = self.create_test_json_resource() from tests.rest.client.v1.utils import RestHelper @@ -230,6 +227,23 @@ class HomeserverTestCase(TestCase): hs = self.setup_test_homeserver() return hs + def create_test_json_resource(self): + """ + Create a test JsonResource, with the relevant servlets registerd to it + + The default implementation calls each function in `servlets` to do the + registration. + + Returns: + JsonResource: + """ + resource = JsonResource(self.hs) + + for servlet in self.servlets: + servlet(self.hs, resource) + + return resource + def default_config(self, name="test"): """ Get a default HomeServer config object. From 1473058b5eb14b5128c0b6ee6e88e89602ad96c5 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 8 May 2019 17:01:30 +0100 Subject: [PATCH 089/429] Do checks on aliases for incoming m.room.aliases events (#5128) Follow-up to #5124 Also added a bunch of checks to make sure everything (both the stuff added on #5124 and this PR) works as intended. --- changelog.d/5128.bugfix | 1 + synapse/api/constants.py | 3 + synapse/events/snapshot.py | 8 +- synapse/events/validator.py | 15 ++- synapse/handlers/directory.py | 7 +- synapse/handlers/message.py | 30 +++++ tests/rest/client/v1/test_directory.py | 169 +++++++++++++++++++++++++ 7 files changed, 225 insertions(+), 8 deletions(-) create mode 100644 changelog.d/5128.bugfix create mode 100644 tests/rest/client/v1/test_directory.py diff --git a/changelog.d/5128.bugfix b/changelog.d/5128.bugfix new file mode 100644 index 0000000000..46df1e9fd5 --- /dev/null +++ b/changelog.d/5128.bugfix @@ -0,0 +1 @@ +Add some missing limitations to room alias creation. diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 0860b75905..8547a63535 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -20,6 +20,9 @@ # the "depth" field on events is limited to 2**63 - 1 MAX_DEPTH = 2**63 - 1 +# the maximum length for a room alias is 255 characters +MAX_ALIAS_LENGTH = 255 + class Membership(object): diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py index 368b5f6ae4..fa09c132a0 100644 --- a/synapse/events/snapshot.py +++ b/synapse/events/snapshot.py @@ -187,7 +187,9 @@ class EventContext(object): Returns: Deferred[dict[(str, str), str]|None]: Returns None if state_group - is None, which happens when the associated event is an outlier. + is None, which happens when the associated event is an outlier. + Maps a (type, state_key) to the event ID of the state event matching + this tuple. """ if not self._fetching_state_deferred: @@ -205,7 +207,9 @@ class EventContext(object): Returns: Deferred[dict[(str, str), str]|None]: Returns None if state_group - is None, which happens when the associated event is an outlier. + is None, which happens when the associated event is an outlier. + Maps a (type, state_key) to the event ID of the state event matching + this tuple. 
""" if not self._fetching_state_deferred: diff --git a/synapse/events/validator.py b/synapse/events/validator.py index 514273c792..711af512b2 100644 --- a/synapse/events/validator.py +++ b/synapse/events/validator.py @@ -15,8 +15,8 @@ from six import string_types -from synapse.api.constants import EventTypes, Membership -from synapse.api.errors import SynapseError +from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes, Membership +from synapse.api.errors import Codes, SynapseError from synapse.api.room_versions import EventFormatVersions from synapse.types import EventID, RoomID, UserID @@ -56,6 +56,17 @@ class EventValidator(object): if not isinstance(getattr(event, s), string_types): raise SynapseError(400, "'%s' not a string type" % (s,)) + if event.type == EventTypes.Aliases: + if "aliases" in event.content: + for alias in event.content["aliases"]: + if len(alias) > MAX_ALIAS_LENGTH: + raise SynapseError( + 400, + ("Can't create aliases longer than" + " %d characters" % (MAX_ALIAS_LENGTH,)), + Codes.INVALID_PARAM, + ) + def validate_builder(self, event): """Validates that the builder/event has roughly the right format. Only checks values that we expect a proto event to have, rather than all the diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 50c587aa61..a12f9508d8 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -19,7 +19,7 @@ import string from twisted.internet import defer -from synapse.api.constants import EventTypes +from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes from synapse.api.errors import ( AuthError, CodeMessageException, @@ -36,7 +36,6 @@ logger = logging.getLogger(__name__) class DirectoryHandler(BaseHandler): - MAX_ALIAS_LENGTH = 255 def __init__(self, hs): super(DirectoryHandler, self).__init__(hs) @@ -105,10 +104,10 @@ class DirectoryHandler(BaseHandler): user_id = requester.user.to_string() - if len(room_alias.to_string()) > self.MAX_ALIAS_LENGTH: + if len(room_alias.to_string()) > MAX_ALIAS_LENGTH: raise SynapseError( 400, - "Can't create aliases longer than %s characters" % self.MAX_ALIAS_LENGTH, + "Can't create aliases longer than %s characters" % MAX_ALIAS_LENGTH, Codes.INVALID_PARAM, ) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 224d34ef3a..e5afeadf68 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -228,6 +228,7 @@ class EventCreationHandler(object): self.ratelimiter = hs.get_ratelimiter() self.notifier = hs.get_notifier() self.config = hs.config + self.require_membership_for_aliases = hs.config.require_membership_for_aliases self.send_event_to_master = ReplicationSendEventRestServlet.make_client(hs) @@ -336,6 +337,35 @@ class EventCreationHandler(object): prev_events_and_hashes=prev_events_and_hashes, ) + # In an ideal world we wouldn't need the second part of this condition. However, + # this behaviour isn't spec'd yet, meaning we should be able to deactivate this + # behaviour. Another reason is that this code is also evaluated each time a new + # m.room.aliases event is created, which includes hitting a /directory route. + # Therefore not including this condition here would render the similar one in + # synapse.handlers.directory pointless. + if builder.type == EventTypes.Aliases and self.require_membership_for_aliases: + # Ideally we'd do the membership check in event_auth.check(), which + # describes a spec'd algorithm for authenticating events received over + # federation as well as those created locally. 
As of room v3, aliases events + # can be created by users that are not in the room, therefore we have to + # tolerate them in event_auth.check(). + prev_state_ids = yield context.get_prev_state_ids(self.store) + prev_event_id = prev_state_ids.get((EventTypes.Member, event.sender)) + prev_event = yield self.store.get_event(prev_event_id, allow_none=True) + if not prev_event or prev_event.membership != Membership.JOIN: + logger.warning( + ("Attempt to send `m.room.aliases` in room %s by user %s but" + " membership is %s"), + event.room_id, + event.sender, + prev_event.membership if prev_event else None, + ) + + raise AuthError( + 403, + "You must be in the room to create an alias for it", + ) + self.validator.validate_new(event) defer.returnValue((event, context)) diff --git a/tests/rest/client/v1/test_directory.py b/tests/rest/client/v1/test_directory.py new file mode 100644 index 0000000000..4804000ec7 --- /dev/null +++ b/tests/rest/client/v1/test_directory.py @@ -0,0 +1,169 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json + +from synapse.rest.admin import register_servlets +from synapse.rest.client.v1 import directory, login, room +from synapse.types import RoomAlias +from synapse.util.stringutils import random_string + +from tests import unittest + + +class DirectoryTestCase(unittest.HomeserverTestCase): + + servlets = [ + register_servlets, + directory.register_servlets, + login.register_servlets, + room.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + config = self.default_config() + config.require_membership_for_aliases = True + + self.hs = self.setup_test_homeserver(config=config) + + return self.hs + + def prepare(self, reactor, clock, homeserver): + self.room_owner = self.register_user("room_owner", "test") + self.room_owner_tok = self.login("room_owner", "test") + + self.room_id = self.helper.create_room_as( + self.room_owner, tok=self.room_owner_tok, + ) + + self.user = self.register_user("user", "test") + self.user_tok = self.login("user", "test") + + def test_state_event_not_in_room(self): + self.ensure_user_left_room() + self.set_alias_via_state_event(403) + + def test_directory_endpoint_not_in_room(self): + self.ensure_user_left_room() + self.set_alias_via_directory(403) + + def test_state_event_in_room_too_long(self): + self.ensure_user_joined_room() + self.set_alias_via_state_event(400, alias_length=256) + + def test_directory_in_room_too_long(self): + self.ensure_user_joined_room() + self.set_alias_via_directory(400, alias_length=256) + + def test_state_event_in_room(self): + self.ensure_user_joined_room() + self.set_alias_via_state_event(200) + + def test_directory_in_room(self): + self.ensure_user_joined_room() + self.set_alias_via_directory(200) + + def test_room_creation_too_long(self): + url = "/_matrix/client/r0/createRoom" + + # We use deliberately a localpart under the length threshold so + # that we can make sure that the check is done on the whole alias. 
+ data = { + "room_alias_name": random_string(256 - len(self.hs.hostname)), + } + request_data = json.dumps(data) + request, channel = self.make_request( + "POST", url, request_data, access_token=self.user_tok, + ) + self.render(request) + self.assertEqual(channel.code, 400, channel.result) + + def test_room_creation(self): + url = "/_matrix/client/r0/createRoom" + + # Check with an alias of allowed length. There should already be + # a test that ensures it works in test_register.py, but let's be + # as cautious as possible here. + data = { + "room_alias_name": random_string(5), + } + request_data = json.dumps(data) + request, channel = self.make_request( + "POST", url, request_data, access_token=self.user_tok, + ) + self.render(request) + self.assertEqual(channel.code, 200, channel.result) + + def set_alias_via_state_event(self, expected_code, alias_length=5): + url = ("/_matrix/client/r0/rooms/%s/state/m.room.aliases/%s" + % (self.room_id, self.hs.hostname)) + + data = { + "aliases": [ + self.random_alias(alias_length), + ], + } + request_data = json.dumps(data) + + request, channel = self.make_request( + "PUT", url, request_data, access_token=self.user_tok, + ) + self.render(request) + self.assertEqual(channel.code, expected_code, channel.result) + + def set_alias_via_directory(self, expected_code, alias_length=5): + url = "/_matrix/client/r0/directory/room/%s" % self.random_alias(alias_length) + data = { + "room_id": self.room_id, + } + request_data = json.dumps(data) + + request, channel = self.make_request( + "PUT", url, request_data, access_token=self.user_tok, + ) + self.render(request) + self.assertEqual(channel.code, expected_code, channel.result) + + def random_alias(self, length): + return RoomAlias( + random_string(length), + self.hs.hostname, + ).to_string() + + def ensure_user_left_room(self): + self.ensure_membership("leave") + + def ensure_user_joined_room(self): + self.ensure_membership("join") + + def ensure_membership(self, membership): + try: + if membership == "leave": + self.helper.leave( + room=self.room_id, + user=self.user, + tok=self.user_tok, + ) + if membership == "join": + self.helper.join( + room=self.room_id, + user=self.user, + tok=self.user_tok, + ) + except AssertionError: + # We don't care whether the leave request didn't return a 200 (e.g. + # if the user isn't already in the room), because we only want to + # make sure the user isn't in the room. + pass From c0e0740bef0db661abce352afaf6c958e276f11d Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 8 May 2019 18:26:56 +0100 Subject: [PATCH 090/429] add options to require an access_token to GET /profile and /publicRooms on CS API (#5083) This commit adds two config options: * `restrict_public_rooms_to_local_users` Requires auth to fetch the public rooms directory through the CS API and disables fetching it through the federation API. * `require_auth_for_profile_requests` When set to `true`, requires that requests to `/profile` over the CS API are authenticated, and only returns the user's profile if the requester shares a room with the profile's owner, as per MSC1301. MSC1301 also specifies a behaviour for federation (only returning the profile if the server asking for it shares a room with the profile's owner), but that's currently really non-trivial to do in a not too expensive way. Next step is writing down a MSC that allows a HS to specify which user sent the profile query. 
In this implementation, Synapse won't send a profile query over federation if it doesn't believe it already shares a room with the profile's owner, though. Groups have been intentionally omitted from this commit. --- changelog.d/5083.feature | 1 + docs/sample_config.yaml | 14 ++++ synapse/config/server.py | 27 ++++++++ synapse/federation/transport/server.py | 10 +++ synapse/handlers/profile.py | 43 ++++++++++++ synapse/rest/client/v1/profile.py | 40 +++++++---- synapse/rest/client/v1/room.py | 6 ++ tests/rest/client/v1/test_profile.py | 92 +++++++++++++++++++++++++- tests/rest/client/v1/test_rooms.py | 32 +++++++++ 9 files changed, 252 insertions(+), 13 deletions(-) create mode 100644 changelog.d/5083.feature diff --git a/changelog.d/5083.feature b/changelog.d/5083.feature new file mode 100644 index 0000000000..55d114b3fe --- /dev/null +++ b/changelog.d/5083.feature @@ -0,0 +1 @@ +Add an configuration option to require authentication on /publicRooms and /profile endpoints. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index b6b9da6e41..bdfc34c6bd 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -69,6 +69,20 @@ pid_file: DATADIR/homeserver.pid # #use_presence: false +# Whether to require authentication to retrieve profile data (avatars, +# display names) of other users through the client API. Defaults to +# 'false'. Note that profile data is also available via the federation +# API, so this setting is of limited value if federation is enabled on +# the server. +# +#require_auth_for_profile_requests: true + +# If set to 'true', requires authentication to access the server's +# public rooms directory through the client API, and forbids any other +# homeserver to fetch it via federation. Defaults to 'false'. +# +#restrict_public_rooms_to_local_users: true + # The GC threshold parameters to pass to `gc.set_threshold`, if defined # #gc_thresholds: [700, 10, 10] diff --git a/synapse/config/server.py b/synapse/config/server.py index 147a976485..8dce75c56a 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -72,6 +72,19 @@ class ServerConfig(Config): # master, potentially causing inconsistency. self.enable_media_repo = config.get("enable_media_repo", True) + # Whether to require authentication to retrieve profile data (avatars, + # display names) of other users through the client API. + self.require_auth_for_profile_requests = config.get( + "require_auth_for_profile_requests", False, + ) + + # If set to 'True', requires authentication to access the server's + # public rooms directory through the client API, and forbids any other + # homeserver to fetch it via federation. + self.restrict_public_rooms_to_local_users = config.get( + "restrict_public_rooms_to_local_users", False, + ) + # whether to enable search. If disabled, new entries will not be inserted # into the search tables and they will not be indexed. Users will receive # errors when attempting to search for messages. @@ -327,6 +340,20 @@ class ServerConfig(Config): # #use_presence: false + # Whether to require authentication to retrieve profile data (avatars, + # display names) of other users through the client API. Defaults to + # 'false'. Note that profile data is also available via the federation + # API, so this setting is of limited value if federation is enabled on + # the server. 
+ # + #require_auth_for_profile_requests: true + + # If set to 'true', requires authentication to access the server's + # public rooms directory through the client API, and forbids any other + # homeserver to fetch it via federation. Defaults to 'false'. + # + #restrict_public_rooms_to_local_users: true + # The GC threshold parameters to pass to `gc.set_threshold`, if defined # #gc_thresholds: [700, 10, 10] diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 452599e1a1..9030eb18c5 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -716,8 +716,17 @@ class PublicRoomList(BaseFederationServlet): PATH = "/publicRooms" + def __init__(self, handler, authenticator, ratelimiter, server_name, deny_access): + super(PublicRoomList, self).__init__( + handler, authenticator, ratelimiter, server_name, + ) + self.deny_access = deny_access + @defer.inlineCallbacks def on_GET(self, origin, content, query): + if self.deny_access: + raise FederationDeniedError(origin) + limit = parse_integer_from_args(query, "limit", 0) since_token = parse_string_from_args(query, "since", None) include_all_networks = parse_boolean_from_args( @@ -1417,6 +1426,7 @@ def register_servlets(hs, resource, authenticator, ratelimiter, servlet_groups=N authenticator=authenticator, ratelimiter=ratelimiter, server_name=hs.hostname, + deny_access=hs.config.restrict_public_rooms_to_local_users, ).register(resource) if "group_server" in servlet_groups: diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index a65c98ff5c..91fc718ff8 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -53,6 +53,7 @@ class BaseProfileHandler(BaseHandler): @defer.inlineCallbacks def get_profile(self, user_id): target_user = UserID.from_string(user_id) + if self.hs.is_mine(target_user): try: displayname = yield self.store.get_profile_displayname( @@ -283,6 +284,48 @@ class BaseProfileHandler(BaseHandler): room_id, str(e) ) + @defer.inlineCallbacks + def check_profile_query_allowed(self, target_user, requester=None): + """Checks whether a profile query is allowed. If the + 'require_auth_for_profile_requests' config flag is set to True and a + 'requester' is provided, the query is only allowed if the two users + share a room. + + Args: + target_user (UserID): The owner of the queried profile. + requester (None|UserID): The user querying for the profile. + + Raises: + SynapseError(403): The two users share no room, or ne user couldn't + be found to be in any room the server is in, and therefore the query + is denied. + """ + # Implementation of MSC1301: don't allow looking up profiles if the + # requester isn't in the same room as the target. We expect requester to + # be None when this function is called outside of a profile query, e.g. + # when building a membership event. In this case, we must allow the + # lookup. + if not self.hs.config.require_auth_for_profile_requests or not requester: + return + + try: + requester_rooms = yield self.store.get_rooms_for_user( + requester.to_string() + ) + target_user_rooms = yield self.store.get_rooms_for_user( + target_user.to_string(), + ) + + # Check if the room lists have no elements in common. + if requester_rooms.isdisjoint(target_user_rooms): + raise SynapseError(403, "Profile isn't available", Codes.FORBIDDEN) + except StoreError as e: + if e.code == 404: + # This likely means that one of the users doesn't exist, + # so we act as if we couldn't find the profile. 
+ raise SynapseError(403, "Profile isn't available", Codes.FORBIDDEN) + raise + class MasterProfileHandler(BaseProfileHandler): PROFILE_UPDATE_MS = 60 * 1000 diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py index a23edd8fe5..eac1966c5e 100644 --- a/synapse/rest/client/v1/profile.py +++ b/synapse/rest/client/v1/profile.py @@ -31,11 +31,17 @@ class ProfileDisplaynameRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_GET(self, request, user_id): + requester_user = None + + if self.hs.config.require_auth_for_profile_requests: + requester = yield self.auth.get_user_by_req(request) + requester_user = requester.user + user = UserID.from_string(user_id) - displayname = yield self.profile_handler.get_displayname( - user, - ) + yield self.profile_handler.check_profile_query_allowed(user, requester_user) + + displayname = yield self.profile_handler.get_displayname(user) ret = {} if displayname is not None: @@ -74,11 +80,17 @@ class ProfileAvatarURLRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_GET(self, request, user_id): + requester_user = None + + if self.hs.config.require_auth_for_profile_requests: + requester = yield self.auth.get_user_by_req(request) + requester_user = requester.user + user = UserID.from_string(user_id) - avatar_url = yield self.profile_handler.get_avatar_url( - user, - ) + yield self.profile_handler.check_profile_query_allowed(user, requester_user) + + avatar_url = yield self.profile_handler.get_avatar_url(user) ret = {} if avatar_url is not None: @@ -116,14 +128,18 @@ class ProfileRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_GET(self, request, user_id): + requester_user = None + + if self.hs.config.require_auth_for_profile_requests: + requester = yield self.auth.get_user_by_req(request) + requester_user = requester.user + user = UserID.from_string(user_id) - displayname = yield self.profile_handler.get_displayname( - user, - ) - avatar_url = yield self.profile_handler.get_avatar_url( - user, - ) + yield self.profile_handler.check_profile_query_allowed(user, requester_user) + + displayname = yield self.profile_handler.get_displayname(user) + avatar_url = yield self.profile_handler.get_avatar_url(user) ret = {} if displayname is not None: diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 48da4d557f..fab04965cb 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -301,6 +301,12 @@ class PublicRoomListRestServlet(ClientV1RestServlet): try: yield self.auth.get_user_by_req(request, allow_guest=True) except AuthError as e: + # Option to allow servers to require auth when accessing + # /publicRooms via CS API. This is especially helpful in private + # federations. + if self.hs.config.restrict_public_rooms_to_local_users: + raise + # We allow people to not be authed if they're just looking at our # room list, but require auth when we proxy the request. 
# In both cases we call the auth function, as that has the side diff --git a/tests/rest/client/v1/test_profile.py b/tests/rest/client/v1/test_profile.py index 1eab9c3bdb..5d13de11e6 100644 --- a/tests/rest/client/v1/test_profile.py +++ b/tests/rest/client/v1/test_profile.py @@ -20,7 +20,7 @@ from twisted.internet import defer import synapse.types from synapse.api.errors import AuthError, SynapseError -from synapse.rest.client.v1 import profile +from synapse.rest.client.v1 import admin, login, profile, room from tests import unittest @@ -42,6 +42,7 @@ class ProfileTestCase(unittest.TestCase): "set_displayname", "get_avatar_url", "set_avatar_url", + "check_profile_query_allowed", ] ) @@ -155,3 +156,92 @@ class ProfileTestCase(unittest.TestCase): self.assertEquals(mocked_set.call_args[0][0].localpart, "1234ABCD") self.assertEquals(mocked_set.call_args[0][1].user.localpart, "1234ABCD") self.assertEquals(mocked_set.call_args[0][2], "http://my.server/pic.gif") + + +class ProfilesRestrictedTestCase(unittest.HomeserverTestCase): + + servlets = [ + admin.register_servlets, + login.register_servlets, + profile.register_servlets, + room.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + + config = self.default_config() + config.require_auth_for_profile_requests = True + self.hs = self.setup_test_homeserver(config=config) + + return self.hs + + def prepare(self, reactor, clock, hs): + # User owning the requested profile. + self.owner = self.register_user("owner", "pass") + self.owner_tok = self.login("owner", "pass") + self.profile_url = "/profile/%s" % (self.owner) + + # User requesting the profile. + self.requester = self.register_user("requester", "pass") + self.requester_tok = self.login("requester", "pass") + + self.room_id = self.helper.create_room_as(self.owner, tok=self.owner_tok) + + def test_no_auth(self): + self.try_fetch_profile(401) + + def test_not_in_shared_room(self): + self.ensure_requester_left_room() + + self.try_fetch_profile(403, access_token=self.requester_tok) + + def test_in_shared_room(self): + self.ensure_requester_left_room() + + self.helper.join( + room=self.room_id, + user=self.requester, + tok=self.requester_tok, + ) + + self.try_fetch_profile(200, self.requester_tok) + + def try_fetch_profile(self, expected_code, access_token=None): + self.request_profile( + expected_code, + access_token=access_token + ) + + self.request_profile( + expected_code, + url_suffix="/displayname", + access_token=access_token, + ) + + self.request_profile( + expected_code, + url_suffix="/avatar_url", + access_token=access_token, + ) + + def request_profile(self, expected_code, url_suffix="", access_token=None): + request, channel = self.make_request( + "GET", + self.profile_url + url_suffix, + access_token=access_token, + ) + self.render(request) + self.assertEqual(channel.code, expected_code, channel.result) + + def ensure_requester_left_room(self): + try: + self.helper.leave( + room=self.room_id, + user=self.requester, + tok=self.requester_tok, + ) + except AssertionError: + # We don't care whether the leave request didn't return a 200 (e.g. + # if the user isn't already in the room), because we only want to + # make sure the user isn't in the room. 
+ pass diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index 521ac80f9a..28fbf6ae52 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -904,3 +904,35 @@ class RoomSearchTestCase(unittest.HomeserverTestCase): self.assertEqual( context["profile_info"][self.other_user_id]["displayname"], "otheruser" ) + + +class PublicRoomsRestrictedTestCase(unittest.HomeserverTestCase): + + servlets = [ + admin.register_servlets, + room.register_servlets, + login.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + + self.url = b"/_matrix/client/r0/publicRooms" + + config = self.default_config() + config.restrict_public_rooms_to_local_users = True + self.hs = self.setup_test_homeserver(config=config) + + return self.hs + + def test_restricted_no_auth(self): + request, channel = self.make_request("GET", self.url) + self.render(request) + self.assertEqual(channel.code, 401, channel.result) + + def test_restricted_auth(self): + self.register_user("user", "pass") + tok = self.login("user", "pass") + + request, channel = self.make_request("GET", self.url, access_token=tok) + self.render(request) + self.assertEqual(channel.code, 200, channel.result) From d216a36b3703e42906e2242526784598642c8505 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 8 May 2019 21:57:03 +0100 Subject: [PATCH 091/429] Fix bogus imports in tests (#5154) --- changelog.d/5154.bugfix | 1 + tests/rest/client/v1/test_directory.py | 4 ++-- tests/rest/client/v1/test_profile.py | 5 +++-- tests/rest/client/v1/test_rooms.py | 2 +- 4 files changed, 7 insertions(+), 5 deletions(-) create mode 100644 changelog.d/5154.bugfix diff --git a/changelog.d/5154.bugfix b/changelog.d/5154.bugfix new file mode 100644 index 0000000000..c7bd5ee6cf --- /dev/null +++ b/changelog.d/5154.bugfix @@ -0,0 +1 @@ +Fix bogus imports in unit tests. 
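The fix below switches the tests over to the admin-servlet registration helper exposed by `synapse.rest.admin`. As a rough sketch of the pattern the corrected tests rely on (the class name and servlet list here are illustrative only, not part of the patch, and assume Synapse's own test harness):

```python
from synapse.rest import admin
from synapse.rest.client.v1 import login, profile, room

from tests import unittest


class ExampleRestTestCase(unittest.HomeserverTestCase):
    # Admin servlets are registered on the client REST resource via this
    # helper, rather than via the old module-level `register_servlets`.
    servlets = [
        admin.register_servlets_for_client_rest_resource,
        login.register_servlets,
        profile.register_servlets,
        room.register_servlets,
    ]
```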
diff --git a/tests/rest/client/v1/test_directory.py b/tests/rest/client/v1/test_directory.py index 4804000ec7..f63c68e7ed 100644 --- a/tests/rest/client/v1/test_directory.py +++ b/tests/rest/client/v1/test_directory.py @@ -15,7 +15,7 @@ import json -from synapse.rest.admin import register_servlets +from synapse.rest import admin from synapse.rest.client.v1 import directory, login, room from synapse.types import RoomAlias from synapse.util.stringutils import random_string @@ -26,7 +26,7 @@ from tests import unittest class DirectoryTestCase(unittest.HomeserverTestCase): servlets = [ - register_servlets, + admin.register_servlets_for_client_rest_resource, directory.register_servlets, login.register_servlets, room.register_servlets, diff --git a/tests/rest/client/v1/test_profile.py b/tests/rest/client/v1/test_profile.py index 5d13de11e6..7306e61b7c 100644 --- a/tests/rest/client/v1/test_profile.py +++ b/tests/rest/client/v1/test_profile.py @@ -20,7 +20,8 @@ from twisted.internet import defer import synapse.types from synapse.api.errors import AuthError, SynapseError -from synapse.rest.client.v1 import admin, login, profile, room +from synapse.rest import admin +from synapse.rest.client.v1 import login, profile, room from tests import unittest @@ -161,7 +162,7 @@ class ProfileTestCase(unittest.TestCase): class ProfilesRestrictedTestCase(unittest.HomeserverTestCase): servlets = [ - admin.register_servlets, + admin.register_servlets_for_client_rest_resource, login.register_servlets, profile.register_servlets, room.register_servlets, diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index 28fbf6ae52..9b191436cc 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -909,7 +909,7 @@ class RoomSearchTestCase(unittest.HomeserverTestCase): class PublicRoomsRestrictedTestCase(unittest.HomeserverTestCase): servlets = [ - admin.register_servlets, + synapse.rest.admin.register_servlets_for_client_rest_resource, room.register_servlets, login.register_servlets, ] From 11ea16777f52264f0a1d6f4db5b7db5c6c147523 Mon Sep 17 00:00:00 2001 From: Quentin Dufour Date: Thu, 9 May 2019 12:01:41 +0200 Subject: [PATCH 092/429] Limit the number of EDUs in transactions to 100 as expected by receiver (#5138) Fixes #3951. --- changelog.d/5138.bugfix | 1 + .../sender/per_destination_queue.py | 66 ++++++++++--------- synapse/storage/deviceinbox.py | 2 +- 3 files changed, 37 insertions(+), 32 deletions(-) create mode 100644 changelog.d/5138.bugfix diff --git a/changelog.d/5138.bugfix b/changelog.d/5138.bugfix new file mode 100644 index 0000000000..c1945a8eb2 --- /dev/null +++ b/changelog.d/5138.bugfix @@ -0,0 +1 @@ +Limit the number of EDUs in transactions to 100 as expected by synapse. Thanks to @superboum for this work! diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index be99211003..4d96f026c6 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -33,6 +33,9 @@ from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage import UserPresenceState from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter +# This is defined in the Matrix spec and enforced by the receiver. 
+MAX_EDUS_PER_TRANSACTION = 100 + logger = logging.getLogger(__name__) @@ -197,7 +200,8 @@ class PerDestinationQueue(object): pending_pdus = [] while True: device_message_edus, device_stream_id, dev_list_id = ( - yield self._get_new_device_messages() + # We have to keep 2 free slots for presence and rr_edus + yield self._get_new_device_messages(MAX_EDUS_PER_TRANSACTION - 2) ) # BEGIN CRITICAL SECTION @@ -216,19 +220,9 @@ class PerDestinationQueue(object): pending_edus = [] - pending_edus.extend(self._get_rr_edus(force_flush=False)) - # We can only include at most 100 EDUs per transactions - pending_edus.extend(self._pop_pending_edus(100 - len(pending_edus))) - - pending_edus.extend( - self._pending_edus_keyed.values() - ) - - self._pending_edus_keyed = {} - - pending_edus.extend(device_message_edus) - + # rr_edus and pending_presence take at most one slot each + pending_edus.extend(self._get_rr_edus(force_flush=False)) pending_presence = self._pending_presence self._pending_presence = {} if pending_presence: @@ -248,6 +242,12 @@ class PerDestinationQueue(object): ) ) + pending_edus.extend(device_message_edus) + pending_edus.extend(self._pop_pending_edus(MAX_EDUS_PER_TRANSACTION - len(pending_edus))) + while len(pending_edus) < MAX_EDUS_PER_TRANSACTION and self._pending_edus_keyed: + _, val = self._pending_edus_keyed.popitem() + pending_edus.append(val) + if pending_pdus: logger.debug("TX [%s] len(pending_pdus_by_dest[dest]) = %d", self._destination, len(pending_pdus)) @@ -259,7 +259,7 @@ class PerDestinationQueue(object): # if we've decided to send a transaction anyway, and we have room, we # may as well send any pending RRs - if len(pending_edus) < 100: + if len(pending_edus) < MAX_EDUS_PER_TRANSACTION: pending_edus.extend(self._get_rr_edus(force_flush=True)) # END CRITICAL SECTION @@ -346,27 +346,13 @@ class PerDestinationQueue(object): return pending_edus @defer.inlineCallbacks - def _get_new_device_messages(self): - last_device_stream_id = self._last_device_stream_id - to_device_stream_id = self._store.get_to_device_stream_token() - contents, stream_id = yield self._store.get_new_device_msgs_for_remote( - self._destination, last_device_stream_id, to_device_stream_id - ) - edus = [ - Edu( - origin=self._server_name, - destination=self._destination, - edu_type="m.direct_to_device", - content=content, - ) - for content in contents - ] - + def _get_new_device_messages(self, limit): last_device_list = self._last_device_list_stream_id + # Will return at most 20 entries now_stream_id, results = yield self._store.get_devices_by_remote( self._destination, last_device_list ) - edus.extend( + edus = [ Edu( origin=self._server_name, destination=self._destination, @@ -374,5 +360,23 @@ class PerDestinationQueue(object): content=content, ) for content in results + ] + + assert len(edus) <= limit, "get_devices_by_remote returned too many EDUs" + + last_device_stream_id = self._last_device_stream_id + to_device_stream_id = self._store.get_to_device_stream_token() + contents, stream_id = yield self._store.get_new_device_msgs_for_remote( + self._destination, last_device_stream_id, to_device_stream_id, limit - len(edus) ) + edus.extend( + Edu( + origin=self._server_name, + destination=self._destination, + edu_type="m.direct_to_device", + content=content, + ) + for content in contents + ) + defer.returnValue((edus, stream_id, now_stream_id)) diff --git a/synapse/storage/deviceinbox.py b/synapse/storage/deviceinbox.py index fed4ea3610..9b0a99cb49 100644 --- a/synapse/storage/deviceinbox.py +++ 
b/synapse/storage/deviceinbox.py @@ -118,7 +118,7 @@ class DeviceInboxWorkerStore(SQLBaseStore): defer.returnValue(count) def get_new_device_msgs_for_remote( - self, destination, last_stream_id, current_stream_id, limit=100 + self, destination, last_stream_id, current_stream_id, limit ): """ Args: From 130f932cbc5ca5477af263c5290e0c9a51b5150c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 9 May 2019 16:27:02 +0100 Subject: [PATCH 093/429] Run `black` on per_destination_queue ... mostly to fix pep8 fails --- .../sender/per_destination_queue.py | 74 ++++++++++--------- 1 file changed, 39 insertions(+), 35 deletions(-) diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index 4d96f026c6..fae8bea392 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -40,8 +40,7 @@ logger = logging.getLogger(__name__) sent_edus_counter = Counter( - "synapse_federation_client_sent_edus", - "Total number of EDUs successfully sent", + "synapse_federation_client_sent_edus", "Total number of EDUs successfully sent" ) sent_edus_by_type = Counter( @@ -61,6 +60,7 @@ class PerDestinationQueue(object): destination (str): the server_name of the destination that we are managing transmission for. """ + def __init__(self, hs, transaction_manager, destination): self._server_name = hs.hostname self._clock = hs.get_clock() @@ -71,17 +71,17 @@ class PerDestinationQueue(object): self.transmission_loop_running = False # a list of tuples of (pending pdu, order) - self._pending_pdus = [] # type: list[tuple[EventBase, int]] - self._pending_edus = [] # type: list[Edu] + self._pending_pdus = [] # type: list[tuple[EventBase, int]] + self._pending_edus = [] # type: list[Edu] # Pending EDUs by their "key". Keyed EDUs are EDUs that get clobbered # based on their key (e.g. typing events by room_id) # Map of (edu_type, key) -> Edu - self._pending_edus_keyed = {} # type: dict[tuple[str, str], Edu] + self._pending_edus_keyed = {} # type: dict[tuple[str, str], Edu] # Map of user_id -> UserPresenceState of pending presence to be sent to this # destination - self._pending_presence = {} # type: dict[str, UserPresenceState] + self._pending_presence = {} # type: dict[str, UserPresenceState] # room_id -> receipt_type -> user_id -> receipt_dict self._pending_rrs = {} @@ -123,9 +123,7 @@ class PerDestinationQueue(object): Args: states (iterable[UserPresenceState]): presence to send """ - self._pending_presence.update({ - state.user_id: state for state in states - }) + self._pending_presence.update({state.user_id: state for state in states}) self.attempt_new_transaction() def queue_read_receipt(self, receipt): @@ -135,14 +133,9 @@ class PerDestinationQueue(object): Args: receipt (synapse.api.receipt_info.ReceiptInfo): receipt to be queued """ - self._pending_rrs.setdefault( - receipt.room_id, {}, - ).setdefault( + self._pending_rrs.setdefault(receipt.room_id, {}).setdefault( receipt.receipt_type, {} - )[receipt.user_id] = { - "event_ids": receipt.event_ids, - "data": receipt.data, - } + )[receipt.user_id] = {"event_ids": receipt.event_ids, "data": receipt.data} def flush_read_receipts_for_room(self, room_id): # if we don't have any read-receipts for this room, it may be that we've already @@ -173,10 +166,7 @@ class PerDestinationQueue(object): # request at which point pending_pdus just keeps growing. 
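Stepping back to the transaction-filling change in the previous patch: the device-message query is capped at `MAX_EDUS_PER_TRANSACTION - 2` so that one slot is always left for a read-receipt EDU and one for a presence EDU, with any remaining space filled from the plain and keyed EDU queues. A condensed sketch of that accounting (simplified names, not the real method):

```python
MAX_EDUS_PER_TRANSACTION = 100  # cap enforced by the receiving server


def fill_edus(rr_edu, presence_edu, device_edus, queued_edus, keyed_edus):
    """Assemble at most MAX_EDUS_PER_TRANSACTION EDUs for one transaction.

    device_edus is assumed to have been fetched with a limit of
    MAX_EDUS_PER_TRANSACTION - 2, leaving room for the receipt and
    presence EDUs below.
    """
    edus = []
    if rr_edu is not None:
        edus.append(rr_edu)  # at most one read-receipt EDU
    if presence_edu is not None:
        edus.append(presence_edu)  # at most one presence EDU
    edus.extend(device_edus)

    # Fill the remaining slots from the plain EDU queue...
    edus.extend(queued_edus[: MAX_EDUS_PER_TRANSACTION - len(edus)])

    # ...and then from the keyed EDUs, one at a time, while space remains.
    while len(edus) < MAX_EDUS_PER_TRANSACTION and keyed_edus:
        edus.append(keyed_edus.pop())

    return edus
```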
# we need application-layer timeouts of some flavour of these # requests - logger.debug( - "TX [%s] Transaction already in progress", - self._destination - ) + logger.debug("TX [%s] Transaction already in progress", self._destination) return logger.debug("TX [%s] Starting transaction loop", self._destination) @@ -243,14 +233,22 @@ class PerDestinationQueue(object): ) pending_edus.extend(device_message_edus) - pending_edus.extend(self._pop_pending_edus(MAX_EDUS_PER_TRANSACTION - len(pending_edus))) - while len(pending_edus) < MAX_EDUS_PER_TRANSACTION and self._pending_edus_keyed: + pending_edus.extend( + self._pop_pending_edus(MAX_EDUS_PER_TRANSACTION - len(pending_edus)) + ) + while ( + len(pending_edus) < MAX_EDUS_PER_TRANSACTION + and self._pending_edus_keyed + ): _, val = self._pending_edus_keyed.popitem() pending_edus.append(val) if pending_pdus: - logger.debug("TX [%s] len(pending_pdus_by_dest[dest]) = %d", - self._destination, len(pending_pdus)) + logger.debug( + "TX [%s] len(pending_pdus_by_dest[dest]) = %d", + self._destination, + len(pending_pdus), + ) if not pending_pdus and not pending_edus: logger.debug("TX [%s] Nothing to send", self._destination) @@ -303,22 +301,25 @@ class PerDestinationQueue(object): except HttpResponseException as e: logger.warning( "TX [%s] Received %d response to transaction: %s", - self._destination, e.code, e, + self._destination, + e.code, + e, ) except RequestSendFailed as e: - logger.warning("TX [%s] Failed to send transaction: %s", self._destination, e) + logger.warning( + "TX [%s] Failed to send transaction: %s", self._destination, e + ) for p, _ in pending_pdus: - logger.info("Failed to send event %s to %s", p.event_id, - self._destination) + logger.info( + "Failed to send event %s to %s", p.event_id, self._destination + ) except Exception: - logger.exception( - "TX [%s] Failed to send transaction", - self._destination, - ) + logger.exception("TX [%s] Failed to send transaction", self._destination) for p, _ in pending_pdus: - logger.info("Failed to send event %s to %s", p.event_id, - self._destination) + logger.info( + "Failed to send event %s to %s", p.event_id, self._destination + ) finally: # We want to be *very* sure we clear this after we stop processing self.transmission_loop_running = False @@ -367,7 +368,10 @@ class PerDestinationQueue(object): last_device_stream_id = self._last_device_stream_id to_device_stream_id = self._store.get_to_device_stream_token() contents, stream_id = yield self._store.get_new_device_msgs_for_remote( - self._destination, last_device_stream_id, to_device_stream_id, limit - len(edus) + self._destination, + last_device_stream_id, + to_device_stream_id, + limit - len(edus), ) edus.extend( Edu( From 84cebb89cc90bc5f03ce0f586d457f1a2226b34c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 9 May 2019 22:53:46 +0100 Subject: [PATCH 094/429] remove instructions for jessie installation (#5164) We don't ship jessie packages, so these were a bit misleading. --- INSTALL.md | 21 +++++---------------- 1 file changed, 5 insertions(+), 16 deletions(-) diff --git a/INSTALL.md b/INSTALL.md index 1032a2c68e..b88d826f6c 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -257,9 +257,8 @@ https://github.com/spantaleev/matrix-docker-ansible-deploy #### Matrix.org packages Matrix.org provides Debian/Ubuntu packages of the latest stable version of -Synapse via https://packages.matrix.org/debian/. 
To use them: - -For Debian 9 (Stretch), Ubuntu 16.04 (Xenial), and later: +Synapse via https://packages.matrix.org/debian/. They are available for Debian +9 (Stretch), Ubuntu 16.04 (Xenial), and later. To use them: ``` sudo apt install -y lsb-release wget apt-transport-https @@ -270,19 +269,6 @@ sudo apt update sudo apt install matrix-synapse-py3 ``` -For Debian 8 (Jessie): - -``` -sudo apt install -y lsb-release wget apt-transport-https -sudo wget -O /etc/apt/trusted.gpg.d/matrix-org-archive-keyring.gpg https://packages.matrix.org/debian/matrix-org-archive-keyring.gpg -echo "deb [signed-by=5586CCC0CBBBEFC7A25811ADF473DD4473365DE1] https://packages.matrix.org/debian/ $(lsb_release -cs) main" | - sudo tee /etc/apt/sources.list.d/matrix-org.list -sudo apt update -sudo apt install matrix-synapse-py3 -``` - -The fingerprint of the repository signing key is AAF9AE843A7584B5A3E4CD2BCF45A512DE2DA058. - **Note**: if you followed a previous version of these instructions which recommended using `apt-key add` to add an old key from `https://matrix.org/packages/debian/`, you should note that this key has been @@ -290,6 +276,9 @@ revoked. You should remove the old key with `sudo apt-key remove C35EB17E1EAE708E6603A9B3AD0592FE47F0DF61`, and follow the above instructions to update your configuration. +The fingerprint of the repository signing key (as shown by `gpg +/usr/share/keyrings/matrix-org-archive-keyring.gpg`) is +`AAF9AE843A7584B5A3E4CD2BCF45A512DE2DA058`. #### Downstream Debian/Ubuntu packages From d9a02d1201d030dc846e194ecb6d4c5e4619ed83 Mon Sep 17 00:00:00 2001 From: colonelkrud Date: Thu, 9 May 2019 18:27:04 -0400 Subject: [PATCH 095/429] Add AllowEncodedSlashes to apache (#5068) * Add AllowEncodedSlashes to apache Add `AllowEncodedSlashes On` to apache config to support encoding for v3 rooms. "The AllowEncodedSlashes setting is not inherited by virtual hosts, and virtual hosts are used in many default Apache configurations, such as the one in Ubuntu. The workaround is to add the AllowEncodedSlashes setting inside a container (/etc/apache2/sites-available/default in Ubuntu)." 
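The reason this matters for room version 3 is that event IDs there are plain base64 hashes, so they can contain `/`, which clients and servers send percent-encoded in request paths; `NoDecode` tells Apache to pass the `%2F` through to Synapse untouched instead of rejecting the request or decoding it into a literal slash. A small illustration of the kind of path involved (the IDs are made up):

```python
from urllib.parse import quote

event_id = "$abc/def+ghi"  # hypothetical room-v3 style event ID containing '/'
room_id = "!somewhere:example.com"

path = "/_matrix/client/r0/rooms/%s/event/%s" % (
    quote(room_id, safe=""),
    quote(event_id, safe=""),
)
# '/_matrix/client/r0/rooms/%21somewhere%3Aexample.com/event/%24abc%2Fdef%2Bghi'
# The %2F must reach Synapse as-is; with Apache's default settings the
# request would be refused before it ever reaches the homeserver.
print(path)
```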
Source: https://stackoverflow.com/questions/4390436/need-to-allow-encoded-slashes-on-apache * change allowencodedslashes to nodecode --- docs/reverse_proxy.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/reverse_proxy.rst b/docs/reverse_proxy.rst index cc81ceb84b..7619b1097b 100644 --- a/docs/reverse_proxy.rst +++ b/docs/reverse_proxy.rst @@ -69,6 +69,7 @@ Let's assume that we expect clients to connect to our server at SSLEngine on ServerName matrix.example.com; + AllowEncodedSlashes NoDecode ProxyPass /_matrix http://127.0.0.1:8008/_matrix nocanon ProxyPassReverse /_matrix http://127.0.0.1:8008/_matrix @@ -77,6 +78,7 @@ Let's assume that we expect clients to connect to our server at SSLEngine on ServerName example.com; + AllowEncodedSlashes NoDecode ProxyPass /_matrix http://127.0.0.1:8008/_matrix nocanon ProxyPassReverse /_matrix http://127.0.0.1:8008/_matrix From b36c82576e3bb7ea72600ecf0e80c904ccf47d1d Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Fri, 10 May 2019 00:12:11 -0500 Subject: [PATCH 096/429] Run Black on the tests again (#5170) --- changelog.d/5170.misc | 1 + tests/api/test_filtering.py | 1 - tests/api/test_ratelimiting.py | 6 +- tests/app/test_openid_listener.py | 46 ++- tests/config/test_generate.py | 8 +- tests/config/test_room_directory.py | 156 +++++----- tests/config/test_server.py | 1 - tests/config/test_tls.py | 11 +- tests/crypto/test_keyring.py | 3 +- tests/federation/test_federation_sender.py | 113 ++++--- tests/handlers/test_directory.py | 24 +- tests/handlers/test_e2e_room_keys.py | 277 +++++++++--------- tests/handlers/test_presence.py | 14 +- tests/handlers/test_typing.py | 152 ++++------ tests/handlers/test_user_directory.py | 8 +- tests/http/__init__.py | 6 +- .../test_matrix_federation_agent.py | 212 +++++--------- tests/http/federation/test_srv_resolver.py | 36 +-- tests/http/test_fedclient.py | 31 +- tests/patch_inline_callbacks.py | 16 +- tests/replication/slave/storage/_base.py | 15 +- .../replication/slave/storage/test_events.py | 19 +- tests/replication/tcp/streams/_base.py | 6 +- tests/rest/admin/test_admin.py | 103 +++---- tests/rest/client/test_identity.py | 8 +- tests/rest/client/v1/test_directory.py | 51 +--- tests/rest/client/v1/test_login.py | 36 +-- tests/rest/client/v1/test_profile.py | 29 +- tests/rest/client/v2_alpha/test_register.py | 82 +++--- tests/rest/media/v1/test_base.py | 14 +- tests/rest/test_well_known.py | 13 +- tests/server.py | 7 +- .../test_resource_limits_server_notices.py | 1 - tests/state/test_v2.py | 204 +++---------- tests/storage/test_background_update.py | 4 +- tests/storage/test_base.py | 5 +- tests/storage/test_end_to_end_keys.py | 1 - tests/storage/test_monthly_active_users.py | 17 +- tests/storage/test_redaction.py | 6 +- tests/storage/test_registration.py | 2 +- tests/storage/test_roommember.py | 2 +- tests/storage/test_state.py | 83 ++---- tests/storage/test_user_directory.py | 4 +- tests/test_event_auth.py | 8 +- tests/test_federation.py | 1 - tests/test_mau.py | 8 +- tests/test_metrics.py | 56 ++-- tests/test_terms_auth.py | 8 +- tests/test_types.py | 6 +- tests/test_utils/logging_setup.py | 4 +- tests/test_visibility.py | 6 +- tests/unittest.py | 13 +- tests/util/test_async_utils.py | 23 +- tests/utils.py | 9 +- 54 files changed, 818 insertions(+), 1158 deletions(-) create mode 100644 changelog.d/5170.misc diff --git a/changelog.d/5170.misc b/changelog.d/5170.misc new file mode 100644 index 0000000000..7919dac555 --- /dev/null +++ b/changelog.d/5170.misc @@ -0,0 +1 @@ +Run `black` on the tests 
directory. diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py index 2a7044801a..6ba623de13 100644 --- a/tests/api/test_filtering.py +++ b/tests/api/test_filtering.py @@ -109,7 +109,6 @@ class FilteringTestCase(unittest.TestCase): "event_format": "client", "event_fields": ["type", "content", "sender"], }, - # a single backslash should be permitted (though it is debatable whether # it should be permitted before anything other than `.`, and what that # actually means) diff --git a/tests/api/test_ratelimiting.py b/tests/api/test_ratelimiting.py index 30a255d441..dbdd427cac 100644 --- a/tests/api/test_ratelimiting.py +++ b/tests/api/test_ratelimiting.py @@ -10,19 +10,19 @@ class TestRatelimiter(unittest.TestCase): key="test_id", time_now_s=0, rate_hz=0.1, burst_count=1 ) self.assertTrue(allowed) - self.assertEquals(10., time_allowed) + self.assertEquals(10.0, time_allowed) allowed, time_allowed = limiter.can_do_action( key="test_id", time_now_s=5, rate_hz=0.1, burst_count=1 ) self.assertFalse(allowed) - self.assertEquals(10., time_allowed) + self.assertEquals(10.0, time_allowed) allowed, time_allowed = limiter.can_do_action( key="test_id", time_now_s=10, rate_hz=0.1, burst_count=1 ) self.assertTrue(allowed) - self.assertEquals(20., time_allowed) + self.assertEquals(20.0, time_allowed) def test_pruning(self): limiter = Ratelimiter() diff --git a/tests/app/test_openid_listener.py b/tests/app/test_openid_listener.py index 590abc1e92..48792d1480 100644 --- a/tests/app/test_openid_listener.py +++ b/tests/app/test_openid_listener.py @@ -25,16 +25,18 @@ from tests.unittest import HomeserverTestCase class FederationReaderOpenIDListenerTests(HomeserverTestCase): def make_homeserver(self, reactor, clock): hs = self.setup_test_homeserver( - http_client=None, homeserverToUse=FederationReaderServer, + http_client=None, homeserverToUse=FederationReaderServer ) return hs - @parameterized.expand([ - (["federation"], "auth_fail"), - ([], "no_resource"), - (["openid", "federation"], "auth_fail"), - (["openid"], "auth_fail"), - ]) + @parameterized.expand( + [ + (["federation"], "auth_fail"), + ([], "no_resource"), + (["openid", "federation"], "auth_fail"), + (["openid"], "auth_fail"), + ] + ) def test_openid_listener(self, names, expectation): """ Test different openid listener configurations. 
@@ -53,17 +55,14 @@ class FederationReaderOpenIDListenerTests(HomeserverTestCase): # Grab the resource from the site that was told to listen site = self.reactor.tcpServers[0][1] try: - self.resource = ( - site.resource.children[b"_matrix"].children[b"federation"] - ) + self.resource = site.resource.children[b"_matrix"].children[b"federation"] except KeyError: if expectation == "no_resource": return raise request, channel = self.make_request( - "GET", - "/_matrix/federation/v1/openid/userinfo", + "GET", "/_matrix/federation/v1/openid/userinfo" ) self.render(request) @@ -74,16 +73,18 @@ class FederationReaderOpenIDListenerTests(HomeserverTestCase): class SynapseHomeserverOpenIDListenerTests(HomeserverTestCase): def make_homeserver(self, reactor, clock): hs = self.setup_test_homeserver( - http_client=None, homeserverToUse=SynapseHomeServer, + http_client=None, homeserverToUse=SynapseHomeServer ) return hs - @parameterized.expand([ - (["federation"], "auth_fail"), - ([], "no_resource"), - (["openid", "federation"], "auth_fail"), - (["openid"], "auth_fail"), - ]) + @parameterized.expand( + [ + (["federation"], "auth_fail"), + ([], "no_resource"), + (["openid", "federation"], "auth_fail"), + (["openid"], "auth_fail"), + ] + ) def test_openid_listener(self, names, expectation): """ Test different openid listener configurations. @@ -102,17 +103,14 @@ class SynapseHomeserverOpenIDListenerTests(HomeserverTestCase): # Grab the resource from the site that was told to listen site = self.reactor.tcpServers[0][1] try: - self.resource = ( - site.resource.children[b"_matrix"].children[b"federation"] - ) + self.resource = site.resource.children[b"_matrix"].children[b"federation"] except KeyError: if expectation == "no_resource": return raise request, channel = self.make_request( - "GET", - "/_matrix/federation/v1/openid/userinfo", + "GET", "/_matrix/federation/v1/openid/userinfo" ) self.render(request) diff --git a/tests/config/test_generate.py b/tests/config/test_generate.py index 795b4c298d..5017cbce85 100644 --- a/tests/config/test_generate.py +++ b/tests/config/test_generate.py @@ -45,13 +45,7 @@ class ConfigGenerationTestCase(unittest.TestCase): ) self.assertSetEqual( - set( - [ - "homeserver.yaml", - "lemurs.win.log.config", - "lemurs.win.signing.key", - ] - ), + set(["homeserver.yaml", "lemurs.win.log.config", "lemurs.win.signing.key"]), set(os.listdir(self.dir)), ) diff --git a/tests/config/test_room_directory.py b/tests/config/test_room_directory.py index 47fffcfeb2..0ec10019b3 100644 --- a/tests/config/test_room_directory.py +++ b/tests/config/test_room_directory.py @@ -22,7 +22,8 @@ from tests import unittest class RoomDirectoryConfigTestCase(unittest.TestCase): def test_alias_creation_acl(self): - config = yaml.safe_load(""" + config = yaml.safe_load( + """ alias_creation_rules: - user_id: "*bob*" alias: "*" @@ -38,43 +39,49 @@ class RoomDirectoryConfigTestCase(unittest.TestCase): action: "allow" room_list_publication_rules: [] - """) + """ + ) rd_config = RoomDirectoryConfig() rd_config.read_config(config) - self.assertFalse(rd_config.is_alias_creation_allowed( - user_id="@bob:example.com", - room_id="!test", - alias="#test:example.com", - )) + self.assertFalse( + rd_config.is_alias_creation_allowed( + user_id="@bob:example.com", room_id="!test", alias="#test:example.com" + ) + ) - self.assertTrue(rd_config.is_alias_creation_allowed( - user_id="@test:example.com", - room_id="!test", - alias="#unofficial_st:example.com", - )) + self.assertTrue( + rd_config.is_alias_creation_allowed( + 
user_id="@test:example.com", + room_id="!test", + alias="#unofficial_st:example.com", + ) + ) - self.assertTrue(rd_config.is_alias_creation_allowed( - user_id="@foobar:example.com", - room_id="!test", - alias="#test:example.com", - )) + self.assertTrue( + rd_config.is_alias_creation_allowed( + user_id="@foobar:example.com", + room_id="!test", + alias="#test:example.com", + ) + ) - self.assertTrue(rd_config.is_alias_creation_allowed( - user_id="@gah:example.com", - room_id="!test", - alias="#goo:example.com", - )) + self.assertTrue( + rd_config.is_alias_creation_allowed( + user_id="@gah:example.com", room_id="!test", alias="#goo:example.com" + ) + ) - self.assertFalse(rd_config.is_alias_creation_allowed( - user_id="@test:example.com", - room_id="!test", - alias="#test:example.com", - )) + self.assertFalse( + rd_config.is_alias_creation_allowed( + user_id="@test:example.com", room_id="!test", alias="#test:example.com" + ) + ) def test_room_publish_acl(self): - config = yaml.safe_load(""" + config = yaml.safe_load( + """ alias_creation_rules: [] room_list_publication_rules: @@ -92,55 +99,66 @@ class RoomDirectoryConfigTestCase(unittest.TestCase): action: "allow" - room_id: "!test-deny" action: "deny" - """) + """ + ) rd_config = RoomDirectoryConfig() rd_config.read_config(config) - self.assertFalse(rd_config.is_publishing_room_allowed( - user_id="@bob:example.com", - room_id="!test", - aliases=["#test:example.com"], - )) + self.assertFalse( + rd_config.is_publishing_room_allowed( + user_id="@bob:example.com", + room_id="!test", + aliases=["#test:example.com"], + ) + ) - self.assertTrue(rd_config.is_publishing_room_allowed( - user_id="@test:example.com", - room_id="!test", - aliases=["#unofficial_st:example.com"], - )) + self.assertTrue( + rd_config.is_publishing_room_allowed( + user_id="@test:example.com", + room_id="!test", + aliases=["#unofficial_st:example.com"], + ) + ) - self.assertTrue(rd_config.is_publishing_room_allowed( - user_id="@foobar:example.com", - room_id="!test", - aliases=[], - )) + self.assertTrue( + rd_config.is_publishing_room_allowed( + user_id="@foobar:example.com", room_id="!test", aliases=[] + ) + ) - self.assertTrue(rd_config.is_publishing_room_allowed( - user_id="@gah:example.com", - room_id="!test", - aliases=["#goo:example.com"], - )) + self.assertTrue( + rd_config.is_publishing_room_allowed( + user_id="@gah:example.com", + room_id="!test", + aliases=["#goo:example.com"], + ) + ) - self.assertFalse(rd_config.is_publishing_room_allowed( - user_id="@test:example.com", - room_id="!test", - aliases=["#test:example.com"], - )) + self.assertFalse( + rd_config.is_publishing_room_allowed( + user_id="@test:example.com", + room_id="!test", + aliases=["#test:example.com"], + ) + ) - self.assertTrue(rd_config.is_publishing_room_allowed( - user_id="@foobar:example.com", - room_id="!test-deny", - aliases=[], - )) + self.assertTrue( + rd_config.is_publishing_room_allowed( + user_id="@foobar:example.com", room_id="!test-deny", aliases=[] + ) + ) - self.assertFalse(rd_config.is_publishing_room_allowed( - user_id="@gah:example.com", - room_id="!test-deny", - aliases=[], - )) + self.assertFalse( + rd_config.is_publishing_room_allowed( + user_id="@gah:example.com", room_id="!test-deny", aliases=[] + ) + ) - self.assertTrue(rd_config.is_publishing_room_allowed( - user_id="@test:example.com", - room_id="!test", - aliases=["#unofficial_st:example.com", "#blah:example.com"], - )) + self.assertTrue( + rd_config.is_publishing_room_allowed( + user_id="@test:example.com", + room_id="!test", 
+ aliases=["#unofficial_st:example.com", "#blah:example.com"], + ) + ) diff --git a/tests/config/test_server.py b/tests/config/test_server.py index f5836d73ac..de64965a60 100644 --- a/tests/config/test_server.py +++ b/tests/config/test_server.py @@ -19,7 +19,6 @@ from tests import unittest class ServerConfigTestCase(unittest.TestCase): - def test_is_threepid_reserved(self): user1 = {'medium': 'email', 'address': 'user1@example.com'} user2 = {'medium': 'email', 'address': 'user2@example.com'} diff --git a/tests/config/test_tls.py b/tests/config/test_tls.py index c260d3359f..40ca428778 100644 --- a/tests/config/test_tls.py +++ b/tests/config/test_tls.py @@ -26,7 +26,6 @@ class TestConfig(TlsConfig): class TLSConfigTests(TestCase): - def test_warn_self_signed(self): """ Synapse will give a warning when it loads a self-signed certificate. @@ -34,7 +33,8 @@ class TLSConfigTests(TestCase): config_dir = self.mktemp() os.mkdir(config_dir) with open(os.path.join(config_dir, "cert.pem"), 'w') as f: - f.write("""-----BEGIN CERTIFICATE----- + f.write( + """-----BEGIN CERTIFICATE----- MIID6DCCAtACAws9CjANBgkqhkiG9w0BAQUFADCBtzELMAkGA1UEBhMCVFIxDzAN BgNVBAgMBsOHb3J1bTEUMBIGA1UEBwwLQmHFn21ha8OnxLExEjAQBgNVBAMMCWxv Y2FsaG9zdDEcMBoGA1UECgwTVHdpc3RlZCBNYXRyaXggTGFiczEkMCIGA1UECwwb @@ -56,11 +56,12 @@ I8OtG1xGwcok53lyDuuUUDexnK4O5BkjKiVlNPg4HPim5Kuj2hRNFfNt/F2BVIlj iZupikC5MT1LQaRwidkSNxCku1TfAyueiBwhLnFwTmIGNnhuDCutEVAD9kFmcJN2 SznugAcPk4doX2+rL+ila+ThqgPzIkwTUHtnmjI0TI6xsDUlXz5S3UyudrE2Qsfz s4niecZKPBizL6aucT59CsunNmmb5Glq8rlAcU+1ZTZZzGYqVYhF6axB9Qg= ------END CERTIFICATE-----""") +-----END CERTIFICATE-----""" + ) config = { "tls_certificate_path": os.path.join(config_dir, "cert.pem"), - "tls_fingerprints": [] + "tls_fingerprints": [], } t = TestConfig() @@ -75,5 +76,5 @@ s4niecZKPBizL6aucT59CsunNmmb5Glq8rlAcU+1ZTZZzGYqVYhF6axB9Qg= "Self-signed TLS certificates will not be accepted by " "Synapse 1.0. Please either provide a valid certificate, " "or use Synapse's ACME support to provision one." - ) + ), ) diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index f5bd7a1aa1..3c79d4afe7 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -169,7 +169,7 @@ class KeyringTestCase(unittest.HomeserverTestCase): self.http_client.post_json.return_value = defer.Deferred() res_deferreds_2 = kr.verify_json_objects_for_server( - [("server10", json1, )] + [("server10", json1)] ) res_deferreds_2[0].addBoth(self.check_context, None) yield logcontext.make_deferred_yieldable(res_deferreds_2[0]) @@ -345,6 +345,7 @@ def _verify_json_for_server(keyring, server_name, json_object): """thin wrapper around verify_json_for_server which makes sure it is wrapped with the patched defer.inlineCallbacks. 
""" + @defer.inlineCallbacks def v(): rv1 = yield keyring.verify_json_for_server(server_name, json_object) diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py index 28e7e27416..7bb106b5f7 100644 --- a/tests/federation/test_federation_sender.py +++ b/tests/federation/test_federation_sender.py @@ -33,11 +33,15 @@ class FederationSenderTestCases(HomeserverTestCase): mock_state_handler = self.hs.get_state_handler() mock_state_handler.get_current_hosts_in_room.return_value = ["test", "host2"] - mock_send_transaction = self.hs.get_federation_transport_client().send_transaction + mock_send_transaction = ( + self.hs.get_federation_transport_client().send_transaction + ) mock_send_transaction.return_value = defer.succeed({}) sender = self.hs.get_federation_sender() - receipt = ReadReceipt("room_id", "m.read", "user_id", ["event_id"], {"ts": 1234}) + receipt = ReadReceipt( + "room_id", "m.read", "user_id", ["event_id"], {"ts": 1234} + ) self.successResultOf(sender.send_read_receipt(receipt)) self.pump() @@ -46,21 +50,24 @@ class FederationSenderTestCases(HomeserverTestCase): mock_send_transaction.assert_called_once() json_cb = mock_send_transaction.call_args[0][1] data = json_cb() - self.assertEqual(data['edus'], [ - { - 'edu_type': 'm.receipt', - 'content': { - 'room_id': { - 'm.read': { - 'user_id': { - 'event_ids': ['event_id'], - 'data': {'ts': 1234}, - }, - }, + self.assertEqual( + data['edus'], + [ + { + 'edu_type': 'm.receipt', + 'content': { + 'room_id': { + 'm.read': { + 'user_id': { + 'event_ids': ['event_id'], + 'data': {'ts': 1234}, + } + } + } }, - }, - }, - ]) + } + ], + ) def test_send_receipts_with_backoff(self): """Send two receipts in quick succession; the second should be flushed, but @@ -68,11 +75,15 @@ class FederationSenderTestCases(HomeserverTestCase): mock_state_handler = self.hs.get_state_handler() mock_state_handler.get_current_hosts_in_room.return_value = ["test", "host2"] - mock_send_transaction = self.hs.get_federation_transport_client().send_transaction + mock_send_transaction = ( + self.hs.get_federation_transport_client().send_transaction + ) mock_send_transaction.return_value = defer.succeed({}) sender = self.hs.get_federation_sender() - receipt = ReadReceipt("room_id", "m.read", "user_id", ["event_id"], {"ts": 1234}) + receipt = ReadReceipt( + "room_id", "m.read", "user_id", ["event_id"], {"ts": 1234} + ) self.successResultOf(sender.send_read_receipt(receipt)) self.pump() @@ -81,25 +92,30 @@ class FederationSenderTestCases(HomeserverTestCase): mock_send_transaction.assert_called_once() json_cb = mock_send_transaction.call_args[0][1] data = json_cb() - self.assertEqual(data['edus'], [ - { - 'edu_type': 'm.receipt', - 'content': { - 'room_id': { - 'm.read': { - 'user_id': { - 'event_ids': ['event_id'], - 'data': {'ts': 1234}, - }, - }, + self.assertEqual( + data['edus'], + [ + { + 'edu_type': 'm.receipt', + 'content': { + 'room_id': { + 'm.read': { + 'user_id': { + 'event_ids': ['event_id'], + 'data': {'ts': 1234}, + } + } + } }, - }, - }, - ]) + } + ], + ) mock_send_transaction.reset_mock() # send the second RR - receipt = ReadReceipt("room_id", "m.read", "user_id", ["other_id"], {"ts": 1234}) + receipt = ReadReceipt( + "room_id", "m.read", "user_id", ["other_id"], {"ts": 1234} + ) self.successResultOf(sender.send_read_receipt(receipt)) self.pump() mock_send_transaction.assert_not_called() @@ -111,18 +127,21 @@ class FederationSenderTestCases(HomeserverTestCase): mock_send_transaction.assert_called_once() json_cb = 
mock_send_transaction.call_args[0][1] data = json_cb() - self.assertEqual(data['edus'], [ - { - 'edu_type': 'm.receipt', - 'content': { - 'room_id': { - 'm.read': { - 'user_id': { - 'event_ids': ['other_id'], - 'data': {'ts': 1234}, - }, - }, + self.assertEqual( + data['edus'], + [ + { + 'edu_type': 'm.receipt', + 'content': { + 'room_id': { + 'm.read': { + 'user_id': { + 'event_ids': ['other_id'], + 'data': {'ts': 1234}, + } + } + } }, - }, - }, - ]) + } + ], + ) diff --git a/tests/handlers/test_directory.py b/tests/handlers/test_directory.py index 5b2105bc76..917548bb31 100644 --- a/tests/handlers/test_directory.py +++ b/tests/handlers/test_directory.py @@ -115,11 +115,7 @@ class TestCreateAliasACL(unittest.HomeserverTestCase): # We cheekily override the config to add custom alias creation rules config = {} config["alias_creation_rules"] = [ - { - "user_id": "*", - "alias": "#unofficial_*", - "action": "allow", - } + {"user_id": "*", "alias": "#unofficial_*", "action": "allow"} ] config["room_list_publication_rules"] = [] @@ -162,9 +158,7 @@ class TestRoomListSearchDisabled(unittest.HomeserverTestCase): room_id = self.helper.create_room_as(self.user_id) request, channel = self.make_request( - "PUT", - b"directory/list/room/%s" % (room_id.encode('ascii'),), - b'{}', + "PUT", b"directory/list/room/%s" % (room_id.encode('ascii'),), b'{}' ) self.render(request) self.assertEquals(200, channel.code, channel.result) @@ -179,10 +173,7 @@ class TestRoomListSearchDisabled(unittest.HomeserverTestCase): self.directory_handler.enable_room_list_search = True # Room list is enabled so we should get some results - request, channel = self.make_request( - "GET", - b"publicRooms", - ) + request, channel = self.make_request("GET", b"publicRooms") self.render(request) self.assertEquals(200, channel.code, channel.result) self.assertTrue(len(channel.json_body["chunk"]) > 0) @@ -191,10 +182,7 @@ class TestRoomListSearchDisabled(unittest.HomeserverTestCase): self.directory_handler.enable_room_list_search = False # Room list disabled so we should get no results - request, channel = self.make_request( - "GET", - b"publicRooms", - ) + request, channel = self.make_request("GET", b"publicRooms") self.render(request) self.assertEquals(200, channel.code, channel.result) self.assertTrue(len(channel.json_body["chunk"]) == 0) @@ -202,9 +190,7 @@ class TestRoomListSearchDisabled(unittest.HomeserverTestCase): # Room list disabled so we shouldn't be allowed to publish rooms room_id = self.helper.create_room_as(self.user_id) request, channel = self.make_request( - "PUT", - b"directory/list/room/%s" % (room_id.encode('ascii'),), - b'{}', + "PUT", b"directory/list/room/%s" % (room_id.encode('ascii'),), b'{}' ) self.render(request) self.assertEquals(403, channel.code, channel.result) diff --git a/tests/handlers/test_e2e_room_keys.py b/tests/handlers/test_e2e_room_keys.py index 1c49bbbc3c..2e72a1dd23 100644 --- a/tests/handlers/test_e2e_room_keys.py +++ b/tests/handlers/test_e2e_room_keys.py @@ -36,7 +36,7 @@ room_keys = { "first_message_index": 1, "forwarded_count": 1, "is_verified": False, - "session_data": "SSBBTSBBIEZJU0gK" + "session_data": "SSBBTSBBIEZJU0gK", } } } @@ -47,15 +47,13 @@ room_keys = { class E2eRoomKeysHandlerTestCase(unittest.TestCase): def __init__(self, *args, **kwargs): super(E2eRoomKeysHandlerTestCase, self).__init__(*args, **kwargs) - self.hs = None # type: synapse.server.HomeServer + self.hs = None # type: synapse.server.HomeServer self.handler = None # type: synapse.handlers.e2e_keys.E2eRoomKeysHandler 
@defer.inlineCallbacks def setUp(self): self.hs = yield utils.setup_test_homeserver( - self.addCleanup, - handlers=None, - replication_layer=mock.Mock(), + self.addCleanup, handlers=None, replication_layer=mock.Mock() ) self.handler = synapse.handlers.e2e_room_keys.E2eRoomKeysHandler(self.hs) self.local_user = "@boris:" + self.hs.hostname @@ -88,67 +86,86 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): def test_create_version(self): """Check that we can create and then retrieve versions. """ - res = yield self.handler.create_version(self.local_user, { - "algorithm": "m.megolm_backup.v1", - "auth_data": "first_version_auth_data", - }) + res = yield self.handler.create_version( + self.local_user, + {"algorithm": "m.megolm_backup.v1", "auth_data": "first_version_auth_data"}, + ) self.assertEqual(res, "1") # check we can retrieve it as the current version res = yield self.handler.get_version_info(self.local_user) - self.assertDictEqual(res, { - "version": "1", - "algorithm": "m.megolm_backup.v1", - "auth_data": "first_version_auth_data", - }) + self.assertDictEqual( + res, + { + "version": "1", + "algorithm": "m.megolm_backup.v1", + "auth_data": "first_version_auth_data", + }, + ) # check we can retrieve it as a specific version res = yield self.handler.get_version_info(self.local_user, "1") - self.assertDictEqual(res, { - "version": "1", - "algorithm": "m.megolm_backup.v1", - "auth_data": "first_version_auth_data", - }) + self.assertDictEqual( + res, + { + "version": "1", + "algorithm": "m.megolm_backup.v1", + "auth_data": "first_version_auth_data", + }, + ) # upload a new one... - res = yield self.handler.create_version(self.local_user, { - "algorithm": "m.megolm_backup.v1", - "auth_data": "second_version_auth_data", - }) + res = yield self.handler.create_version( + self.local_user, + { + "algorithm": "m.megolm_backup.v1", + "auth_data": "second_version_auth_data", + }, + ) self.assertEqual(res, "2") # check we can retrieve it as the current version res = yield self.handler.get_version_info(self.local_user) - self.assertDictEqual(res, { - "version": "2", - "algorithm": "m.megolm_backup.v1", - "auth_data": "second_version_auth_data", - }) + self.assertDictEqual( + res, + { + "version": "2", + "algorithm": "m.megolm_backup.v1", + "auth_data": "second_version_auth_data", + }, + ) @defer.inlineCallbacks def test_update_version(self): """Check that we can update versions. 
""" - version = yield self.handler.create_version(self.local_user, { - "algorithm": "m.megolm_backup.v1", - "auth_data": "first_version_auth_data", - }) + version = yield self.handler.create_version( + self.local_user, + {"algorithm": "m.megolm_backup.v1", "auth_data": "first_version_auth_data"}, + ) self.assertEqual(version, "1") - res = yield self.handler.update_version(self.local_user, version, { - "algorithm": "m.megolm_backup.v1", - "auth_data": "revised_first_version_auth_data", - "version": version - }) + res = yield self.handler.update_version( + self.local_user, + version, + { + "algorithm": "m.megolm_backup.v1", + "auth_data": "revised_first_version_auth_data", + "version": version, + }, + ) self.assertDictEqual(res, {}) # check we can retrieve it as the current version res = yield self.handler.get_version_info(self.local_user) - self.assertDictEqual(res, { - "algorithm": "m.megolm_backup.v1", - "auth_data": "revised_first_version_auth_data", - "version": version - }) + self.assertDictEqual( + res, + { + "algorithm": "m.megolm_backup.v1", + "auth_data": "revised_first_version_auth_data", + "version": version, + }, + ) @defer.inlineCallbacks def test_update_missing_version(self): @@ -156,11 +173,15 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): """ res = None try: - yield self.handler.update_version(self.local_user, "1", { - "algorithm": "m.megolm_backup.v1", - "auth_data": "revised_first_version_auth_data", - "version": "1" - }) + yield self.handler.update_version( + self.local_user, + "1", + { + "algorithm": "m.megolm_backup.v1", + "auth_data": "revised_first_version_auth_data", + "version": "1", + }, + ) except errors.SynapseError as e: res = e.code self.assertEqual(res, 404) @@ -170,29 +191,37 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): """Check that we get a 400 if the version in the body is missing or doesn't match """ - version = yield self.handler.create_version(self.local_user, { - "algorithm": "m.megolm_backup.v1", - "auth_data": "first_version_auth_data", - }) + version = yield self.handler.create_version( + self.local_user, + {"algorithm": "m.megolm_backup.v1", "auth_data": "first_version_auth_data"}, + ) self.assertEqual(version, "1") res = None try: - yield self.handler.update_version(self.local_user, version, { - "algorithm": "m.megolm_backup.v1", - "auth_data": "revised_first_version_auth_data" - }) + yield self.handler.update_version( + self.local_user, + version, + { + "algorithm": "m.megolm_backup.v1", + "auth_data": "revised_first_version_auth_data", + }, + ) except errors.SynapseError as e: res = e.code self.assertEqual(res, 400) res = None try: - yield self.handler.update_version(self.local_user, version, { - "algorithm": "m.megolm_backup.v1", - "auth_data": "revised_first_version_auth_data", - "version": "incorrect" - }) + yield self.handler.update_version( + self.local_user, + version, + { + "algorithm": "m.megolm_backup.v1", + "auth_data": "revised_first_version_auth_data", + "version": "incorrect", + }, + ) except errors.SynapseError as e: res = e.code self.assertEqual(res, 400) @@ -223,10 +252,10 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): def test_delete_version(self): """Check that we can create and then delete versions. 
""" - res = yield self.handler.create_version(self.local_user, { - "algorithm": "m.megolm_backup.v1", - "auth_data": "first_version_auth_data", - }) + res = yield self.handler.create_version( + self.local_user, + {"algorithm": "m.megolm_backup.v1", "auth_data": "first_version_auth_data"}, + ) self.assertEqual(res, "1") # check we can delete it @@ -255,16 +284,14 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): def test_get_missing_room_keys(self): """Check we get an empty response from an empty backup """ - version = yield self.handler.create_version(self.local_user, { - "algorithm": "m.megolm_backup.v1", - "auth_data": "first_version_auth_data", - }) + version = yield self.handler.create_version( + self.local_user, + {"algorithm": "m.megolm_backup.v1", "auth_data": "first_version_auth_data"}, + ) self.assertEqual(version, "1") res = yield self.handler.get_room_keys(self.local_user, version) - self.assertDictEqual(res, { - "rooms": {} - }) + self.assertDictEqual(res, {"rooms": {}}) # TODO: test the locking semantics when uploading room_keys, # although this is probably best done in sytest @@ -275,7 +302,9 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): """ res = None try: - yield self.handler.upload_room_keys(self.local_user, "no_version", room_keys) + yield self.handler.upload_room_keys( + self.local_user, "no_version", room_keys + ) except errors.SynapseError as e: res = e.code self.assertEqual(res, 404) @@ -285,10 +314,10 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): """Check that we get a 404 on uploading keys when an nonexistent version is specified """ - version = yield self.handler.create_version(self.local_user, { - "algorithm": "m.megolm_backup.v1", - "auth_data": "first_version_auth_data", - }) + version = yield self.handler.create_version( + self.local_user, + {"algorithm": "m.megolm_backup.v1", "auth_data": "first_version_auth_data"}, + ) self.assertEqual(version, "1") res = None @@ -304,16 +333,19 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): def test_upload_room_keys_wrong_version(self): """Check that we get a 403 on uploading keys for an old version """ - version = yield self.handler.create_version(self.local_user, { - "algorithm": "m.megolm_backup.v1", - "auth_data": "first_version_auth_data", - }) + version = yield self.handler.create_version( + self.local_user, + {"algorithm": "m.megolm_backup.v1", "auth_data": "first_version_auth_data"}, + ) self.assertEqual(version, "1") - version = yield self.handler.create_version(self.local_user, { - "algorithm": "m.megolm_backup.v1", - "auth_data": "second_version_auth_data", - }) + version = yield self.handler.create_version( + self.local_user, + { + "algorithm": "m.megolm_backup.v1", + "auth_data": "second_version_auth_data", + }, + ) self.assertEqual(version, "2") res = None @@ -327,10 +359,10 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): def test_upload_room_keys_insert(self): """Check that we can insert and retrieve keys for a session """ - version = yield self.handler.create_version(self.local_user, { - "algorithm": "m.megolm_backup.v1", - "auth_data": "first_version_auth_data", - }) + version = yield self.handler.create_version( + self.local_user, + {"algorithm": "m.megolm_backup.v1", "auth_data": "first_version_auth_data"}, + ) self.assertEqual(version, "1") yield self.handler.upload_room_keys(self.local_user, version, room_keys) @@ -340,18 +372,13 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): # check getting room_keys for a given room res = yield 
self.handler.get_room_keys( - self.local_user, - version, - room_id="!abc:matrix.org" + self.local_user, version, room_id="!abc:matrix.org" ) self.assertDictEqual(res, room_keys) # check getting room_keys for a given session_id res = yield self.handler.get_room_keys( - self.local_user, - version, - room_id="!abc:matrix.org", - session_id="c0ff33", + self.local_user, version, room_id="!abc:matrix.org", session_id="c0ff33" ) self.assertDictEqual(res, room_keys) @@ -359,10 +386,10 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): def test_upload_room_keys_merge(self): """Check that we can upload a new room_key for an existing session and have it correctly merged""" - version = yield self.handler.create_version(self.local_user, { - "algorithm": "m.megolm_backup.v1", - "auth_data": "first_version_auth_data", - }) + version = yield self.handler.create_version( + self.local_user, + {"algorithm": "m.megolm_backup.v1", "auth_data": "first_version_auth_data"}, + ) self.assertEqual(version, "1") yield self.handler.upload_room_keys(self.local_user, version, room_keys) @@ -378,7 +405,7 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): res = yield self.handler.get_room_keys(self.local_user, version) self.assertEqual( res['rooms']['!abc:matrix.org']['sessions']['c0ff33']['session_data'], - "SSBBTSBBIEZJU0gK" + "SSBBTSBBIEZJU0gK", ) # test that marking the session as verified however /does/ replace it @@ -387,8 +414,7 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): res = yield self.handler.get_room_keys(self.local_user, version) self.assertEqual( - res['rooms']['!abc:matrix.org']['sessions']['c0ff33']['session_data'], - "new" + res['rooms']['!abc:matrix.org']['sessions']['c0ff33']['session_data'], "new" ) # test that a session with a higher forwarded_count doesn't replace one @@ -399,8 +425,7 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): res = yield self.handler.get_room_keys(self.local_user, version) self.assertEqual( - res['rooms']['!abc:matrix.org']['sessions']['c0ff33']['session_data'], - "new" + res['rooms']['!abc:matrix.org']['sessions']['c0ff33']['session_data'], "new" ) # TODO: check edge cases as well as the common variations here @@ -409,56 +434,36 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): def test_delete_room_keys(self): """Check that we can insert and delete keys for a session """ - version = yield self.handler.create_version(self.local_user, { - "algorithm": "m.megolm_backup.v1", - "auth_data": "first_version_auth_data", - }) + version = yield self.handler.create_version( + self.local_user, + {"algorithm": "m.megolm_backup.v1", "auth_data": "first_version_auth_data"}, + ) self.assertEqual(version, "1") # check for bulk-delete yield self.handler.upload_room_keys(self.local_user, version, room_keys) yield self.handler.delete_room_keys(self.local_user, version) res = yield self.handler.get_room_keys( - self.local_user, - version, - room_id="!abc:matrix.org", - session_id="c0ff33", + self.local_user, version, room_id="!abc:matrix.org", session_id="c0ff33" ) - self.assertDictEqual(res, { - "rooms": {} - }) + self.assertDictEqual(res, {"rooms": {}}) # check for bulk-delete per room yield self.handler.upload_room_keys(self.local_user, version, room_keys) yield self.handler.delete_room_keys( - self.local_user, - version, - room_id="!abc:matrix.org", + self.local_user, version, room_id="!abc:matrix.org" ) res = yield self.handler.get_room_keys( - self.local_user, - version, - room_id="!abc:matrix.org", - session_id="c0ff33", + self.local_user, version, 
room_id="!abc:matrix.org", session_id="c0ff33" ) - self.assertDictEqual(res, { - "rooms": {} - }) + self.assertDictEqual(res, {"rooms": {}}) # check for bulk-delete per session yield self.handler.upload_room_keys(self.local_user, version, room_keys) yield self.handler.delete_room_keys( - self.local_user, - version, - room_id="!abc:matrix.org", - session_id="c0ff33", + self.local_user, version, room_id="!abc:matrix.org", session_id="c0ff33" ) res = yield self.handler.get_room_keys( - self.local_user, - version, - room_id="!abc:matrix.org", - session_id="c0ff33", + self.local_user, version, room_id="!abc:matrix.org", session_id="c0ff33" ) - self.assertDictEqual(res, { - "rooms": {} - }) + self.assertDictEqual(res, {"rooms": {}}) diff --git a/tests/handlers/test_presence.py b/tests/handlers/test_presence.py index 94c6080e34..f70c6e7d65 100644 --- a/tests/handlers/test_presence.py +++ b/tests/handlers/test_presence.py @@ -424,8 +424,7 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): hs = self.setup_test_homeserver( - "server", http_client=None, - federation_sender=Mock(), + "server", http_client=None, federation_sender=Mock() ) return hs @@ -457,7 +456,7 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase): # Mark test2 as online, test will be offline with a last_active of 0 self.presence_handler.set_state( - UserID.from_string("@test2:server"), {"presence": PresenceState.ONLINE}, + UserID.from_string("@test2:server"), {"presence": PresenceState.ONLINE} ) self.reactor.pump([0]) # Wait for presence updates to be handled @@ -506,13 +505,13 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase): # Mark test as online self.presence_handler.set_state( - UserID.from_string("@test:server"), {"presence": PresenceState.ONLINE}, + UserID.from_string("@test:server"), {"presence": PresenceState.ONLINE} ) # Mark test2 as online, test will be offline with a last_active of 0. 
# Note we don't join them to the room yet self.presence_handler.set_state( - UserID.from_string("@test2:server"), {"presence": PresenceState.ONLINE}, + UserID.from_string("@test2:server"), {"presence": PresenceState.ONLINE} ) # Add servers to the room @@ -541,8 +540,7 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase): ) self.assertEqual(expected_state.state, PresenceState.ONLINE) self.federation_sender.send_presence_to_destinations.assert_called_once_with( - destinations=set(("server2", "server3")), - states=[expected_state] + destinations=set(("server2", "server3")), states=[expected_state] ) def _add_new_user(self, room_id, user_id): @@ -565,7 +563,7 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase): type=EventTypes.Member, sender=user_id, state_key=user_id, - content={"membership": Membership.JOIN} + content={"membership": Membership.JOIN}, ) prev_event_ids = self.get_success( diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index 5a0b6c201c..cb8b4d2913 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -64,20 +64,22 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): mock_federation_client.put_json.return_value = defer.succeed((200, "OK")) hs = self.setup_test_homeserver( - datastore=(Mock( - spec=[ - # Bits that Federation needs - "prep_send_transaction", - "delivered_txn", - "get_received_txn_response", - "set_received_txn_response", - "get_destination_retry_timings", - "get_devices_by_remote", - # Bits that user_directory needs - "get_user_directory_stream_pos", - "get_current_state_deltas", - ] - )), + datastore=( + Mock( + spec=[ + # Bits that Federation needs + "prep_send_transaction", + "delivered_txn", + "get_received_txn_response", + "set_received_txn_response", + "get_destination_retry_timings", + "get_devices_by_remote", + # Bits that user_directory needs + "get_user_directory_stream_pos", + "get_current_state_deltas", + ] + ) + ), notifier=Mock(), http_client=mock_federation_client, keyring=mock_keyring, @@ -87,7 +89,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): def prepare(self, reactor, clock, hs): # the tests assume that we are starting at unix time 1000 - reactor.pump((1000, )) + reactor.pump((1000,)) mock_notifier = hs.get_notifier() self.on_new_event = mock_notifier.on_new_event @@ -114,6 +116,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): def check_joined_room(room_id, user_id): if user_id not in [u.to_string() for u in self.room_members]: raise AuthError(401, "User is not in the room") + hs.get_auth().check_joined_room = check_joined_room def get_joined_hosts_for_room(room_id): @@ -123,6 +126,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): def get_current_users_in_room(room_id): return set(str(u) for u in self.room_members) + hs.get_state_handler().get_current_users_in_room = get_current_users_in_room self.datastore.get_user_directory_stream_pos.return_value = ( @@ -141,21 +145,16 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): self.assertEquals(self.event_source.get_current_key(), 0) - self.successResultOf(self.handler.started_typing( - target_user=U_APPLE, - auth_user=U_APPLE, - room_id=ROOM_ID, - timeout=20000, - )) - - self.on_new_event.assert_has_calls( - [call('typing_key', 1, rooms=[ROOM_ID])] + self.successResultOf( + self.handler.started_typing( + target_user=U_APPLE, auth_user=U_APPLE, room_id=ROOM_ID, timeout=20000 + ) ) + self.on_new_event.assert_has_calls([call('typing_key', 1, 
rooms=[ROOM_ID])]) + self.assertEquals(self.event_source.get_current_key(), 1) - events = self.event_source.get_new_events( - room_ids=[ROOM_ID], from_key=0 - ) + events = self.event_source.get_new_events(room_ids=[ROOM_ID], from_key=0) self.assertEquals( events[0], [ @@ -170,12 +169,11 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): def test_started_typing_remote_send(self): self.room_members = [U_APPLE, U_ONION] - self.successResultOf(self.handler.started_typing( - target_user=U_APPLE, - auth_user=U_APPLE, - room_id=ROOM_ID, - timeout=20000, - )) + self.successResultOf( + self.handler.started_typing( + target_user=U_APPLE, auth_user=U_APPLE, room_id=ROOM_ID, timeout=20000 + ) + ) put_json = self.hs.get_http_client().put_json put_json.assert_called_once_with( @@ -216,14 +214,10 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): self.render(request) self.assertEqual(channel.code, 200) - self.on_new_event.assert_has_calls( - [call('typing_key', 1, rooms=[ROOM_ID])] - ) + self.on_new_event.assert_has_calls([call('typing_key', 1, rooms=[ROOM_ID])]) self.assertEquals(self.event_source.get_current_key(), 1) - events = self.event_source.get_new_events( - room_ids=[ROOM_ID], from_key=0 - ) + events = self.event_source.get_new_events(room_ids=[ROOM_ID], from_key=0) self.assertEquals( events[0], [ @@ -247,14 +241,14 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): self.assertEquals(self.event_source.get_current_key(), 0) - self.successResultOf(self.handler.stopped_typing( - target_user=U_APPLE, auth_user=U_APPLE, room_id=ROOM_ID - )) - - self.on_new_event.assert_has_calls( - [call('typing_key', 1, rooms=[ROOM_ID])] + self.successResultOf( + self.handler.stopped_typing( + target_user=U_APPLE, auth_user=U_APPLE, room_id=ROOM_ID + ) ) + self.on_new_event.assert_has_calls([call('typing_key', 1, rooms=[ROOM_ID])]) + put_json = self.hs.get_http_client().put_json put_json.assert_called_once_with( "farm", @@ -274,18 +268,10 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): ) self.assertEquals(self.event_source.get_current_key(), 1) - events = self.event_source.get_new_events( - room_ids=[ROOM_ID], from_key=0 - ) + events = self.event_source.get_new_events(room_ids=[ROOM_ID], from_key=0) self.assertEquals( events[0], - [ - { - "type": "m.typing", - "room_id": ROOM_ID, - "content": {"user_ids": []}, - } - ], + [{"type": "m.typing", "room_id": ROOM_ID, "content": {"user_ids": []}}], ) def test_typing_timeout(self): @@ -293,22 +279,17 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): self.assertEquals(self.event_source.get_current_key(), 0) - self.successResultOf(self.handler.started_typing( - target_user=U_APPLE, - auth_user=U_APPLE, - room_id=ROOM_ID, - timeout=10000, - )) - - self.on_new_event.assert_has_calls( - [call('typing_key', 1, rooms=[ROOM_ID])] + self.successResultOf( + self.handler.started_typing( + target_user=U_APPLE, auth_user=U_APPLE, room_id=ROOM_ID, timeout=10000 + ) ) + + self.on_new_event.assert_has_calls([call('typing_key', 1, rooms=[ROOM_ID])]) self.on_new_event.reset_mock() self.assertEquals(self.event_source.get_current_key(), 1) - events = self.event_source.get_new_events( - room_ids=[ROOM_ID], from_key=0 - ) + events = self.event_source.get_new_events(room_ids=[ROOM_ID], from_key=0) self.assertEquals( events[0], [ @@ -320,45 +301,30 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): ], ) - self.reactor.pump([16, ]) + self.reactor.pump([16]) - self.on_new_event.assert_has_calls( - 
[call('typing_key', 2, rooms=[ROOM_ID])] - ) + self.on_new_event.assert_has_calls([call('typing_key', 2, rooms=[ROOM_ID])]) self.assertEquals(self.event_source.get_current_key(), 2) - events = self.event_source.get_new_events( - room_ids=[ROOM_ID], from_key=1 - ) + events = self.event_source.get_new_events(room_ids=[ROOM_ID], from_key=1) self.assertEquals( events[0], - [ - { - "type": "m.typing", - "room_id": ROOM_ID, - "content": {"user_ids": []}, - } - ], + [{"type": "m.typing", "room_id": ROOM_ID, "content": {"user_ids": []}}], ) # SYN-230 - see if we can still set after timeout - self.successResultOf(self.handler.started_typing( - target_user=U_APPLE, - auth_user=U_APPLE, - room_id=ROOM_ID, - timeout=10000, - )) - - self.on_new_event.assert_has_calls( - [call('typing_key', 3, rooms=[ROOM_ID])] + self.successResultOf( + self.handler.started_typing( + target_user=U_APPLE, auth_user=U_APPLE, room_id=ROOM_ID, timeout=10000 + ) ) + + self.on_new_event.assert_has_calls([call('typing_key', 3, rooms=[ROOM_ID])]) self.on_new_event.reset_mock() self.assertEquals(self.event_source.get_current_key(), 3) - events = self.event_source.get_new_events( - room_ids=[ROOM_ID], from_key=0 - ) + events = self.event_source.get_new_events(room_ids=[ROOM_ID], from_key=0) self.assertEquals( events[0], [ diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index 7dd1a1daf8..44468f5382 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -352,9 +352,7 @@ class TestUserDirSearchDisabled(unittest.HomeserverTestCase): # Assert user directory is not empty request, channel = self.make_request( - "POST", - b"user_directory/search", - b'{"search_term":"user2"}', + "POST", b"user_directory/search", b'{"search_term":"user2"}' ) self.render(request) self.assertEquals(200, channel.code, channel.result) @@ -363,9 +361,7 @@ class TestUserDirSearchDisabled(unittest.HomeserverTestCase): # Disable user directory and check search returns nothing self.config.user_directory_search_enabled = False request, channel = self.make_request( - "POST", - b"user_directory/search", - b'{"search_term":"user2"}', + "POST", b"user_directory/search", b'{"search_term":"user2"}' ) self.render(request) self.assertEquals(200, channel.code, channel.result) diff --git a/tests/http/__init__.py b/tests/http/__init__.py index ee8010f598..851fc0eb33 100644 --- a/tests/http/__init__.py +++ b/tests/http/__init__.py @@ -24,14 +24,12 @@ def get_test_cert_file(): # # openssl req -x509 -newkey rsa:4096 -keyout server.pem -out server.pem -days 36500 \ # -nodes -subj '/CN=testserv' - return os.path.join( - os.path.dirname(__file__), - 'server.pem', - ) + return os.path.join(os.path.dirname(__file__), 'server.pem') class ServerTLSContext(object): """A TLS Context which presents our test cert.""" + def __init__(self): self.filename = get_test_cert_file() diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index e9eb662c4c..7036615041 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -79,12 +79,12 @@ class MatrixFederationAgentTests(TestCase): # stubbing that out here. 
client_protocol = client_factory.buildProtocol(None) client_protocol.makeConnection( - FakeTransport(server_tls_protocol, self.reactor, client_protocol), + FakeTransport(server_tls_protocol, self.reactor, client_protocol) ) # tell the server tls protocol to send its stuff back to the client, too server_tls_protocol.makeConnection( - FakeTransport(client_protocol, self.reactor, server_tls_protocol), + FakeTransport(client_protocol, self.reactor, server_tls_protocol) ) # give the reactor a pump to get the TLS juices flowing. @@ -125,7 +125,7 @@ class MatrixFederationAgentTests(TestCase): _check_logcontext(context) def _handle_well_known_connection( - self, client_factory, expected_sni, content, response_headers={}, + self, client_factory, expected_sni, content, response_headers={} ): """Handle an outgoing HTTPs connection: wire it up to a server, check that the request is for a .well-known, and send the response. @@ -139,8 +139,7 @@ class MatrixFederationAgentTests(TestCase): """ # make the connection for .well-known well_known_server = self._make_connection( - client_factory, - expected_sni=expected_sni, + client_factory, expected_sni=expected_sni ) # check the .well-known request and send a response self.assertEqual(len(well_known_server.requests), 1) @@ -154,17 +153,14 @@ class MatrixFederationAgentTests(TestCase): """ self.assertEqual(request.method, b'GET') self.assertEqual(request.path, b'/.well-known/matrix/server') - self.assertEqual( - request.requestHeaders.getRawHeaders(b'host'), - [b'testserv'], - ) + self.assertEqual(request.requestHeaders.getRawHeaders(b'host'), [b'testserv']) # send back a response for k, v in headers.items(): request.setHeader(k, v) request.write(content) request.finish() - self.reactor.pump((0.1, )) + self.reactor.pump((0.1,)) def test_get(self): """ @@ -184,18 +180,14 @@ class MatrixFederationAgentTests(TestCase): self.assertEqual(port, 8448) # make a test server, and wire up the client - http_server = self._make_connection( - client_factory, - expected_sni=b"testserv", - ) + http_server = self._make_connection(client_factory, expected_sni=b"testserv") self.assertEqual(len(http_server.requests), 1) request = http_server.requests[0] self.assertEqual(request.method, b'GET') self.assertEqual(request.path, b'/foo/bar') self.assertEqual( - request.requestHeaders.getRawHeaders(b'host'), - [b'testserv:8448'] + request.requestHeaders.getRawHeaders(b'host'), [b'testserv:8448'] ) content = request.content.read() self.assertEqual(content, b'') @@ -244,19 +236,13 @@ class MatrixFederationAgentTests(TestCase): self.assertEqual(port, 8448) # make a test server, and wire up the client - http_server = self._make_connection( - client_factory, - expected_sni=None, - ) + http_server = self._make_connection(client_factory, expected_sni=None) self.assertEqual(len(http_server.requests), 1) request = http_server.requests[0] self.assertEqual(request.method, b'GET') self.assertEqual(request.path, b'/foo/bar') - self.assertEqual( - request.requestHeaders.getRawHeaders(b'host'), - [b'1.2.3.4'], - ) + self.assertEqual(request.requestHeaders.getRawHeaders(b'host'), [b'1.2.3.4']) # finish the request request.finish() @@ -285,19 +271,13 @@ class MatrixFederationAgentTests(TestCase): self.assertEqual(port, 8448) # make a test server, and wire up the client - http_server = self._make_connection( - client_factory, - expected_sni=None, - ) + http_server = self._make_connection(client_factory, expected_sni=None) self.assertEqual(len(http_server.requests), 1) request = http_server.requests[0] 
self.assertEqual(request.method, b'GET') self.assertEqual(request.path, b'/foo/bar') - self.assertEqual( - request.requestHeaders.getRawHeaders(b'host'), - [b'[::1]'], - ) + self.assertEqual(request.requestHeaders.getRawHeaders(b'host'), [b'[::1]']) # finish the request request.finish() @@ -326,19 +306,13 @@ class MatrixFederationAgentTests(TestCase): self.assertEqual(port, 80) # make a test server, and wire up the client - http_server = self._make_connection( - client_factory, - expected_sni=None, - ) + http_server = self._make_connection(client_factory, expected_sni=None) self.assertEqual(len(http_server.requests), 1) request = http_server.requests[0] self.assertEqual(request.method, b'GET') self.assertEqual(request.path, b'/foo/bar') - self.assertEqual( - request.requestHeaders.getRawHeaders(b'host'), - [b'[::1]:80'], - ) + self.assertEqual(request.requestHeaders.getRawHeaders(b'host'), [b'[::1]:80']) # finish the request request.finish() @@ -377,7 +351,7 @@ class MatrixFederationAgentTests(TestCase): # now there should be a SRV lookup self.mock_resolver.resolve_service.assert_called_once_with( - b"_matrix._tcp.testserv", + b"_matrix._tcp.testserv" ) # we should fall back to a direct connection @@ -387,19 +361,13 @@ class MatrixFederationAgentTests(TestCase): self.assertEqual(port, 8448) # make a test server, and wire up the client - http_server = self._make_connection( - client_factory, - expected_sni=b'testserv', - ) + http_server = self._make_connection(client_factory, expected_sni=b'testserv') self.assertEqual(len(http_server.requests), 1) request = http_server.requests[0] self.assertEqual(request.method, b'GET') self.assertEqual(request.path, b'/foo/bar') - self.assertEqual( - request.requestHeaders.getRawHeaders(b'host'), - [b'testserv'], - ) + self.assertEqual(request.requestHeaders.getRawHeaders(b'host'), [b'testserv']) # finish the request request.finish() @@ -427,13 +395,14 @@ class MatrixFederationAgentTests(TestCase): self.assertEqual(port, 443) self._handle_well_known_connection( - client_factory, expected_sni=b"testserv", + client_factory, + expected_sni=b"testserv", content=b'{ "m.server": "target-server" }', ) # there should be a SRV lookup self.mock_resolver.resolve_service.assert_called_once_with( - b"_matrix._tcp.target-server", + b"_matrix._tcp.target-server" ) # now we should get a connection to the target server @@ -444,8 +413,7 @@ class MatrixFederationAgentTests(TestCase): # make a test server, and wire up the client http_server = self._make_connection( - client_factory, - expected_sni=b'target-server', + client_factory, expected_sni=b'target-server' ) self.assertEqual(len(http_server.requests), 1) @@ -453,8 +421,7 @@ class MatrixFederationAgentTests(TestCase): self.assertEqual(request.method, b'GET') self.assertEqual(request.path, b'/foo/bar') self.assertEqual( - request.requestHeaders.getRawHeaders(b'host'), - [b'target-server'], + request.requestHeaders.getRawHeaders(b'host'), [b'target-server'] ) # finish the request @@ -490,8 +457,7 @@ class MatrixFederationAgentTests(TestCase): self.assertEqual(port, 443) redirect_server = self._make_connection( - client_factory, - expected_sni=b"testserv", + client_factory, expected_sni=b"testserv" ) # send a 302 redirect @@ -500,7 +466,7 @@ class MatrixFederationAgentTests(TestCase): request.redirect(b'https://testserv/even_better_known') request.finish() - self.reactor.pump((0.1, )) + self.reactor.pump((0.1,)) # now there should be another connection clients = self.reactor.tcpClients @@ -510,8 +476,7 @@ class 
MatrixFederationAgentTests(TestCase): self.assertEqual(port, 443) well_known_server = self._make_connection( - client_factory, - expected_sni=b"testserv", + client_factory, expected_sni=b"testserv" ) self.assertEqual(len(well_known_server.requests), 1, "No request after 302") @@ -521,11 +486,11 @@ class MatrixFederationAgentTests(TestCase): request.write(b'{ "m.server": "target-server" }') request.finish() - self.reactor.pump((0.1, )) + self.reactor.pump((0.1,)) # there should be a SRV lookup self.mock_resolver.resolve_service.assert_called_once_with( - b"_matrix._tcp.target-server", + b"_matrix._tcp.target-server" ) # now we should get a connection to the target server @@ -536,8 +501,7 @@ class MatrixFederationAgentTests(TestCase): # make a test server, and wire up the client http_server = self._make_connection( - client_factory, - expected_sni=b'target-server', + client_factory, expected_sni=b'target-server' ) self.assertEqual(len(http_server.requests), 1) @@ -545,8 +509,7 @@ class MatrixFederationAgentTests(TestCase): self.assertEqual(request.method, b'GET') self.assertEqual(request.path, b'/foo/bar') self.assertEqual( - request.requestHeaders.getRawHeaders(b'host'), - [b'target-server'], + request.requestHeaders.getRawHeaders(b'host'), [b'target-server'] ) # finish the request @@ -585,12 +548,12 @@ class MatrixFederationAgentTests(TestCase): self.assertEqual(port, 443) self._handle_well_known_connection( - client_factory, expected_sni=b"testserv", content=b'NOT JSON', + client_factory, expected_sni=b"testserv", content=b'NOT JSON' ) # now there should be a SRV lookup self.mock_resolver.resolve_service.assert_called_once_with( - b"_matrix._tcp.testserv", + b"_matrix._tcp.testserv" ) # we should fall back to a direct connection @@ -600,19 +563,13 @@ class MatrixFederationAgentTests(TestCase): self.assertEqual(port, 8448) # make a test server, and wire up the client - http_server = self._make_connection( - client_factory, - expected_sni=b'testserv', - ) + http_server = self._make_connection(client_factory, expected_sni=b'testserv') self.assertEqual(len(http_server.requests), 1) request = http_server.requests[0] self.assertEqual(request.method, b'GET') self.assertEqual(request.path, b'/foo/bar') - self.assertEqual( - request.requestHeaders.getRawHeaders(b'host'), - [b'testserv'], - ) + self.assertEqual(request.requestHeaders.getRawHeaders(b'host'), [b'testserv']) # finish the request request.finish() @@ -635,7 +592,7 @@ class MatrixFederationAgentTests(TestCase): # the request for a .well-known will have failed with a DNS lookup error. 
self.mock_resolver.resolve_service.assert_called_once_with( - b"_matrix._tcp.testserv", + b"_matrix._tcp.testserv" ) # Make sure treq is trying to connect @@ -646,19 +603,13 @@ class MatrixFederationAgentTests(TestCase): self.assertEqual(port, 8443) # make a test server, and wire up the client - http_server = self._make_connection( - client_factory, - expected_sni=b'testserv', - ) + http_server = self._make_connection(client_factory, expected_sni=b'testserv') self.assertEqual(len(http_server.requests), 1) request = http_server.requests[0] self.assertEqual(request.method, b'GET') self.assertEqual(request.path, b'/foo/bar') - self.assertEqual( - request.requestHeaders.getRawHeaders(b'host'), - [b'testserv'], - ) + self.assertEqual(request.requestHeaders.getRawHeaders(b'host'), [b'testserv']) # finish the request request.finish() @@ -685,17 +636,18 @@ class MatrixFederationAgentTests(TestCase): self.assertEqual(port, 443) self.mock_resolver.resolve_service.side_effect = lambda _: [ - Server(host=b"srvtarget", port=8443), + Server(host=b"srvtarget", port=8443) ] self._handle_well_known_connection( - client_factory, expected_sni=b"testserv", + client_factory, + expected_sni=b"testserv", content=b'{ "m.server": "target-server" }', ) # there should be a SRV lookup self.mock_resolver.resolve_service.assert_called_once_with( - b"_matrix._tcp.target-server", + b"_matrix._tcp.target-server" ) # now we should get a connection to the target of the SRV record @@ -706,8 +658,7 @@ class MatrixFederationAgentTests(TestCase): # make a test server, and wire up the client http_server = self._make_connection( - client_factory, - expected_sni=b'target-server', + client_factory, expected_sni=b'target-server' ) self.assertEqual(len(http_server.requests), 1) @@ -715,8 +666,7 @@ class MatrixFederationAgentTests(TestCase): self.assertEqual(request.method, b'GET') self.assertEqual(request.path, b'/foo/bar') self.assertEqual( - request.requestHeaders.getRawHeaders(b'host'), - [b'target-server'], + request.requestHeaders.getRawHeaders(b'host'), [b'target-server'] ) # finish the request @@ -757,7 +707,7 @@ class MatrixFederationAgentTests(TestCase): # now there should have been a SRV lookup self.mock_resolver.resolve_service.assert_called_once_with( - b"_matrix._tcp.xn--bcher-kva.com", + b"_matrix._tcp.xn--bcher-kva.com" ) # We should fall back to port 8448 @@ -769,8 +719,7 @@ class MatrixFederationAgentTests(TestCase): # make a test server, and wire up the client http_server = self._make_connection( - client_factory, - expected_sni=b'xn--bcher-kva.com', + client_factory, expected_sni=b'xn--bcher-kva.com' ) self.assertEqual(len(http_server.requests), 1) @@ -778,8 +727,7 @@ class MatrixFederationAgentTests(TestCase): self.assertEqual(request.method, b'GET') self.assertEqual(request.path, b'/foo/bar') self.assertEqual( - request.requestHeaders.getRawHeaders(b'host'), - [b'xn--bcher-kva.com'], + request.requestHeaders.getRawHeaders(b'host'), [b'xn--bcher-kva.com'] ) # finish the request @@ -801,7 +749,7 @@ class MatrixFederationAgentTests(TestCase): self.assertNoResult(test_d) self.mock_resolver.resolve_service.assert_called_once_with( - b"_matrix._tcp.xn--bcher-kva.com", + b"_matrix._tcp.xn--bcher-kva.com" ) # Make sure treq is trying to connect @@ -813,8 +761,7 @@ class MatrixFederationAgentTests(TestCase): # make a test server, and wire up the client http_server = self._make_connection( - client_factory, - expected_sni=b'xn--bcher-kva.com', + client_factory, expected_sni=b'xn--bcher-kva.com' ) 
self.assertEqual(len(http_server.requests), 1) @@ -822,8 +769,7 @@ class MatrixFederationAgentTests(TestCase): self.assertEqual(request.method, b'GET') self.assertEqual(request.path, b'/foo/bar') self.assertEqual( - request.requestHeaders.getRawHeaders(b'host'), - [b'xn--bcher-kva.com'], + request.requestHeaders.getRawHeaders(b'host'), [b'xn--bcher-kva.com'] ) # finish the request @@ -897,67 +843,70 @@ class TestCachePeriodFromHeaders(TestCase): # uppercase self.assertEqual( _cache_period_from_headers( - Headers({b'Cache-Control': [b'foo, Max-Age = 100, bar']}), - ), 100, + Headers({b'Cache-Control': [b'foo, Max-Age = 100, bar']}) + ), + 100, ) # missing value - self.assertIsNone(_cache_period_from_headers( - Headers({b'Cache-Control': [b'max-age=, bar']}), - )) + self.assertIsNone( + _cache_period_from_headers(Headers({b'Cache-Control': [b'max-age=, bar']})) + ) # hackernews: bogus due to semicolon - self.assertIsNone(_cache_period_from_headers( - Headers({b'Cache-Control': [b'private; max-age=0']}), - )) + self.assertIsNone( + _cache_period_from_headers( + Headers({b'Cache-Control': [b'private; max-age=0']}) + ) + ) # github self.assertEqual( _cache_period_from_headers( - Headers({b'Cache-Control': [b'max-age=0, private, must-revalidate']}), - ), 0, + Headers({b'Cache-Control': [b'max-age=0, private, must-revalidate']}) + ), + 0, ) # google self.assertEqual( _cache_period_from_headers( - Headers({b'cache-control': [b'private, max-age=0']}), - ), 0, + Headers({b'cache-control': [b'private, max-age=0']}) + ), + 0, ) def test_expires(self): self.assertEqual( _cache_period_from_headers( Headers({b'Expires': [b'Wed, 30 Jan 2019 07:35:33 GMT']}), - time_now=lambda: 1548833700 - ), 33, + time_now=lambda: 1548833700, + ), + 33, ) # cache-control overrides expires self.assertEqual( _cache_period_from_headers( - Headers({ - b'cache-control': [b'max-age=10'], - b'Expires': [b'Wed, 30 Jan 2019 07:35:33 GMT'] - }), - time_now=lambda: 1548833700 - ), 10, + Headers( + { + b'cache-control': [b'max-age=10'], + b'Expires': [b'Wed, 30 Jan 2019 07:35:33 GMT'], + } + ), + time_now=lambda: 1548833700, + ), + 10, ) # invalid expires means immediate expiry - self.assertEqual( - _cache_period_from_headers( - Headers({b'Expires': [b'0']}), - ), 0, - ) + self.assertEqual(_cache_period_from_headers(Headers({b'Expires': [b'0']})), 0) def _check_logcontext(context): current = LoggingContext.current_context() if current is not context: - raise AssertionError( - "Expected logcontext %s but was %s" % (context, current), - ) + raise AssertionError("Expected logcontext %s but was %s" % (context, current)) def _build_test_server(): @@ -973,7 +922,7 @@ def _build_test_server(): server_factory.log = _log_request server_tls_factory = TLSMemoryBIOFactory( - ServerTLSContext(), isClient=False, wrappedFactory=server_factory, + ServerTLSContext(), isClient=False, wrappedFactory=server_factory ) return server_tls_factory.buildProtocol(None) @@ -987,6 +936,7 @@ def _log_request(request): @implementer(IPolicyForHTTPS) class TrustingTLSPolicyForHTTPS(object): """An IPolicyForHTTPS which doesn't do any certificate verification""" + def creatorForNetloc(self, hostname, port): certificateOptions = OpenSSLCertificateOptions() return ClientTLSOptions(hostname, certificateOptions.getContext()) diff --git a/tests/http/federation/test_srv_resolver.py b/tests/http/federation/test_srv_resolver.py index a872e2441e..034c0db8d2 100644 --- a/tests/http/federation/test_srv_resolver.py +++ b/tests/http/federation/test_srv_resolver.py @@ -68,9 +68,7 
@@ class SrvResolverTestCase(unittest.TestCase): dns_client_mock.lookupService.assert_called_once_with(service_name) - result_deferred.callback( - ([answer_srv], None, None) - ) + result_deferred.callback(([answer_srv], None, None)) servers = self.successResultOf(test_d) @@ -112,7 +110,7 @@ class SrvResolverTestCase(unittest.TestCase): cache = {service_name: [entry]} resolver = SrvResolver( - dns_client=dns_client_mock, cache=cache, get_time=clock.time, + dns_client=dns_client_mock, cache=cache, get_time=clock.time ) servers = yield resolver.resolve_service(service_name) @@ -168,11 +166,13 @@ class SrvResolverTestCase(unittest.TestCase): self.assertNoResult(resolve_d) # returning a single "." should make the lookup fail with a ConenctError - lookup_deferred.callback(( - [dns.RRHeader(type=dns.SRV, payload=dns.Record_SRV(target=b"."))], - None, - None, - )) + lookup_deferred.callback( + ( + [dns.RRHeader(type=dns.SRV, payload=dns.Record_SRV(target=b"."))], + None, + None, + ) + ) self.failureResultOf(resolve_d, ConnectError) @@ -191,14 +191,16 @@ class SrvResolverTestCase(unittest.TestCase): resolve_d = resolver.resolve_service(service_name) self.assertNoResult(resolve_d) - lookup_deferred.callback(( - [ - dns.RRHeader(type=dns.A, payload=dns.Record_A()), - dns.RRHeader(type=dns.SRV, payload=dns.Record_SRV(target=b"host")), - ], - None, - None, - )) + lookup_deferred.callback( + ( + [ + dns.RRHeader(type=dns.A, payload=dns.Record_A()), + dns.RRHeader(type=dns.SRV, payload=dns.Record_SRV(target=b"host")), + ], + None, + None, + ) + ) servers = self.successResultOf(resolve_d) diff --git a/tests/http/test_fedclient.py b/tests/http/test_fedclient.py index cd8e086f86..279e456614 100644 --- a/tests/http/test_fedclient.py +++ b/tests/http/test_fedclient.py @@ -36,9 +36,7 @@ from tests.unittest import HomeserverTestCase def check_logcontext(context): current = LoggingContext.current_context() if current is not context: - raise AssertionError( - "Expected logcontext %s but was %s" % (context, current), - ) + raise AssertionError("Expected logcontext %s but was %s" % (context, current)) class FederationClientTests(HomeserverTestCase): @@ -54,6 +52,7 @@ class FederationClientTests(HomeserverTestCase): """ happy-path test of a GET request """ + @defer.inlineCallbacks def do_request(): with LoggingContext("one") as context: @@ -175,8 +174,7 @@ class FederationClientTests(HomeserverTestCase): self.assertIsInstance(f.value, RequestSendFailed) self.assertIsInstance( - f.value.inner_exception, - (ConnectingCancelledError, TimeoutError), + f.value.inner_exception, (ConnectingCancelledError, TimeoutError) ) def test_client_connect_no_response(self): @@ -216,9 +214,7 @@ class FederationClientTests(HomeserverTestCase): Once the client gets the headers, _request returns successfully. """ request = MatrixFederationRequest( - method="GET", - destination="testserv:8008", - path="foo/bar", + method="GET", destination="testserv:8008", path="foo/bar" ) d = self.cl._send_request(request, timeout=10000) @@ -258,8 +254,10 @@ class FederationClientTests(HomeserverTestCase): # Send it the HTTP response client.dataReceived( - (b"HTTP/1.1 200 OK\r\nContent-Type: application/json\r\n" - b"Server: Fake\r\n\r\n") + ( + b"HTTP/1.1 200 OK\r\nContent-Type: application/json\r\n" + b"Server: Fake\r\n\r\n" + ) ) # Push by enough to time it out @@ -274,9 +272,7 @@ class FederationClientTests(HomeserverTestCase): requiring a trailing slash. We need to retry the request with a trailing slash. 
Workaround for Synapse <= v0.99.3, explained in #3622. """ - d = self.cl.get_json( - "testserv:8008", "foo/bar", try_trailing_slash_on_400=True, - ) + d = self.cl.get_json("testserv:8008", "foo/bar", try_trailing_slash_on_400=True) # Send the request self.pump() @@ -329,9 +325,7 @@ class FederationClientTests(HomeserverTestCase): See test_client_requires_trailing_slashes() for context. """ - d = self.cl.get_json( - "testserv:8008", "foo/bar", try_trailing_slash_on_400=True, - ) + d = self.cl.get_json("testserv:8008", "foo/bar", try_trailing_slash_on_400=True) # Send the request self.pump() @@ -368,10 +362,7 @@ class FederationClientTests(HomeserverTestCase): self.failureResultOf(d) def test_client_sends_body(self): - self.cl.post_json( - "testserv:8008", "foo/bar", timeout=10000, - data={"a": "b"} - ) + self.cl.post_json("testserv:8008", "foo/bar", timeout=10000, data={"a": "b"}) self.pump() diff --git a/tests/patch_inline_callbacks.py b/tests/patch_inline_callbacks.py index 0f613945c8..ee0add3455 100644 --- a/tests/patch_inline_callbacks.py +++ b/tests/patch_inline_callbacks.py @@ -45,7 +45,9 @@ def do_patch(): except Exception: if LoggingContext.current_context() != start_context: err = "%s changed context from %s to %s on exception" % ( - f, start_context, LoggingContext.current_context() + f, + start_context, + LoggingContext.current_context(), ) print(err, file=sys.stderr) raise Exception(err) @@ -54,7 +56,9 @@ def do_patch(): if not isinstance(res, Deferred) or res.called: if LoggingContext.current_context() != start_context: err = "%s changed context from %s to %s" % ( - f, start_context, LoggingContext.current_context() + f, + start_context, + LoggingContext.current_context(), ) # print the error to stderr because otherwise all we # see in travis-ci is the 500 error @@ -66,9 +70,7 @@ def do_patch(): err = ( "%s returned incomplete deferred in non-sentinel context " "%s (start was %s)" - ) % ( - f, LoggingContext.current_context(), start_context, - ) + ) % (f, LoggingContext.current_context(), start_context) print(err, file=sys.stderr) raise Exception(err) @@ -76,7 +78,9 @@ def do_patch(): if LoggingContext.current_context() != start_context: err = "%s completion of %s changed context from %s to %s" % ( "Failure" if isinstance(r, Failure) else "Success", - f, start_context, LoggingContext.current_context(), + f, + start_context, + LoggingContext.current_context(), ) print(err, file=sys.stderr) raise Exception(err) diff --git a/tests/replication/slave/storage/_base.py b/tests/replication/slave/storage/_base.py index 1f72a2a04f..104349cdbd 100644 --- a/tests/replication/slave/storage/_base.py +++ b/tests/replication/slave/storage/_base.py @@ -74,21 +74,18 @@ class BaseSlavedStoreTestCase(unittest.HomeserverTestCase): self.assertEqual( master_result, expected_result, - "Expected master result to be %r but was %r" % ( - expected_result, master_result - ), + "Expected master result to be %r but was %r" + % (expected_result, master_result), ) self.assertEqual( slaved_result, expected_result, - "Expected slave result to be %r but was %r" % ( - expected_result, slaved_result - ), + "Expected slave result to be %r but was %r" + % (expected_result, slaved_result), ) self.assertEqual( master_result, slaved_result, - "Slave result %r does not match master result %r" % ( - slaved_result, master_result - ), + "Slave result %r does not match master result %r" + % (slaved_result, master_result), ) diff --git a/tests/replication/slave/storage/test_events.py 
b/tests/replication/slave/storage/test_events.py index 65ecff3bd6..a368117b43 100644 --- a/tests/replication/slave/storage/test_events.py +++ b/tests/replication/slave/storage/test_events.py @@ -234,10 +234,7 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase): type="m.room.member", sender=USER_ID_2, key=USER_ID_2, membership="join" ) msg, msgctx = self.build_event() - self.get_success(self.master_store.persist_events([ - (j2, j2ctx), - (msg, msgctx), - ])) + self.get_success(self.master_store.persist_events([(j2, j2ctx), (msg, msgctx)])) self.replicate() event_source = RoomEventSource(self.hs) @@ -257,15 +254,13 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase): # # First, we get a list of the rooms we are joined to joined_rooms = self.get_success( - self.slaved_store.get_rooms_for_user_with_stream_ordering( - USER_ID_2, - ), + self.slaved_store.get_rooms_for_user_with_stream_ordering(USER_ID_2) ) # Then, we get a list of the events since the last sync membership_changes = self.get_success( self.slaved_store.get_membership_changes_for_user( - USER_ID_2, prev_token, current_token, + USER_ID_2, prev_token, current_token ) ) @@ -298,9 +293,7 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase): self.master_store.persist_events([(event, context)], backfilled=True) ) else: - self.get_success( - self.master_store.persist_event(event, context) - ) + self.get_success(self.master_store.persist_event(event, context)) return event @@ -359,9 +352,7 @@ class SlavedEventStoreTestCase(BaseSlavedStoreTestCase): ) else: state_handler = self.hs.get_state_handler() - context = self.get_success(state_handler.compute_event_context( - event - )) + context = self.get_success(state_handler.compute_event_context(event)) self.master_store.add_push_actions_to_staging( event.event_id, {user_id: actions for user_id, actions in push_actions} diff --git a/tests/replication/tcp/streams/_base.py b/tests/replication/tcp/streams/_base.py index 38b368a972..ce3835ae6a 100644 --- a/tests/replication/tcp/streams/_base.py +++ b/tests/replication/tcp/streams/_base.py @@ -22,6 +22,7 @@ from tests.server import FakeTransport class BaseStreamTestCase(unittest.HomeserverTestCase): """Base class for tests of the replication streams""" + def prepare(self, reactor, clock, hs): # build a replication server server_factory = ReplicationStreamProtocolFactory(self.hs) @@ -52,6 +53,7 @@ class BaseStreamTestCase(unittest.HomeserverTestCase): class TestReplicationClientHandler(object): """Drop-in for ReplicationClientHandler which just collects RDATA rows""" + def __init__(self): self.received_rdata_rows = [] @@ -69,6 +71,4 @@ class TestReplicationClientHandler(object): def on_rdata(self, stream_name, token, rows): for r in rows: - self.received_rdata_rows.append( - (stream_name, token, r) - ) + self.received_rdata_rows.append((stream_name, token, r)) diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py index da19a83918..ee5f09041f 100644 --- a/tests/rest/admin/test_admin.py +++ b/tests/rest/admin/test_admin.py @@ -41,10 +41,10 @@ class VersionTestCase(unittest.HomeserverTestCase): request, channel = self.make_request("GET", self.url, shorthand=False) self.render(request) - self.assertEqual(200, int(channel.result["code"]), - msg=channel.result["body"]) - self.assertEqual({'server_version', 'python_version'}, - set(channel.json_body.keys())) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) + self.assertEqual( + {'server_version', 'python_version'}, 
set(channel.json_body.keys()) + ) class UserRegisterTestCase(unittest.HomeserverTestCase): @@ -200,9 +200,7 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): nonce = channel.json_body["nonce"] want_mac = hmac.new(key=b"shared", digestmod=hashlib.sha1) - want_mac.update( - nonce.encode('ascii') + b"\x00bob\x00abc123\x00admin" - ) + want_mac.update(nonce.encode('ascii') + b"\x00bob\x00abc123\x00admin") want_mac = want_mac.hexdigest() body = json.dumps( @@ -330,11 +328,13 @@ class UserRegisterTestCase(unittest.HomeserverTestCase): # # Invalid user_type - body = json.dumps({ - "nonce": nonce(), - "username": "a", - "password": "1234", - "user_type": "invalid"} + body = json.dumps( + { + "nonce": nonce(), + "username": "a", + "password": "1234", + "user_type": "invalid", + } ) request, channel = self.make_request("POST", self.url, body.encode('utf8')) self.render(request) @@ -357,9 +357,7 @@ class ShutdownRoomTestCase(unittest.HomeserverTestCase): hs.config.user_consent_version = "1" consent_uri_builder = Mock() - consent_uri_builder.build_user_consent_uri.return_value = ( - "http://example.com" - ) + consent_uri_builder.build_user_consent_uri.return_value = "http://example.com" self.event_creation_handler._consent_uri_builder = consent_uri_builder self.store = hs.get_datastore() @@ -371,9 +369,7 @@ class ShutdownRoomTestCase(unittest.HomeserverTestCase): self.other_user_token = self.login("user", "pass") # Mark the admin user as having consented - self.get_success( - self.store.user_set_consent_version(self.admin_user, "1"), - ) + self.get_success(self.store.user_set_consent_version(self.admin_user, "1")) def test_shutdown_room_consent(self): """Test that we can shutdown rooms with local users who have not @@ -385,9 +381,7 @@ class ShutdownRoomTestCase(unittest.HomeserverTestCase): room_id = self.helper.create_room_as(self.other_user, tok=self.other_user_token) # Assert one user in room - users_in_room = self.get_success( - self.store.get_users_in_room(room_id), - ) + users_in_room = self.get_success(self.store.get_users_in_room(room_id)) self.assertEqual([self.other_user], users_in_room) # Enable require consent to send events @@ -395,8 +389,7 @@ class ShutdownRoomTestCase(unittest.HomeserverTestCase): # Assert that the user is getting consent error self.helper.send( - room_id, - body="foo", tok=self.other_user_token, expect_code=403, + room_id, body="foo", tok=self.other_user_token, expect_code=403 ) # Test that the admin can still send shutdown @@ -412,9 +405,7 @@ class ShutdownRoomTestCase(unittest.HomeserverTestCase): self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) # Assert there is now no longer anyone in the room - users_in_room = self.get_success( - self.store.get_users_in_room(room_id), - ) + users_in_room = self.get_success(self.store.get_users_in_room(room_id)) self.assertEqual([], users_in_room) @unittest.DEBUG @@ -459,24 +450,20 @@ class ShutdownRoomTestCase(unittest.HomeserverTestCase): url = "rooms/%s/initialSync" % (room_id,) request, channel = self.make_request( - "GET", - url.encode('ascii'), - access_token=self.admin_user_tok, + "GET", url.encode('ascii'), access_token=self.admin_user_tok ) self.render(request) self.assertEqual( - expect_code, int(channel.result["code"]), msg=channel.result["body"], + expect_code, int(channel.result["code"]), msg=channel.result["body"] ) url = "events?timeout=0&room_id=" + room_id request, channel = self.make_request( - "GET", - url.encode('ascii'), - access_token=self.admin_user_tok, + "GET", 
url.encode('ascii'), access_token=self.admin_user_tok ) self.render(request) self.assertEqual( - expect_code, int(channel.result["code"]), msg=channel.result["body"], + expect_code, int(channel.result["code"]), msg=channel.result["body"] ) @@ -502,15 +489,11 @@ class DeleteGroupTestCase(unittest.HomeserverTestCase): "POST", "/create_group".encode('ascii'), access_token=self.admin_user_tok, - content={ - "localpart": "test", - } + content={"localpart": "test"}, ) self.render(request) - self.assertEqual( - 200, int(channel.result["code"]), msg=channel.result["body"], - ) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) group_id = channel.json_body["group_id"] @@ -520,27 +503,17 @@ class DeleteGroupTestCase(unittest.HomeserverTestCase): url = "/groups/%s/admin/users/invite/%s" % (group_id, self.other_user) request, channel = self.make_request( - "PUT", - url.encode('ascii'), - access_token=self.admin_user_tok, - content={} + "PUT", url.encode('ascii'), access_token=self.admin_user_tok, content={} ) self.render(request) - self.assertEqual( - 200, int(channel.result["code"]), msg=channel.result["body"], - ) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) url = "/groups/%s/self/accept_invite" % (group_id,) request, channel = self.make_request( - "PUT", - url.encode('ascii'), - access_token=self.other_user_token, - content={} + "PUT", url.encode('ascii'), access_token=self.other_user_token, content={} ) self.render(request) - self.assertEqual( - 200, int(channel.result["code"]), msg=channel.result["body"], - ) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) # Check other user knows they're in the group self.assertIn(group_id, self._get_groups_user_is_in(self.admin_user_tok)) @@ -552,15 +525,11 @@ class DeleteGroupTestCase(unittest.HomeserverTestCase): "POST", url.encode('ascii'), access_token=self.admin_user_tok, - content={ - "localpart": "test", - } + content={"localpart": "test"}, ) self.render(request) - self.assertEqual( - 200, int(channel.result["code"]), msg=channel.result["body"], - ) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) # Check group returns 404 self._check_group(group_id, expect_code=404) @@ -576,28 +545,22 @@ class DeleteGroupTestCase(unittest.HomeserverTestCase): url = "/groups/%s/profile" % (group_id,) request, channel = self.make_request( - "GET", - url.encode('ascii'), - access_token=self.admin_user_tok, + "GET", url.encode('ascii'), access_token=self.admin_user_tok ) self.render(request) self.assertEqual( - expect_code, int(channel.result["code"]), msg=channel.result["body"], + expect_code, int(channel.result["code"]), msg=channel.result["body"] ) def _get_groups_user_is_in(self, access_token): """Returns the list of groups the user is in (given their access token) """ request, channel = self.make_request( - "GET", - "/joined_groups".encode('ascii'), - access_token=access_token, + "GET", "/joined_groups".encode('ascii'), access_token=access_token ) self.render(request) - self.assertEqual( - 200, int(channel.result["code"]), msg=channel.result["body"], - ) + self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"]) return channel.json_body["groups"] diff --git a/tests/rest/client/test_identity.py b/tests/rest/client/test_identity.py index 2e51ffa418..1a714ff58a 100644 --- a/tests/rest/client/test_identity.py +++ b/tests/rest/client/test_identity.py @@ -44,7 +44,7 @@ class IdentityTestCase(unittest.HomeserverTestCase): tok 
= self.login("kermit", "monkey") request, channel = self.make_request( - b"POST", "/createRoom", b"{}", access_token=tok, + b"POST", "/createRoom", b"{}", access_token=tok ) self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) @@ -56,11 +56,9 @@ class IdentityTestCase(unittest.HomeserverTestCase): "address": "test@example.com", } request_data = json.dumps(params) - request_url = ( - "/rooms/%s/invite" % (room_id) - ).encode('ascii') + request_url = ("/rooms/%s/invite" % (room_id)).encode('ascii') request, channel = self.make_request( - b"POST", request_url, request_data, access_token=tok, + b"POST", request_url, request_data, access_token=tok ) self.render(request) self.assertEquals(channel.result["code"], b"403", channel.result) diff --git a/tests/rest/client/v1/test_directory.py b/tests/rest/client/v1/test_directory.py index f63c68e7ed..73c5b44b46 100644 --- a/tests/rest/client/v1/test_directory.py +++ b/tests/rest/client/v1/test_directory.py @@ -45,7 +45,7 @@ class DirectoryTestCase(unittest.HomeserverTestCase): self.room_owner_tok = self.login("room_owner", "test") self.room_id = self.helper.create_room_as( - self.room_owner, tok=self.room_owner_tok, + self.room_owner, tok=self.room_owner_tok ) self.user = self.register_user("user", "test") @@ -80,12 +80,10 @@ class DirectoryTestCase(unittest.HomeserverTestCase): # We use deliberately a localpart under the length threshold so # that we can make sure that the check is done on the whole alias. - data = { - "room_alias_name": random_string(256 - len(self.hs.hostname)), - } + data = {"room_alias_name": random_string(256 - len(self.hs.hostname))} request_data = json.dumps(data) request, channel = self.make_request( - "POST", url, request_data, access_token=self.user_tok, + "POST", url, request_data, access_token=self.user_tok ) self.render(request) self.assertEqual(channel.code, 400, channel.result) @@ -96,51 +94,42 @@ class DirectoryTestCase(unittest.HomeserverTestCase): # Check with an alias of allowed length. There should already be # a test that ensures it works in test_register.py, but let's be # as cautious as possible here. 
- data = { - "room_alias_name": random_string(5), - } + data = {"room_alias_name": random_string(5)} request_data = json.dumps(data) request, channel = self.make_request( - "POST", url, request_data, access_token=self.user_tok, + "POST", url, request_data, access_token=self.user_tok ) self.render(request) self.assertEqual(channel.code, 200, channel.result) def set_alias_via_state_event(self, expected_code, alias_length=5): - url = ("/_matrix/client/r0/rooms/%s/state/m.room.aliases/%s" - % (self.room_id, self.hs.hostname)) + url = "/_matrix/client/r0/rooms/%s/state/m.room.aliases/%s" % ( + self.room_id, + self.hs.hostname, + ) - data = { - "aliases": [ - self.random_alias(alias_length), - ], - } + data = {"aliases": [self.random_alias(alias_length)]} request_data = json.dumps(data) request, channel = self.make_request( - "PUT", url, request_data, access_token=self.user_tok, + "PUT", url, request_data, access_token=self.user_tok ) self.render(request) self.assertEqual(channel.code, expected_code, channel.result) def set_alias_via_directory(self, expected_code, alias_length=5): url = "/_matrix/client/r0/directory/room/%s" % self.random_alias(alias_length) - data = { - "room_id": self.room_id, - } + data = {"room_id": self.room_id} request_data = json.dumps(data) request, channel = self.make_request( - "PUT", url, request_data, access_token=self.user_tok, + "PUT", url, request_data, access_token=self.user_tok ) self.render(request) self.assertEqual(channel.code, expected_code, channel.result) def random_alias(self, length): - return RoomAlias( - random_string(length), - self.hs.hostname, - ).to_string() + return RoomAlias(random_string(length), self.hs.hostname).to_string() def ensure_user_left_room(self): self.ensure_membership("leave") @@ -151,17 +140,9 @@ class DirectoryTestCase(unittest.HomeserverTestCase): def ensure_membership(self, membership): try: if membership == "leave": - self.helper.leave( - room=self.room_id, - user=self.user, - tok=self.user_tok, - ) + self.helper.leave(room=self.room_id, user=self.user, tok=self.user_tok) if membership == "join": - self.helper.join( - room=self.room_id, - user=self.user, - tok=self.user_tok, - ) + self.helper.join(room=self.room_id, user=self.user, tok=self.user_tok) except AssertionError: # We don't care whether the leave request didn't return a 200 (e.g. # if the user isn't already in the room), because we only want to diff --git a/tests/rest/client/v1/test_login.py b/tests/rest/client/v1/test_login.py index 9ebd91f678..0397f91a9e 100644 --- a/tests/rest/client/v1/test_login.py +++ b/tests/rest/client/v1/test_login.py @@ -37,10 +37,7 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): for i in range(0, 6): params = { "type": "m.login.password", - "identifier": { - "type": "m.id.user", - "user": "kermit" + str(i), - }, + "identifier": {"type": "m.id.user", "user": "kermit" + str(i)}, "password": "monkey", } request_data = json.dumps(params) @@ -57,14 +54,11 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): # than 1min. self.assertTrue(retry_after_ms < 6000) - self.reactor.advance(retry_after_ms / 1000.) 
+ self.reactor.advance(retry_after_ms / 1000.0) params = { "type": "m.login.password", - "identifier": { - "type": "m.id.user", - "user": "kermit" + str(i), - }, + "identifier": {"type": "m.id.user", "user": "kermit" + str(i)}, "password": "monkey", } request_data = json.dumps(params) @@ -82,10 +76,7 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): for i in range(0, 6): params = { "type": "m.login.password", - "identifier": { - "type": "m.id.user", - "user": "kermit", - }, + "identifier": {"type": "m.id.user", "user": "kermit"}, "password": "monkey", } request_data = json.dumps(params) @@ -102,14 +93,11 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): # than 1min. self.assertTrue(retry_after_ms < 6000) - self.reactor.advance(retry_after_ms / 1000.) + self.reactor.advance(retry_after_ms / 1000.0) params = { "type": "m.login.password", - "identifier": { - "type": "m.id.user", - "user": "kermit", - }, + "identifier": {"type": "m.id.user", "user": "kermit"}, "password": "monkey", } request_data = json.dumps(params) @@ -127,10 +115,7 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): for i in range(0, 6): params = { "type": "m.login.password", - "identifier": { - "type": "m.id.user", - "user": "kermit", - }, + "identifier": {"type": "m.id.user", "user": "kermit"}, "password": "notamonkey", } request_data = json.dumps(params) @@ -147,14 +132,11 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): # than 1min. self.assertTrue(retry_after_ms < 6000) - self.reactor.advance(retry_after_ms / 1000.) + self.reactor.advance(retry_after_ms / 1000.0) params = { "type": "m.login.password", - "identifier": { - "type": "m.id.user", - "user": "kermit", - }, + "identifier": {"type": "m.id.user", "user": "kermit"}, "password": "notamonkey", } request_data = json.dumps(params) diff --git a/tests/rest/client/v1/test_profile.py b/tests/rest/client/v1/test_profile.py index 7306e61b7c..ed034879cf 100644 --- a/tests/rest/client/v1/test_profile.py +++ b/tests/rest/client/v1/test_profile.py @@ -199,37 +199,24 @@ class ProfilesRestrictedTestCase(unittest.HomeserverTestCase): def test_in_shared_room(self): self.ensure_requester_left_room() - self.helper.join( - room=self.room_id, - user=self.requester, - tok=self.requester_tok, - ) + self.helper.join(room=self.room_id, user=self.requester, tok=self.requester_tok) self.try_fetch_profile(200, self.requester_tok) def try_fetch_profile(self, expected_code, access_token=None): + self.request_profile(expected_code, access_token=access_token) + self.request_profile( - expected_code, - access_token=access_token + expected_code, url_suffix="/displayname", access_token=access_token ) self.request_profile( - expected_code, - url_suffix="/displayname", - access_token=access_token, - ) - - self.request_profile( - expected_code, - url_suffix="/avatar_url", - access_token=access_token, + expected_code, url_suffix="/avatar_url", access_token=access_token ) def request_profile(self, expected_code, url_suffix="", access_token=None): request, channel = self.make_request( - "GET", - self.profile_url + url_suffix, - access_token=access_token, + "GET", self.profile_url + url_suffix, access_token=access_token ) self.render(request) self.assertEqual(channel.code, expected_code, channel.result) @@ -237,9 +224,7 @@ class ProfilesRestrictedTestCase(unittest.HomeserverTestCase): def ensure_requester_left_room(self): try: self.helper.leave( - room=self.room_id, - user=self.requester, - tok=self.requester_tok, + room=self.room_id, 
user=self.requester, tok=self.requester_tok ) except AssertionError: # We don't care whether the leave request didn't return a 200 (e.g. diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 1c3a621d26..be95dc592d 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -41,11 +41,10 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): as_token = "i_am_an_app_service" appservice = ApplicationService( - as_token, self.hs.config.server_name, + as_token, + self.hs.config.server_name, id="1234", - namespaces={ - "users": [{"regex": r"@as_user.*", "exclusive": True}], - }, + namespaces={"users": [{"regex": r"@as_user.*", "exclusive": True}]}, ) self.hs.get_datastore().services_cache.append(appservice) @@ -57,10 +56,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) - det_data = { - "user_id": user_id, - "home_server": self.hs.hostname, - } + det_data = {"user_id": user_id, "home_server": self.hs.hostname} self.assertDictContainsSubset(det_data, channel.json_body) def test_POST_appservice_registration_invalid(self): @@ -128,10 +124,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): request, channel = self.make_request(b"POST", self.url + b"?kind=guest", b"{}") self.render(request) - det_data = { - "home_server": self.hs.hostname, - "device_id": "guest_device", - } + det_data = {"home_server": self.hs.hostname, "device_id": "guest_device"} self.assertEquals(channel.result["code"], b"200", channel.result) self.assertDictContainsSubset(det_data, channel.json_body) @@ -159,7 +152,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): else: self.assertEquals(channel.result["code"], b"200", channel.result) - self.reactor.advance(retry_after_ms / 1000.) + self.reactor.advance(retry_after_ms / 1000.0) request, channel = self.make_request(b"POST", self.url + b"?kind=guest", b"{}") self.render(request) @@ -187,7 +180,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): else: self.assertEquals(channel.result["code"], b"200", channel.result) - self.reactor.advance(retry_after_ms / 1000.) + self.reactor.advance(retry_after_ms / 1000.0) request, channel = self.make_request(b"POST", self.url + b"?kind=guest", b"{}") self.render(request) @@ -221,23 +214,19 @@ class AccountValidityTestCase(unittest.HomeserverTestCase): # The specific endpoint doesn't matter, all we need is an authenticated # endpoint. 
- request, channel = self.make_request( - b"GET", "/sync", access_token=tok, - ) + request, channel = self.make_request(b"GET", "/sync", access_token=tok) self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) self.reactor.advance(datetime.timedelta(weeks=1).total_seconds()) - request, channel = self.make_request( - b"GET", "/sync", access_token=tok, - ) + request, channel = self.make_request(b"GET", "/sync", access_token=tok) self.render(request) self.assertEquals(channel.result["code"], b"403", channel.result) self.assertEquals( - channel.json_body["errcode"], Codes.EXPIRED_ACCOUNT, channel.result, + channel.json_body["errcode"], Codes.EXPIRED_ACCOUNT, channel.result ) def test_manual_renewal(self): @@ -253,21 +242,17 @@ class AccountValidityTestCase(unittest.HomeserverTestCase): admin_tok = self.login("admin", "adminpassword") url = "/_matrix/client/unstable/admin/account_validity/validity" - params = { - "user_id": user_id, - } + params = {"user_id": user_id} request_data = json.dumps(params) request, channel = self.make_request( - b"POST", url, request_data, access_token=admin_tok, + b"POST", url, request_data, access_token=admin_tok ) self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) # The specific endpoint doesn't matter, all we need is an authenticated # endpoint. - request, channel = self.make_request( - b"GET", "/sync", access_token=tok, - ) + request, channel = self.make_request(b"GET", "/sync", access_token=tok) self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) @@ -286,20 +271,18 @@ class AccountValidityTestCase(unittest.HomeserverTestCase): } request_data = json.dumps(params) request, channel = self.make_request( - b"POST", url, request_data, access_token=admin_tok, + b"POST", url, request_data, access_token=admin_tok ) self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) # The specific endpoint doesn't matter, all we need is an authenticated # endpoint. - request, channel = self.make_request( - b"GET", "/sync", access_token=tok, - ) + request, channel = self.make_request(b"GET", "/sync", access_token=tok) self.render(request) self.assertEquals(channel.result["code"], b"403", channel.result) self.assertEquals( - channel.json_body["errcode"], Codes.EXPIRED_ACCOUNT, channel.result, + channel.json_body["errcode"], Codes.EXPIRED_ACCOUNT, channel.result ) @@ -358,10 +341,15 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): # We need to manually add an email address otherwise the handler will do # nothing. now = self.hs.clock.time_msec() - self.get_success(self.store.user_add_threepid( - user_id=user_id, medium="email", address="kermit@example.com", - validated_at=now, added_at=now, - )) + self.get_success( + self.store.user_add_threepid( + user_id=user_id, + medium="email", + address="kermit@example.com", + validated_at=now, + added_at=now, + ) + ) # Move 6 days forward. This should trigger a renewal email to be sent. self.reactor.advance(datetime.timedelta(days=6).total_seconds()) @@ -379,9 +367,7 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): # our access token should be denied from now, otherwise they should # succeed. 
self.reactor.advance(datetime.timedelta(days=3).total_seconds()) - request, channel = self.make_request( - b"GET", "/sync", access_token=tok, - ) + request, channel = self.make_request(b"GET", "/sync", access_token=tok) self.render(request) self.assertEquals(channel.result["code"], b"200", channel.result) @@ -393,13 +379,19 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): # We need to manually add an email address otherwise the handler will do # nothing. now = self.hs.clock.time_msec() - self.get_success(self.store.user_add_threepid( - user_id=user_id, medium="email", address="kermit@example.com", - validated_at=now, added_at=now, - )) + self.get_success( + self.store.user_add_threepid( + user_id=user_id, + medium="email", + address="kermit@example.com", + validated_at=now, + added_at=now, + ) + ) request, channel = self.make_request( - b"POST", "/_matrix/client/unstable/account_validity/send_mail", + b"POST", + "/_matrix/client/unstable/account_validity/send_mail", access_token=tok, ) self.render(request) diff --git a/tests/rest/media/v1/test_base.py b/tests/rest/media/v1/test_base.py index af8f74eb42..00688a7325 100644 --- a/tests/rest/media/v1/test_base.py +++ b/tests/rest/media/v1/test_base.py @@ -26,20 +26,14 @@ class GetFileNameFromHeadersTests(unittest.TestCase): b'inline; filename="aze%20rty"': u"aze%20rty", b'inline; filename="aze\"rty"': u'aze"rty', b'inline; filename="azer;ty"': u"azer;ty", - b"inline; filename*=utf-8''foo%C2%A3bar": u"foo£bar", } def tests(self): for hdr, expected in self.TEST_CASES.items(): - res = get_filename_from_headers( - { - b'Content-Disposition': [hdr], - }, - ) + res = get_filename_from_headers({b'Content-Disposition': [hdr]}) self.assertEqual( - res, expected, - "expected output for %s to be %s but was %s" % ( - hdr, expected, res, - ) + res, + expected, + "expected output for %s to be %s but was %s" % (hdr, expected, res), ) diff --git a/tests/rest/test_well_known.py b/tests/rest/test_well_known.py index 8d8f03e005..b090bb974c 100644 --- a/tests/rest/test_well_known.py +++ b/tests/rest/test_well_known.py @@ -31,27 +31,24 @@ class WellKnownTests(unittest.HomeserverTestCase): self.hs.config.default_identity_server = "https://testis" request, channel = self.make_request( - "GET", - "/.well-known/matrix/client", - shorthand=False, + "GET", "/.well-known/matrix/client", shorthand=False ) self.render(request) self.assertEqual(request.code, 200) self.assertEqual( - channel.json_body, { + channel.json_body, + { "m.homeserver": {"base_url": "https://tesths"}, "m.identity_server": {"base_url": "https://testis"}, - } + }, ) def test_well_known_no_public_baseurl(self): self.hs.config.public_baseurl = None request, channel = self.make_request( - "GET", - "/.well-known/matrix/client", - shorthand=False, + "GET", "/.well-known/matrix/client", shorthand=False ) self.render(request) diff --git a/tests/server.py b/tests/server.py index 8f89f4a83d..fc41345488 100644 --- a/tests/server.py +++ b/tests/server.py @@ -182,7 +182,8 @@ def make_request( if federation_auth_origin is not None: req.requestHeaders.addRawHeader( - b"Authorization", b"X-Matrix origin=%s,key=,sig=" % (federation_auth_origin,) + b"Authorization", + b"X-Matrix origin=%s,key=,sig=" % (federation_auth_origin,), ) if content: @@ -233,7 +234,7 @@ class ThreadedMemoryReactorClock(MemoryReactorClock): class FakeResolver(object): def getHostByName(self, name, timeout=None): if name not in lookups: - return fail(DNSLookupError("OH NO: unknown %s" % (name, ))) + return 
fail(DNSLookupError("OH NO: unknown %s" % (name,))) return succeed(lookups[name]) self.nameResolver = SimpleResolverComplexifier(FakeResolver()) @@ -454,6 +455,6 @@ class FakeTransport(object): logger.warning("Exception writing to protocol: %s", e) return - self.buffer = self.buffer[len(to_write):] + self.buffer = self.buffer[len(to_write) :] if self.buffer and self.autoflush: self._reactor.callLater(0.0, self.flush) diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index be73e718c2..a490b81ed4 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -27,7 +27,6 @@ from tests import unittest class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): - def make_homeserver(self, reactor, clock): hs_config = self.default_config("test") hs_config.server_notices_mxid = "@server:test" diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py index f448b01326..9c5311d916 100644 --- a/tests/state/test_v2.py +++ b/tests/state/test_v2.py @@ -50,6 +50,7 @@ class FakeEvent(object): refer to events. The event_id has node_id as localpart and example.com as domain. """ + def __init__(self, id, sender, type, state_key, content): self.node_id = id self.event_id = EventID(id, "example.com").to_string() @@ -142,24 +143,14 @@ INITIAL_EVENTS = [ content=MEMBERSHIP_CONTENT_JOIN, ), FakeEvent( - id="START", - sender=ZARA, - type=EventTypes.Message, - state_key=None, - content={}, + id="START", sender=ZARA, type=EventTypes.Message, state_key=None, content={} ), FakeEvent( - id="END", - sender=ZARA, - type=EventTypes.Message, - state_key=None, - content={}, + id="END", sender=ZARA, type=EventTypes.Message, state_key=None, content={} ), ] -INITIAL_EDGES = [ - "START", "IMZ", "IMC", "IMB", "IJR", "IPOWER", "IMA", "CREATE", -] +INITIAL_EDGES = ["START", "IMZ", "IMC", "IMB", "IJR", "IPOWER", "IMA", "CREATE"] class StateTestCase(unittest.TestCase): @@ -170,12 +161,7 @@ class StateTestCase(unittest.TestCase): sender=ALICE, type=EventTypes.PowerLevels, state_key="", - content={ - "users": { - ALICE: 100, - BOB: 50, - } - }, + content={"users": {ALICE: 100, BOB: 50}}, ), FakeEvent( id="MA", @@ -196,19 +182,11 @@ class StateTestCase(unittest.TestCase): sender=BOB, type=EventTypes.PowerLevels, state_key='', - content={ - "users": { - ALICE: 100, - BOB: 50, - }, - }, + content={"users": {ALICE: 100, BOB: 50}}, ), ] - edges = [ - ["END", "MB", "MA", "PA", "START"], - ["END", "PB", "PA"], - ] + edges = [["END", "MB", "MA", "PA", "START"], ["END", "PB", "PA"]] expected_state_ids = ["PA", "MA", "MB"] @@ -232,10 +210,7 @@ class StateTestCase(unittest.TestCase): ), ] - edges = [ - ["END", "JR", "START"], - ["END", "ME", "START"], - ] + edges = [["END", "JR", "START"], ["END", "ME", "START"]] expected_state_ids = ["JR"] @@ -248,45 +223,25 @@ class StateTestCase(unittest.TestCase): sender=ALICE, type=EventTypes.PowerLevels, state_key="", - content={ - "users": { - ALICE: 100, - BOB: 50, - } - }, + content={"users": {ALICE: 100, BOB: 50}}, ), FakeEvent( id="PB", sender=BOB, type=EventTypes.PowerLevels, state_key='', - content={ - "users": { - ALICE: 100, - BOB: 50, - CHARLIE: 50, - }, - }, + content={"users": {ALICE: 100, BOB: 50, CHARLIE: 50}}, ), FakeEvent( id="PC", sender=CHARLIE, type=EventTypes.PowerLevels, state_key='', - content={ - "users": { - ALICE: 100, - BOB: 50, - CHARLIE: 0, - }, - }, + content={"users": {ALICE: 100, BOB: 50, CHARLIE: 0}}, 
), ] - edges = [ - ["END", "PC", "PB", "PA", "START"], - ["END", "PA"], - ] + edges = [["END", "PC", "PB", "PA", "START"], ["END", "PA"]] expected_state_ids = ["PC"] @@ -295,68 +250,38 @@ class StateTestCase(unittest.TestCase): def test_topic_basic(self): events = [ FakeEvent( - id="T1", - sender=ALICE, - type=EventTypes.Topic, - state_key="", - content={}, + id="T1", sender=ALICE, type=EventTypes.Topic, state_key="", content={} ), FakeEvent( id="PA1", sender=ALICE, type=EventTypes.PowerLevels, state_key='', - content={ - "users": { - ALICE: 100, - BOB: 50, - }, - }, + content={"users": {ALICE: 100, BOB: 50}}, ), FakeEvent( - id="T2", - sender=ALICE, - type=EventTypes.Topic, - state_key="", - content={}, + id="T2", sender=ALICE, type=EventTypes.Topic, state_key="", content={} ), FakeEvent( id="PA2", sender=ALICE, type=EventTypes.PowerLevels, state_key='', - content={ - "users": { - ALICE: 100, - BOB: 0, - }, - }, + content={"users": {ALICE: 100, BOB: 0}}, ), FakeEvent( id="PB", sender=BOB, type=EventTypes.PowerLevels, state_key='', - content={ - "users": { - ALICE: 100, - BOB: 50, - }, - }, + content={"users": {ALICE: 100, BOB: 50}}, ), FakeEvent( - id="T3", - sender=BOB, - type=EventTypes.Topic, - state_key="", - content={}, + id="T3", sender=BOB, type=EventTypes.Topic, state_key="", content={} ), ] - edges = [ - ["END", "PA2", "T2", "PA1", "T1", "START"], - ["END", "T3", "PB", "PA1"], - ] + edges = [["END", "PA2", "T2", "PA1", "T1", "START"], ["END", "T3", "PB", "PA1"]] expected_state_ids = ["PA2", "T2"] @@ -365,30 +290,17 @@ class StateTestCase(unittest.TestCase): def test_topic_reset(self): events = [ FakeEvent( - id="T1", - sender=ALICE, - type=EventTypes.Topic, - state_key="", - content={}, + id="T1", sender=ALICE, type=EventTypes.Topic, state_key="", content={} ), FakeEvent( id="PA", sender=ALICE, type=EventTypes.PowerLevels, state_key='', - content={ - "users": { - ALICE: 100, - BOB: 50, - }, - }, + content={"users": {ALICE: 100, BOB: 50}}, ), FakeEvent( - id="T2", - sender=BOB, - type=EventTypes.Topic, - state_key="", - content={}, + id="T2", sender=BOB, type=EventTypes.Topic, state_key="", content={} ), FakeEvent( id="MB", @@ -399,10 +311,7 @@ class StateTestCase(unittest.TestCase): ), ] - edges = [ - ["END", "MB", "T2", "PA", "T1", "START"], - ["END", "T1"], - ] + edges = [["END", "MB", "T2", "PA", "T1", "START"], ["END", "T1"]] expected_state_ids = ["T1", "MB", "PA"] @@ -411,61 +320,34 @@ class StateTestCase(unittest.TestCase): def test_topic(self): events = [ FakeEvent( - id="T1", - sender=ALICE, - type=EventTypes.Topic, - state_key="", - content={}, + id="T1", sender=ALICE, type=EventTypes.Topic, state_key="", content={} ), FakeEvent( id="PA1", sender=ALICE, type=EventTypes.PowerLevels, state_key='', - content={ - "users": { - ALICE: 100, - BOB: 50, - }, - }, + content={"users": {ALICE: 100, BOB: 50}}, ), FakeEvent( - id="T2", - sender=ALICE, - type=EventTypes.Topic, - state_key="", - content={}, + id="T2", sender=ALICE, type=EventTypes.Topic, state_key="", content={} ), FakeEvent( id="PA2", sender=ALICE, type=EventTypes.PowerLevels, state_key='', - content={ - "users": { - ALICE: 100, - BOB: 0, - }, - }, + content={"users": {ALICE: 100, BOB: 0}}, ), FakeEvent( id="PB", sender=BOB, type=EventTypes.PowerLevels, state_key='', - content={ - "users": { - ALICE: 100, - BOB: 50, - }, - }, + content={"users": {ALICE: 100, BOB: 50}}, ), FakeEvent( - id="T3", - sender=BOB, - type=EventTypes.Topic, - state_key="", - content={}, + id="T3", sender=BOB, type=EventTypes.Topic, 
state_key="", content={} ), FakeEvent( id="MZ1", @@ -475,11 +357,7 @@ class StateTestCase(unittest.TestCase): content={}, ), FakeEvent( - id="T4", - sender=ALICE, - type=EventTypes.Topic, - state_key="", - content={}, + id="T4", sender=ALICE, type=EventTypes.Topic, state_key="", content={} ), ] @@ -587,13 +465,7 @@ class StateTestCase(unittest.TestCase): class LexicographicalTestCase(unittest.TestCase): def test_simple(self): - graph = { - "l": {"o"}, - "m": {"n", "o"}, - "n": {"o"}, - "o": set(), - "p": {"o"}, - } + graph = {"l": {"o"}, "m": {"n", "o"}, "n": {"o"}, "o": set(), "p": {"o"}} res = list(lexicographical_topological_sort(graph, key=lambda x: x)) @@ -680,7 +552,13 @@ class SimpleParamStateTestCase(unittest.TestCase): self.expected_combined_state = { (e.type, e.state_key): e.event_id - for e in [create_event, alice_member, join_rules, bob_member, charlie_member] + for e in [ + create_event, + alice_member, + join_rules, + bob_member, + charlie_member, + ] } def test_event_map_none(self): @@ -720,11 +598,7 @@ class TestStateResolutionStore(object): Deferred[dict[str, FrozenEvent]]: Dict from event_id to event. """ - return { - eid: self.event_map[eid] - for eid in event_ids - if eid in self.event_map - } + return {eid: self.event_map[eid] for eid in event_ids if eid in self.event_map} def get_auth_chain(self, event_ids): """Gets the full auth chain for a set of events (including rejected diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py index 5568a607c7..fbb9302694 100644 --- a/tests/storage/test_background_update.py +++ b/tests/storage/test_background_update.py @@ -9,9 +9,7 @@ from tests.utils import setup_test_homeserver class BackgroundUpdateTestCase(unittest.TestCase): @defer.inlineCallbacks def setUp(self): - hs = yield setup_test_homeserver( - self.addCleanup - ) + hs = yield setup_test_homeserver(self.addCleanup) self.store = hs.get_datastore() self.clock = hs.get_clock() diff --git a/tests/storage/test_base.py b/tests/storage/test_base.py index f18db8c384..c778de1f0c 100644 --- a/tests/storage/test_base.py +++ b/tests/storage/test_base.py @@ -56,10 +56,7 @@ class SQLBaseStoreTestCase(unittest.TestCase): fake_engine = Mock(wraps=engine) fake_engine.can_native_upsert = False hs = TestHomeServer( - "test", - db_pool=self.db_pool, - config=config, - database_engine=fake_engine, + "test", db_pool=self.db_pool, config=config, database_engine=fake_engine ) self.datastore = SQLBaseStore(None, hs) diff --git a/tests/storage/test_end_to_end_keys.py b/tests/storage/test_end_to_end_keys.py index 11fb8c0c19..cd2bcd4ca3 100644 --- a/tests/storage/test_end_to_end_keys.py +++ b/tests/storage/test_end_to_end_keys.py @@ -20,7 +20,6 @@ import tests.utils class EndToEndKeyStoreTestCase(tests.unittest.TestCase): - @defer.inlineCallbacks def setUp(self): hs = yield tests.utils.setup_test_homeserver(self.addCleanup) diff --git a/tests/storage/test_monthly_active_users.py b/tests/storage/test_monthly_active_users.py index d6569a82bb..f458c03054 100644 --- a/tests/storage/test_monthly_active_users.py +++ b/tests/storage/test_monthly_active_users.py @@ -56,8 +56,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): self.store.register(user_id=user1, token="123", password_hash=None) self.store.register(user_id=user2, token="456", password_hash=None) self.store.register( - user_id=user3, token="789", - password_hash=None, user_type=UserTypes.SUPPORT + user_id=user3, token="789", password_hash=None, user_type=UserTypes.SUPPORT ) self.pump() @@ 
-173,9 +172,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): def test_populate_monthly_users_should_update(self): self.store.upsert_monthly_active_user = Mock() - self.store.is_trial_user = Mock( - return_value=defer.succeed(False) - ) + self.store.is_trial_user = Mock(return_value=defer.succeed(False)) self.store.user_last_seen_monthly_active = Mock( return_value=defer.succeed(None) @@ -187,13 +184,9 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): def test_populate_monthly_users_should_not_update(self): self.store.upsert_monthly_active_user = Mock() - self.store.is_trial_user = Mock( - return_value=defer.succeed(False) - ) + self.store.is_trial_user = Mock(return_value=defer.succeed(False)) self.store.user_last_seen_monthly_active = Mock( - return_value=defer.succeed( - self.hs.get_clock().time_msec() - ) + return_value=defer.succeed(self.hs.get_clock().time_msec()) ) self.store.populate_monthly_active_users('user_id') self.pump() @@ -243,7 +236,7 @@ class MonthlyActiveUsersTestCase(unittest.HomeserverTestCase): user_id=support_user_id, token="123", password_hash=None, - user_type=UserTypes.SUPPORT + user_type=UserTypes.SUPPORT, ) self.store.upsert_monthly_active_user(support_user_id) diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py index 0fc5019e9f..4823d44dec 100644 --- a/tests/storage/test_redaction.py +++ b/tests/storage/test_redaction.py @@ -60,7 +60,7 @@ class RedactionTestCase(unittest.TestCase): "state_key": user.to_string(), "room_id": room.to_string(), "content": content, - } + }, ) event, context = yield self.event_creation_handler.create_new_client_event( @@ -83,7 +83,7 @@ class RedactionTestCase(unittest.TestCase): "state_key": user.to_string(), "room_id": room.to_string(), "content": {"body": body, "msgtype": u"message"}, - } + }, ) event, context = yield self.event_creation_handler.create_new_client_event( @@ -105,7 +105,7 @@ class RedactionTestCase(unittest.TestCase): "room_id": room.to_string(), "content": {"reason": reason}, "redacts": event_id, - } + }, ) event, context = yield self.event_creation_handler.create_new_client_event( diff --git a/tests/storage/test_registration.py b/tests/storage/test_registration.py index cb3cc4d2e5..c0e0155bb4 100644 --- a/tests/storage/test_registration.py +++ b/tests/storage/test_registration.py @@ -116,7 +116,7 @@ class RegistrationStoreTestCase(unittest.TestCase): user_id=SUPPORT_USER, token="456", password_hash=None, - user_type=UserTypes.SUPPORT + user_type=UserTypes.SUPPORT, ) res = yield self.store.is_support_user(SUPPORT_USER) self.assertTrue(res) diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py index 063387863e..73ed943f5a 100644 --- a/tests/storage/test_roommember.py +++ b/tests/storage/test_roommember.py @@ -58,7 +58,7 @@ class RoomMemberStoreTestCase(unittest.TestCase): "state_key": user.to_string(), "room_id": room.to_string(), "content": {"membership": membership}, - } + }, ) event, context = yield self.event_creation_handler.create_new_client_event( diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py index 78e260a7fa..b6169436de 100644 --- a/tests/storage/test_state.py +++ b/tests/storage/test_state.py @@ -29,7 +29,6 @@ logger = logging.getLogger(__name__) class StateStoreTestCase(tests.unittest.TestCase): - @defer.inlineCallbacks def setUp(self): hs = yield tests.utils.setup_test_homeserver(self.addCleanup) @@ -57,7 +56,7 @@ class StateStoreTestCase(tests.unittest.TestCase): "state_key": state_key, 
"room_id": room.to_string(), "content": content, - } + }, ) event, context = yield self.event_creation_handler.create_new_client_event( @@ -83,15 +82,14 @@ class StateStoreTestCase(tests.unittest.TestCase): self.room, self.u_alice, EventTypes.Name, '', {"name": "test room"} ) - state_group_map = yield self.store.get_state_groups_ids(self.room, [e2.event_id]) + state_group_map = yield self.store.get_state_groups_ids( + self.room, [e2.event_id] + ) self.assertEqual(len(state_group_map), 1) state_map = list(state_group_map.values())[0] self.assertDictEqual( state_map, - { - (EventTypes.Create, ''): e1.event_id, - (EventTypes.Name, ''): e2.event_id, - }, + {(EventTypes.Create, ''): e1.event_id, (EventTypes.Name, ''): e2.event_id}, ) @defer.inlineCallbacks @@ -103,15 +101,11 @@ class StateStoreTestCase(tests.unittest.TestCase): self.room, self.u_alice, EventTypes.Name, '', {"name": "test room"} ) - state_group_map = yield self.store.get_state_groups( - self.room, [e2.event_id]) + state_group_map = yield self.store.get_state_groups(self.room, [e2.event_id]) self.assertEqual(len(state_group_map), 1) state_list = list(state_group_map.values())[0] - self.assertEqual( - {ev.event_id for ev in state_list}, - {e1.event_id, e2.event_id}, - ) + self.assertEqual({ev.event_id for ev in state_list}, {e1.event_id, e2.event_id}) @defer.inlineCallbacks def test_get_state_for_event(self): @@ -147,9 +141,7 @@ class StateStoreTestCase(tests.unittest.TestCase): ) # check we get the full state as of the final event - state = yield self.store.get_state_for_event( - e5.event_id, - ) + state = yield self.store.get_state_for_event(e5.event_id) self.assertIsNotNone(e4) @@ -194,7 +186,7 @@ class StateStoreTestCase(tests.unittest.TestCase): state_filter=StateFilter( types={EventTypes.Member: {self.u_alice.to_string()}}, include_others=True, - ) + ), ) self.assertStateMapEqual( @@ -208,9 +200,9 @@ class StateStoreTestCase(tests.unittest.TestCase): # check that we can grab everything except members state = yield self.store.get_state_for_event( - e5.event_id, state_filter=StateFilter( - types={EventTypes.Member: set()}, - include_others=True, + e5.event_id, + state_filter=StateFilter( + types={EventTypes.Member: set()}, include_others=True ), ) @@ -229,10 +221,10 @@ class StateStoreTestCase(tests.unittest.TestCase): # test _get_state_for_group_using_cache correctly filters out members # with types=[] (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( - self.store._state_group_cache, group, + self.store._state_group_cache, + group, state_filter=StateFilter( - types={EventTypes.Member: set()}, - include_others=True, + types={EventTypes.Member: set()}, include_others=True ), ) @@ -249,8 +241,7 @@ class StateStoreTestCase(tests.unittest.TestCase): self.store._state_group_members_cache, group, state_filter=StateFilter( - types={EventTypes.Member: set()}, - include_others=True, + types={EventTypes.Member: set()}, include_others=True ), ) @@ -263,8 +254,7 @@ class StateStoreTestCase(tests.unittest.TestCase): self.store._state_group_cache, group, state_filter=StateFilter( - types={EventTypes.Member: None}, - include_others=True, + types={EventTypes.Member: None}, include_others=True ), ) @@ -281,8 +271,7 @@ class StateStoreTestCase(tests.unittest.TestCase): self.store._state_group_members_cache, group, state_filter=StateFilter( - types={EventTypes.Member: None}, - include_others=True, + types={EventTypes.Member: None}, include_others=True ), ) @@ -302,8 +291,7 @@ class 
StateStoreTestCase(tests.unittest.TestCase): self.store._state_group_cache, group, state_filter=StateFilter( - types={EventTypes.Member: {e5.state_key}}, - include_others=True, + types={EventTypes.Member: {e5.state_key}}, include_others=True ), ) @@ -320,8 +308,7 @@ class StateStoreTestCase(tests.unittest.TestCase): self.store._state_group_members_cache, group, state_filter=StateFilter( - types={EventTypes.Member: {e5.state_key}}, - include_others=True, + types={EventTypes.Member: {e5.state_key}}, include_others=True ), ) @@ -334,8 +321,7 @@ class StateStoreTestCase(tests.unittest.TestCase): self.store._state_group_members_cache, group, state_filter=StateFilter( - types={EventTypes.Member: {e5.state_key}}, - include_others=False, + types={EventTypes.Member: {e5.state_key}}, include_others=False ), ) @@ -384,10 +370,10 @@ class StateStoreTestCase(tests.unittest.TestCase): # with types=[] room_id = self.room.to_string() (state_dict, is_all) = yield self.store._get_state_for_group_using_cache( - self.store._state_group_cache, group, + self.store._state_group_cache, + group, state_filter=StateFilter( - types={EventTypes.Member: set()}, - include_others=True, + types={EventTypes.Member: set()}, include_others=True ), ) @@ -399,8 +385,7 @@ class StateStoreTestCase(tests.unittest.TestCase): self.store._state_group_members_cache, group, state_filter=StateFilter( - types={EventTypes.Member: set()}, - include_others=True, + types={EventTypes.Member: set()}, include_others=True ), ) @@ -413,8 +398,7 @@ class StateStoreTestCase(tests.unittest.TestCase): self.store._state_group_cache, group, state_filter=StateFilter( - types={EventTypes.Member: None}, - include_others=True, + types={EventTypes.Member: None}, include_others=True ), ) @@ -425,8 +409,7 @@ class StateStoreTestCase(tests.unittest.TestCase): self.store._state_group_members_cache, group, state_filter=StateFilter( - types={EventTypes.Member: None}, - include_others=True, + types={EventTypes.Member: None}, include_others=True ), ) @@ -445,8 +428,7 @@ class StateStoreTestCase(tests.unittest.TestCase): self.store._state_group_cache, group, state_filter=StateFilter( - types={EventTypes.Member: {e5.state_key}}, - include_others=True, + types={EventTypes.Member: {e5.state_key}}, include_others=True ), ) @@ -457,8 +439,7 @@ class StateStoreTestCase(tests.unittest.TestCase): self.store._state_group_members_cache, group, state_filter=StateFilter( - types={EventTypes.Member: {e5.state_key}}, - include_others=True, + types={EventTypes.Member: {e5.state_key}}, include_others=True ), ) @@ -471,8 +452,7 @@ class StateStoreTestCase(tests.unittest.TestCase): self.store._state_group_cache, group, state_filter=StateFilter( - types={EventTypes.Member: {e5.state_key}}, - include_others=False, + types={EventTypes.Member: {e5.state_key}}, include_others=False ), ) @@ -483,8 +463,7 @@ class StateStoreTestCase(tests.unittest.TestCase): self.store._state_group_members_cache, group, state_filter=StateFilter( - types={EventTypes.Member: {e5.state_key}}, - include_others=False, + types={EventTypes.Member: {e5.state_key}}, include_others=False ), ) diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py index fd3361404f..d7d244ce97 100644 --- a/tests/storage/test_user_directory.py +++ b/tests/storage/test_user_directory.py @@ -36,9 +36,7 @@ class UserDirectoryStoreTestCase(unittest.TestCase): yield self.store.update_profile_in_user_dir(ALICE, "alice", None) yield self.store.update_profile_in_user_dir(BOB, "bob", None) yield 
self.store.update_profile_in_user_dir(BOBBY, "bobby", None) - yield self.store.add_users_in_public_rooms( - "!room:id", (ALICE, BOB) - ) + yield self.store.add_users_in_public_rooms("!room:id", (ALICE, BOB)) @defer.inlineCallbacks def test_search_user_dir(self): diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py index 4c8f87e958..8b2741d277 100644 --- a/tests/test_event_auth.py +++ b/tests/test_event_auth.py @@ -37,7 +37,9 @@ class EventAuthTestCase(unittest.TestCase): # creator should be able to send state event_auth.check( - RoomVersions.V1.identifier, _random_state_event(creator), auth_events, + RoomVersions.V1.identifier, + _random_state_event(creator), + auth_events, do_sig_check=False, ) @@ -82,7 +84,9 @@ class EventAuthTestCase(unittest.TestCase): # king should be able to send state event_auth.check( - RoomVersions.V1.identifier, _random_state_event(king), auth_events, + RoomVersions.V1.identifier, + _random_state_event(king), + auth_events, do_sig_check=False, ) diff --git a/tests/test_federation.py b/tests/test_federation.py index 1a5dc32c88..6a8339b561 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -1,4 +1,3 @@ - from mock import Mock from twisted.internet.defer import maybeDeferred, succeed diff --git a/tests/test_mau.py b/tests/test_mau.py index 00be1a8c21..1fbe0d51ff 100644 --- a/tests/test_mau.py +++ b/tests/test_mau.py @@ -33,9 +33,7 @@ class TestMauLimit(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): self.hs = self.setup_test_homeserver( - "red", - http_client=None, - federation_client=Mock(), + "red", http_client=None, federation_client=Mock() ) self.store = self.hs.get_datastore() @@ -210,9 +208,7 @@ class TestMauLimit(unittest.HomeserverTestCase): return access_token def do_sync_for_user(self, token): - request, channel = self.make_request( - "GET", "/sync", access_token=token - ) + request, channel = self.make_request("GET", "/sync", access_token=token) self.render(request) if channel.code != 200: diff --git a/tests/test_metrics.py b/tests/test_metrics.py index 0ff6d0e283..2edbae5c6d 100644 --- a/tests/test_metrics.py +++ b/tests/test_metrics.py @@ -44,9 +44,7 @@ def get_sample_labels_value(sample): class TestMauLimit(unittest.TestCase): def test_basic(self): gauge = InFlightGauge( - "test1", "", - labels=["test_label"], - sub_metrics=["foo", "bar"], + "test1", "", labels=["test_label"], sub_metrics=["foo", "bar"] ) def handle1(metrics): @@ -59,37 +57,49 @@ class TestMauLimit(unittest.TestCase): gauge.register(("key1",), handle1) - self.assert_dict({ - "test1_total": {("key1",): 1}, - "test1_foo": {("key1",): 2}, - "test1_bar": {("key1",): 5}, - }, self.get_metrics_from_gauge(gauge)) + self.assert_dict( + { + "test1_total": {("key1",): 1}, + "test1_foo": {("key1",): 2}, + "test1_bar": {("key1",): 5}, + }, + self.get_metrics_from_gauge(gauge), + ) gauge.unregister(("key1",), handle1) - self.assert_dict({ - "test1_total": {("key1",): 0}, - "test1_foo": {("key1",): 0}, - "test1_bar": {("key1",): 0}, - }, self.get_metrics_from_gauge(gauge)) + self.assert_dict( + { + "test1_total": {("key1",): 0}, + "test1_foo": {("key1",): 0}, + "test1_bar": {("key1",): 0}, + }, + self.get_metrics_from_gauge(gauge), + ) gauge.register(("key1",), handle1) gauge.register(("key2",), handle2) - self.assert_dict({ - "test1_total": {("key1",): 1, ("key2",): 1}, - "test1_foo": {("key1",): 2, ("key2",): 3}, - "test1_bar": {("key1",): 5, ("key2",): 7}, - }, self.get_metrics_from_gauge(gauge)) + self.assert_dict( + { + "test1_total": 
{("key1",): 1, ("key2",): 1}, + "test1_foo": {("key1",): 2, ("key2",): 3}, + "test1_bar": {("key1",): 5, ("key2",): 7}, + }, + self.get_metrics_from_gauge(gauge), + ) gauge.unregister(("key2",), handle2) gauge.register(("key1",), handle2) - self.assert_dict({ - "test1_total": {("key1",): 2, ("key2",): 0}, - "test1_foo": {("key1",): 5, ("key2",): 0}, - "test1_bar": {("key1",): 7, ("key2",): 0}, - }, self.get_metrics_from_gauge(gauge)) + self.assert_dict( + { + "test1_total": {("key1",): 2, ("key2",): 0}, + "test1_foo": {("key1",): 5, ("key2",): 0}, + "test1_bar": {("key1",): 7, ("key2",): 0}, + }, + self.get_metrics_from_gauge(gauge), + ) def get_metrics_from_gauge(self, gauge): results = {} diff --git a/tests/test_terms_auth.py b/tests/test_terms_auth.py index 0968e86a7b..f412985d2c 100644 --- a/tests/test_terms_auth.py +++ b/tests/test_terms_auth.py @@ -69,10 +69,10 @@ class TermsTestCase(unittest.HomeserverTestCase): "name": "My Cool Privacy Policy", "url": "https://example.org/_matrix/consent?v=1.0", }, - "version": "1.0" - }, - }, - }, + "version": "1.0", + } + } + } } self.assertIsInstance(channel.json_body["params"], dict) self.assertDictContainsSubset(channel.json_body["params"], expected_params) diff --git a/tests/test_types.py b/tests/test_types.py index d314a7ff58..d83c36559f 100644 --- a/tests/test_types.py +++ b/tests/test_types.py @@ -94,8 +94,7 @@ class MapUsernameTestCase(unittest.TestCase): def testSymbols(self): self.assertEqual( - map_username_to_mxid_localpart("test=$?_1234"), - "test=3d=24=3f_1234", + map_username_to_mxid_localpart("test=$?_1234"), "test=3d=24=3f_1234" ) def testLeadingUnderscore(self): @@ -105,6 +104,5 @@ class MapUsernameTestCase(unittest.TestCase): # this should work with either a unicode or a bytes self.assertEqual(map_username_to_mxid_localpart(u'têst'), "t=c3=aast") self.assertEqual( - map_username_to_mxid_localpart(u'têst'.encode('utf-8')), - "t=c3=aast", + map_username_to_mxid_localpart(u'têst'.encode('utf-8')), "t=c3=aast" ) diff --git a/tests/test_utils/logging_setup.py b/tests/test_utils/logging_setup.py index d0bc8e2112..fde0baee8e 100644 --- a/tests/test_utils/logging_setup.py +++ b/tests/test_utils/logging_setup.py @@ -22,6 +22,7 @@ from synapse.util.logcontext import LoggingContextFilter class ToTwistedHandler(logging.Handler): """logging handler which sends the logs to the twisted log""" + tx_log = twisted.logger.Logger() def emit(self, record): @@ -41,7 +42,8 @@ def setup_logging(): root_logger = logging.getLogger() log_format = ( - "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s" + "%(asctime)s - %(name)s - %(lineno)d - " + "%(levelname)s - %(request)s - %(message)s" ) handler = ToTwistedHandler() diff --git a/tests/test_visibility.py b/tests/test_visibility.py index 3bdb500514..6a180ddc32 100644 --- a/tests/test_visibility.py +++ b/tests/test_visibility.py @@ -132,7 +132,7 @@ class FilterEventsForServerTestCase(tests.unittest.TestCase): "state_key": "", "room_id": TEST_ROOM_ID, "content": content, - } + }, ) event, context = yield self.event_creation_handler.create_new_client_event( @@ -153,7 +153,7 @@ class FilterEventsForServerTestCase(tests.unittest.TestCase): "state_key": user_id, "room_id": TEST_ROOM_ID, "content": content, - } + }, ) event, context = yield self.event_creation_handler.create_new_client_event( @@ -174,7 +174,7 @@ class FilterEventsForServerTestCase(tests.unittest.TestCase): "sender": user_id, "room_id": TEST_ROOM_ID, "content": content, - } + }, ) event, context = yield 
self.event_creation_handler.create_new_client_event( diff --git a/tests/unittest.py b/tests/unittest.py index 029a88d770..94df8cf47e 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -84,9 +84,8 @@ class TestCase(unittest.TestCase): # all future bets are off. if LoggingContext.current_context() is not LoggingContext.sentinel: self.fail( - "Test starting with non-sentinel logging context %s" % ( - LoggingContext.current_context(), - ) + "Test starting with non-sentinel logging context %s" + % (LoggingContext.current_context(),) ) old_level = logging.getLogger().level @@ -300,7 +299,13 @@ class HomeserverTestCase(TestCase): content = json.dumps(content).encode('utf8') return make_request( - self.reactor, method, path, content, access_token, request, shorthand, + self.reactor, + method, + path, + content, + access_token, + request, + shorthand, federation_auth_origin, ) diff --git a/tests/util/test_async_utils.py b/tests/util/test_async_utils.py index 84dd71e47a..bf85d3b8ec 100644 --- a/tests/util/test_async_utils.py +++ b/tests/util/test_async_utils.py @@ -42,10 +42,10 @@ class TimeoutDeferredTest(TestCase): self.assertNoResult(timing_out_d) self.assertFalse(cancelled[0], "deferred was cancelled prematurely") - self.clock.pump((1.0, )) + self.clock.pump((1.0,)) self.assertTrue(cancelled[0], "deferred was not cancelled by timeout") - self.failureResultOf(timing_out_d, defer.TimeoutError, ) + self.failureResultOf(timing_out_d, defer.TimeoutError) def test_times_out_when_canceller_throws(self): """Test that we have successfully worked around @@ -59,9 +59,9 @@ class TimeoutDeferredTest(TestCase): self.assertNoResult(timing_out_d) - self.clock.pump((1.0, )) + self.clock.pump((1.0,)) - self.failureResultOf(timing_out_d, defer.TimeoutError, ) + self.failureResultOf(timing_out_d, defer.TimeoutError) def test_logcontext_is_preserved_on_cancellation(self): blocking_was_cancelled = [False] @@ -80,10 +80,10 @@ class TimeoutDeferredTest(TestCase): # the errbacks should be run in the test logcontext def errback(res, deferred_name): self.assertIs( - LoggingContext.current_context(), context_one, - "errback %s run in unexpected logcontext %s" % ( - deferred_name, LoggingContext.current_context(), - ) + LoggingContext.current_context(), + context_one, + "errback %s run in unexpected logcontext %s" + % (deferred_name, LoggingContext.current_context()), ) return res @@ -94,11 +94,10 @@ class TimeoutDeferredTest(TestCase): self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel) timing_out_d.addErrback(errback, "timingout") - self.clock.pump((1.0, )) + self.clock.pump((1.0,)) self.assertTrue( - blocking_was_cancelled[0], - "non-completing deferred was not cancelled", + blocking_was_cancelled[0], "non-completing deferred was not cancelled" ) - self.failureResultOf(timing_out_d, defer.TimeoutError, ) + self.failureResultOf(timing_out_d, defer.TimeoutError) self.assertIs(LoggingContext.current_context(), context_one) diff --git a/tests/utils.py b/tests/utils.py index cb75514851..c2ef4b0bb5 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -68,7 +68,9 @@ def setupdb(): # connect to postgres to create the base database. 
db_conn = db_engine.module.connect( - user=POSTGRES_USER, host=POSTGRES_HOST, password=POSTGRES_PASSWORD, + user=POSTGRES_USER, + host=POSTGRES_HOST, + password=POSTGRES_PASSWORD, dbname=POSTGRES_DBNAME_FOR_INITIAL_CREATE, ) db_conn.autocommit = True @@ -94,7 +96,9 @@ def setupdb(): def _cleanup(): db_conn = db_engine.module.connect( - user=POSTGRES_USER, host=POSTGRES_HOST, password=POSTGRES_PASSWORD, + user=POSTGRES_USER, + host=POSTGRES_HOST, + password=POSTGRES_PASSWORD, dbname=POSTGRES_DBNAME_FOR_INITIAL_CREATE, ) db_conn.autocommit = True @@ -114,7 +118,6 @@ def default_config(name): "server_name": name, "media_store_path": "media", "uploads_path": "uploads", - # the test signing key is just an arbitrary ed25519 key to keep the config # parser happy "signing_key": "ed25519 a_lPym qvioDNmfExFBRPgdTU+wtFYKq4JfwFRv7sYVgWvmgJg", From ee90c06e38c16f9ca6d5e01c081ee99720fafafb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20M=C3=BCller?= Date: Fri, 10 May 2019 10:09:25 +0200 Subject: [PATCH 097/429] Set syslog identifiers in systemd units (#5023) --- changelog.d/5023.feature | 3 +++ .../system/matrix-synapse-worker@.service | 1 + contrib/systemd-with-workers/system/matrix-synapse.service | 1 + contrib/systemd/matrix-synapse.service | 2 +- debian/changelog | 7 +++++++ debian/matrix-synapse.service | 1 + 6 files changed, 14 insertions(+), 1 deletion(-) create mode 100644 changelog.d/5023.feature diff --git a/changelog.d/5023.feature b/changelog.d/5023.feature new file mode 100644 index 0000000000..98551f79a3 --- /dev/null +++ b/changelog.d/5023.feature @@ -0,0 +1,3 @@ +Configure the example systemd units to have a log identifier of `matrix-synapse` +instead of the executable name, `python`. +Contributed by Christoph Müller. diff --git a/contrib/systemd-with-workers/system/matrix-synapse-worker@.service b/contrib/systemd-with-workers/system/matrix-synapse-worker@.service index 912984b9d2..9d980d5168 100644 --- a/contrib/systemd-with-workers/system/matrix-synapse-worker@.service +++ b/contrib/systemd-with-workers/system/matrix-synapse-worker@.service @@ -12,6 +12,7 @@ ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.%i --config-path=/ ExecReload=/bin/kill -HUP $MAINPID Restart=always RestartSec=3 +SyslogIdentifier=matrix-synapse-%i [Install] WantedBy=matrix-synapse.service diff --git a/contrib/systemd-with-workers/system/matrix-synapse.service b/contrib/systemd-with-workers/system/matrix-synapse.service index 8bb4e400dc..3aae19034c 100644 --- a/contrib/systemd-with-workers/system/matrix-synapse.service +++ b/contrib/systemd-with-workers/system/matrix-synapse.service @@ -11,6 +11,7 @@ ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --confi ExecReload=/bin/kill -HUP $MAINPID Restart=always RestartSec=3 +SyslogIdentifier=matrix-synapse [Install] WantedBy=matrix.target diff --git a/contrib/systemd/matrix-synapse.service b/contrib/systemd/matrix-synapse.service index efb157e941..595b69916c 100644 --- a/contrib/systemd/matrix-synapse.service +++ b/contrib/systemd/matrix-synapse.service @@ -22,10 +22,10 @@ Group=nogroup WorkingDirectory=/opt/synapse ExecStart=/opt/synapse/env/bin/python -m synapse.app.homeserver --config-path=/opt/synapse/homeserver.yaml +SyslogIdentifier=matrix-synapse # adjust the cache factor if necessary # Environment=SYNAPSE_CACHE_FACTOR=2.0 [Install] WantedBy=multi-user.target - diff --git a/debian/changelog b/debian/changelog index c25425bf26..454fa8eb16 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,10 @@ 
+matrix-synapse-py3 (0.99.3.2+nmu1) UNRELEASED; urgency=medium + + [ Christoph Müller ] + * Configure the systemd units to have a log identifier of `matrix-synapse` + + -- Christoph Müller Wed, 17 Apr 2019 16:17:32 +0200 + matrix-synapse-py3 (0.99.3.2) stable; urgency=medium * New synapse release 0.99.3.2. diff --git a/debian/matrix-synapse.service b/debian/matrix-synapse.service index 942e4b83fe..b0a8d72e6d 100644 --- a/debian/matrix-synapse.service +++ b/debian/matrix-synapse.service @@ -11,6 +11,7 @@ ExecStart=/opt/venvs/matrix-synapse/bin/python -m synapse.app.homeserver --confi ExecReload=/bin/kill -HUP $MAINPID Restart=always RestartSec=3 +SyslogIdentifier=matrix-synapse [Install] WantedBy=multi-user.target From cd3f30014ad7bd71b4c86a34487a7719d3ac8caf Mon Sep 17 00:00:00 2001 From: Gergely Polonkai Date: Fri, 10 May 2019 10:15:08 +0200 Subject: [PATCH 098/429] Make Prometheus snippet less confusing on the metrics collection doc (#4288) Signed-off-by: Gergely Polonkai --- docs/metrics-howto.rst | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/docs/metrics-howto.rst b/docs/metrics-howto.rst index 5bbb5a4f3a..32b064e2da 100644 --- a/docs/metrics-howto.rst +++ b/docs/metrics-howto.rst @@ -48,7 +48,10 @@ How to monitor Synapse metrics using Prometheus - job_name: "synapse" metrics_path: "/_synapse/metrics" static_configs: - - targets: ["my.server.here:9092"] + - targets: ["my.server.here:port"] + + where ``my.server.here`` is the IP address of Synapse, and ``port`` is the listener port + configured with the ``metrics`` resource. If your prometheus is older than 1.5.2, you will need to replace ``static_configs`` in the above with ``target_groups``. From a78996cc4a48660d1b11abacf764b6928a8ffee8 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 10 May 2019 09:46:28 +0100 Subject: [PATCH 099/429] fix sample config --- docs/sample_config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 6ed75ff764..45585f09e7 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -524,7 +524,7 @@ uploads_path: "DATADIR/uploads" # (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly # listed here, since they correspond to unroutable addresses.) # -# This must be specified if url_preview_enabled is set. It is recommended that +# This must be specified if url_preview_enabled is set. It is recommended that # you uncomment the following list as a starting point. # #url_preview_ip_range_blacklist: From 085ae346ace418e0fc043ac5f568f85ebf80038e Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 10 May 2019 10:52:24 +0100 Subject: [PATCH 100/429] Add a DUMMY stage to captcha-only registration flow This allows the client to complete the email last which is more natual for the user. Without this stage, if the client would complete the recaptcha (and terms, if enabled) stages and then the registration request would complete because you've now completed a flow, even if you were intending to complete the flow that's the same except has email auth at the end. Adding a dummy auth stage to the recaptcha-only flow means it's always unambiguous which flow the client was trying to complete. Longer term we should think about changing the protocol so the client explicitly says which flow it's trying to complete. 
https://github.com/vector-im/riot-web/issues/9586 --- synapse/rest/client/v2_alpha/register.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index dc3e265bcd..95578b76ae 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -345,7 +345,7 @@ class RegisterRestServlet(RestServlet): if self.hs.config.enable_registration_captcha: # only support 3PIDless registration if no 3PIDs are required if not require_email and not require_msisdn: - flows.extend([[LoginType.RECAPTCHA]]) + flows.extend([[LoginType.RECAPTCHA, LoginType.DUMMY]]) # only support the email-only flow if we don't require MSISDN 3PIDs if not require_msisdn: flows.extend([[LoginType.EMAIL_IDENTITY, LoginType.RECAPTCHA]]) From c2bb7476c93d1f5026a4c8b715b2f70316d3a506 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 10 May 2019 11:08:01 +0100 Subject: [PATCH 101/429] Revert 085ae346ace418e0fc043ac5f568f85ebf80038e Accidentally went straight to develop --- synapse/rest/client/v2_alpha/register.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 95578b76ae..dc3e265bcd 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -345,7 +345,7 @@ class RegisterRestServlet(RestServlet): if self.hs.config.enable_registration_captcha: # only support 3PIDless registration if no 3PIDs are required if not require_email and not require_msisdn: - flows.extend([[LoginType.RECAPTCHA, LoginType.DUMMY]]) + flows.extend([[LoginType.RECAPTCHA]]) # only support the email-only flow if we don't require MSISDN 3PIDs if not require_msisdn: flows.extend([[LoginType.EMAIL_IDENTITY, LoginType.RECAPTCHA]]) From 8714ff6d515ab0fb937518d4ba7d1d3b1593617d Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 10 May 2019 11:09:53 +0100 Subject: [PATCH 102/429] Add a DUMMY stage to captcha-only registration flow This allows the client to complete the email last which is more natual for the user. Without this stage, if the client would complete the recaptcha (and terms, if enabled) stages and then the registration request would complete because you've now completed a flow, even if you were intending to complete the flow that's the same except has email auth at the end. Adding a dummy auth stage to the recaptcha-only flow means it's always unambiguous which flow the client was trying to complete. Longer term we should think about changing the protocol so the client explicitly says which flow it's trying to complete. 
vector-im/riot-web#9586 --- synapse/rest/client/v2_alpha/register.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index dc3e265bcd..95578b76ae 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -345,7 +345,7 @@ class RegisterRestServlet(RestServlet): if self.hs.config.enable_registration_captcha: # only support 3PIDless registration if no 3PIDs are required if not require_email and not require_msisdn: - flows.extend([[LoginType.RECAPTCHA]]) + flows.extend([[LoginType.RECAPTCHA, LoginType.DUMMY]]) # only support the email-only flow if we don't require MSISDN 3PIDs if not require_msisdn: flows.extend([[LoginType.EMAIL_IDENTITY, LoginType.RECAPTCHA]]) From a18f93279eb7b8505db63af062695e8c7fe263c4 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 10 May 2019 11:11:59 +0100 Subject: [PATCH 103/429] Add changelog entry --- changelog.d/5174.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5174.bugfix diff --git a/changelog.d/5174.bugfix b/changelog.d/5174.bugfix new file mode 100644 index 0000000000..f4d3192bd9 --- /dev/null +++ b/changelog.d/5174.bugfix @@ -0,0 +1 @@ +Allow clients to complete email auth first when registering From 9c61dce3c81283f9c7bb47b634512dd848eb116e Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 10 May 2019 11:14:55 +0100 Subject: [PATCH 104/429] Comment --- synapse/rest/client/v2_alpha/register.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 95578b76ae..5c4c8dca28 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -345,6 +345,10 @@ class RegisterRestServlet(RestServlet): if self.hs.config.enable_registration_captcha: # only support 3PIDless registration if no 3PIDs are required if not require_email and not require_msisdn: + # Also add a dummy flow here, otherwise if a client completes + # recaptcha first we'll assume they were going for this flow + # and complete the request, when they could have been trying to + # complete one of the flows with email/msisdn auth. flows.extend([[LoginType.RECAPTCHA, LoginType.DUMMY]]) # only support the email-only flow if we don't require MSISDN 3PIDs if not require_msisdn: From 7a3eb8657d66bf0ae479b5a0db9529df5d0226c1 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 10 May 2019 11:18:35 +0100 Subject: [PATCH 105/429] Thanks, automated grammar pedantry. --- changelog.d/5174.bugfix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/5174.bugfix b/changelog.d/5174.bugfix index f4d3192bd9..2d7c71739b 100644 --- a/changelog.d/5174.bugfix +++ b/changelog.d/5174.bugfix @@ -1 +1 @@ -Allow clients to complete email auth first when registering +Allow clients to complete email auth first when registering. From 04299132af8dee4047a28f9d92ccd7ba2d1c10a0 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 10 May 2019 13:58:03 +0100 Subject: [PATCH 106/429] Re-order flows so that email auth is done last It's more natural for the user if the bit that takes them away from the registration flow comes last. Adding the dummy stage allows us to do the stages in this order without the ambiguity. 
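To make the ambiguity concrete, here is a minimal standalone sketch of the usual user-interactive auth rule that a flow counts as finished once every one of its stages has been satisfied. It is illustrative only: the stage names are the real Matrix login types, but completed_flow is a hypothetical helper, not Synapse's AuthHandler.check_auth.

    # Minimal model of UIA flow matching (illustrative sketch, not Synapse code).
    RECAPTCHA = "m.login.recaptcha"
    EMAIL = "m.login.email.identity"
    DUMMY = "m.login.dummy"

    def completed_flow(flows, completed_stages):
        """Return the first flow whose stages have all been completed, if any."""
        done = set(completed_stages)
        for flow in flows:
            if set(flow) <= done:
                return flow
        return None

    # Old flows: completing the captcha alone already satisfies the
    # captcha-only flow, so registration finishes even if the client
    # intended to verify an email next.
    old_flows = [[RECAPTCHA], [RECAPTCHA, EMAIL]]
    assert completed_flow(old_flows, [RECAPTCHA]) == [RECAPTCHA]

    # New flows: the dummy stage keeps the captcha-only flow distinct,
    # while still letting the email stage come last.
    new_flows = [[RECAPTCHA, DUMMY], [RECAPTCHA, EMAIL]]
    assert completed_flow(new_flows, [RECAPTCHA]) is None
    assert completed_flow(new_flows, [RECAPTCHA, DUMMY]) == [RECAPTCHA, DUMMY]

With the old flows, finishing the captcha alone completes the request; the dummy stage removes that overlap while still letting the email stage come last.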
--- synapse/rest/client/v2_alpha/register.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 5c4c8dca28..c51241fa01 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -352,15 +352,15 @@ class RegisterRestServlet(RestServlet): flows.extend([[LoginType.RECAPTCHA, LoginType.DUMMY]]) # only support the email-only flow if we don't require MSISDN 3PIDs if not require_msisdn: - flows.extend([[LoginType.EMAIL_IDENTITY, LoginType.RECAPTCHA]]) + flows.extend([[LoginType.RECAPTCHA, LoginType.EMAIL_IDENTITY]]) if show_msisdn: # only support the MSISDN-only flow if we don't require email 3PIDs if not require_email: - flows.extend([[LoginType.MSISDN, LoginType.RECAPTCHA]]) + flows.extend([[LoginType.RECAPTCHA, LoginType.MSISDN]]) # always let users provide both MSISDN & email flows.extend([ - [LoginType.MSISDN, LoginType.EMAIL_IDENTITY, LoginType.RECAPTCHA], + [LoginType.RECAPTCHA, LoginType.MSISDN, LoginType.EMAIL_IDENTITY], ]) else: # only support 3PIDless registration if no 3PIDs are required @@ -383,7 +383,15 @@ class RegisterRestServlet(RestServlet): if self.hs.config.user_consent_at_registration: new_flows = [] for flow in flows: - flow.append(LoginType.TERMS) + inserted = False + # m.login.terms should go near the end but before msisdn or email auth + for i, stage in enumerate(flow): + if stage == LoginType.EMAIL_IDENTITY or stage == LoginType.MSISDN: + flow.insert(i, LoginType.TERMS) + inserted = True + break + if not inserted: + flow.append(LoginType.TERMS) flows.extend(new_flows) auth_result, params, session_id = yield self.auth_handler.check_auth( From c9f811c5d45ac52d4b55ed74d2919b0d41790983 Mon Sep 17 00:00:00 2001 From: David Baker Date: Fri, 10 May 2019 14:01:19 +0100 Subject: [PATCH 107/429] Update changelog --- changelog.d/5174.bugfix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/5174.bugfix b/changelog.d/5174.bugfix index 2d7c71739b..0f26d46b2c 100644 --- a/changelog.d/5174.bugfix +++ b/changelog.d/5174.bugfix @@ -1 +1 @@ -Allow clients to complete email auth first when registering. +Re-order stages in registration flows such that msisdn and email verification are done last. From 2f48c4e1ae40869a1bc5dcb36557ad0a0d8a5728 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 10 May 2019 10:32:44 -0700 Subject: [PATCH 108/429] URL preview blacklisting fixes (#5155) Prevents a SynapseError being raised inside of a IResolutionReceiver and instead opts to just return 0 results. This thus means that we have to lump a failed lookup and a blacklisted lookup together with the same error message, but the substitute should be generic enough to cover both cases. --- changelog.d/5155.misc | 1 + synapse/http/client.py | 45 ++++++++++--------- synapse/rest/media/v1/preview_url_resource.py | 10 +++++ tests/rest/media/v1/test_url_preview.py | 22 ++++----- 4 files changed, 47 insertions(+), 31 deletions(-) create mode 100644 changelog.d/5155.misc diff --git a/changelog.d/5155.misc b/changelog.d/5155.misc new file mode 100644 index 0000000000..a81dbae67a --- /dev/null +++ b/changelog.d/5155.misc @@ -0,0 +1 @@ +Prevent an exception from being raised in a IResolutionReceiver and use a more generic error message for blacklisted URL previews. 
\ No newline at end of file diff --git a/synapse/http/client.py b/synapse/http/client.py index ad454f4964..ddbfb72228 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -90,9 +90,32 @@ class IPBlacklistingResolver(object): def resolveHostName(self, recv, hostname, portNumber=0): r = recv() - d = defer.Deferred() addresses = [] + def _callback(): + r.resolutionBegan(None) + + has_bad_ip = False + for i in addresses: + ip_address = IPAddress(i.host) + + if check_against_blacklist( + ip_address, self._ip_whitelist, self._ip_blacklist + ): + logger.info( + "Dropped %s from DNS resolution to %s due to blacklist" % + (ip_address, hostname) + ) + has_bad_ip = True + + # if we have a blacklisted IP, we'd like to raise an error to block the + # request, but all we can really do from here is claim that there were no + # valid results. + if not has_bad_ip: + for i in addresses: + r.addressResolved(i) + r.resolutionComplete() + @provider(IResolutionReceiver) class EndpointReceiver(object): @staticmethod @@ -101,34 +124,16 @@ class IPBlacklistingResolver(object): @staticmethod def addressResolved(address): - ip_address = IPAddress(address.host) - - if check_against_blacklist( - ip_address, self._ip_whitelist, self._ip_blacklist - ): - logger.info( - "Dropped %s from DNS resolution to %s" % (ip_address, hostname) - ) - raise SynapseError(403, "IP address blocked by IP blacklist entry") - addresses.append(address) @staticmethod def resolutionComplete(): - d.callback(addresses) + _callback() self._reactor.nameResolver.resolveHostName( EndpointReceiver, hostname, portNumber=portNumber ) - def _callback(addrs): - r.resolutionBegan(None) - for i in addrs: - r.addressResolved(i) - r.resolutionComplete() - - d.addCallback(_callback) - return r diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index ba3ab1d37d..acf87709f2 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -31,6 +31,7 @@ from six.moves import urllib_parse as urlparse from canonicaljson import json from twisted.internet import defer +from twisted.internet.error import DNSLookupError from twisted.web.resource import Resource from twisted.web.server import NOT_DONE_YET @@ -328,9 +329,18 @@ class PreviewUrlResource(Resource): # handler will return a SynapseError to the client instead of # blank data or a 500. raise + except DNSLookupError: + # DNS lookup returned no results + # Note: This will also be the case if one of the resolved IP + # addresses is blacklisted + raise SynapseError( + 502, "DNS resolution failure during URL preview generation", + Codes.UNKNOWN + ) except Exception as e: # FIXME: pass through 404s and other error messages nicely logger.warn("Error downloading %s: %r", url, e) + raise SynapseError( 500, "Failed to download content: %s" % ( traceback.format_exception_only(sys.exc_info()[0], e), diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/v1/test_url_preview.py index 650ce95a6f..f696395f3c 100644 --- a/tests/rest/media/v1/test_url_preview.py +++ b/tests/rest/media/v1/test_url_preview.py @@ -297,12 +297,12 @@ class URLPreviewTests(unittest.HomeserverTestCase): # No requests made. 
self.assertEqual(len(self.reactor.tcpClients), 0) - self.assertEqual(channel.code, 403) + self.assertEqual(channel.code, 502) self.assertEqual( channel.json_body, { 'errcode': 'M_UNKNOWN', - 'error': 'IP address blocked by IP blacklist entry', + 'error': 'DNS resolution failure during URL preview generation', }, ) @@ -318,12 +318,12 @@ class URLPreviewTests(unittest.HomeserverTestCase): request.render(self.preview_url) self.pump() - self.assertEqual(channel.code, 403) + self.assertEqual(channel.code, 502) self.assertEqual( channel.json_body, { 'errcode': 'M_UNKNOWN', - 'error': 'IP address blocked by IP blacklist entry', + 'error': 'DNS resolution failure during URL preview generation', }, ) @@ -339,7 +339,6 @@ class URLPreviewTests(unittest.HomeserverTestCase): # No requests made. self.assertEqual(len(self.reactor.tcpClients), 0) - self.assertEqual(channel.code, 403) self.assertEqual( channel.json_body, { @@ -347,6 +346,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): 'error': 'IP address blocked by IP blacklist entry', }, ) + self.assertEqual(channel.code, 403) def test_blacklisted_ip_range_direct(self): """ @@ -414,12 +414,12 @@ class URLPreviewTests(unittest.HomeserverTestCase): ) request.render(self.preview_url) self.pump() - self.assertEqual(channel.code, 403) + self.assertEqual(channel.code, 502) self.assertEqual( channel.json_body, { 'errcode': 'M_UNKNOWN', - 'error': 'IP address blocked by IP blacklist entry', + 'error': 'DNS resolution failure during URL preview generation', }, ) @@ -439,12 +439,12 @@ class URLPreviewTests(unittest.HomeserverTestCase): # No requests made. self.assertEqual(len(self.reactor.tcpClients), 0) - self.assertEqual(channel.code, 403) + self.assertEqual(channel.code, 502) self.assertEqual( channel.json_body, { 'errcode': 'M_UNKNOWN', - 'error': 'IP address blocked by IP blacklist entry', + 'error': 'DNS resolution failure during URL preview generation', }, ) @@ -460,11 +460,11 @@ class URLPreviewTests(unittest.HomeserverTestCase): request.render(self.preview_url) self.pump() - self.assertEqual(channel.code, 403) + self.assertEqual(channel.code, 502) self.assertEqual( channel.json_body, { 'errcode': 'M_UNKNOWN', - 'error': 'IP address blocked by IP blacklist entry', + 'error': 'DNS resolution failure during URL preview generation', }, ) From bb93757b32e1f13cc6e091dfdd485b1b2ae47373 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 13 May 2019 15:19:44 +0100 Subject: [PATCH 109/429] Fix CI after new release of isort --- synapse/server.pyi | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/server.pyi b/synapse/server.pyi index 3ba3a967c2..9583e82d52 100644 --- a/synapse/server.pyi +++ b/synapse/server.pyi @@ -18,7 +18,6 @@ import synapse.server_notices.server_notices_sender import synapse.state import synapse.storage - class HomeServer(object): @property def config(self) -> synapse.config.homeserver.HomeServerConfig: From 1a536699fde3c4ae80a5da9fab725eb90b0ed148 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 13 May 2019 15:21:23 +0100 Subject: [PATCH 110/429] Changelog --- changelog.d/5179.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5179.bugfix diff --git a/changelog.d/5179.bugfix b/changelog.d/5179.bugfix new file mode 100644 index 0000000000..92369cb2fc --- /dev/null +++ b/changelog.d/5179.bugfix @@ -0,0 +1 @@ +Fix CI after new release of isort. 
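A note on the resolver behaviour introduced in "URL preview blacklisting fixes (#5155)" above: all resolved addresses are collected first and, if any of them is blacklisted, the lookup reports no results at all, so the caller sees an ordinary DNS-style failure instead of an exception raised inside the IResolutionReceiver. A rough standalone sketch of that decision, using the stdlib ipaddress module instead of the IPAddress/check_against_blacklist helpers used in the real code (the function names below are illustrative only):

    from ipaddress import ip_address, ip_network

    BLACKLIST = [
        ip_network("127.0.0.0/8"),
        ip_network("10.0.0.0/8"),
        ip_network("192.168.0.0/16"),
    ]
    WHITELIST = []

    def is_blocked(addr):
        # Blocked if it falls in a blacklisted range and is not whitelisted.
        ip = ip_address(addr)
        if any(ip in net for net in WHITELIST):
            return False
        return any(ip in net for net in BLACKLIST)

    def filter_resolved(addresses):
        # Mimic the patched resolver: one bad IP means the whole lookup
        # behaves as though it returned nothing.
        if any(is_blocked(a) for a in addresses):
            return []
        return addresses

    assert filter_resolved(["93.184.216.34"]) == ["93.184.216.34"]
    assert filter_resolved(["93.184.216.34", "10.1.2.3"]) == []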
From 2725cd2290445db0931f45dc10e508f20bfc2d05 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 13 May 2019 15:32:07 +0100 Subject: [PATCH 111/429] Fix changelog --- changelog.d/{5179.bugfix => 5179.misc} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename changelog.d/{5179.bugfix => 5179.misc} (100%) diff --git a/changelog.d/5179.bugfix b/changelog.d/5179.misc similarity index 100% rename from changelog.d/5179.bugfix rename to changelog.d/5179.misc From 8782bfb783ef4da3882951e56d8911a1d81eada5 Mon Sep 17 00:00:00 2001 From: David Baker Date: Mon, 13 May 2019 15:34:11 +0100 Subject: [PATCH 112/429] And now I realise why the test is failing... --- tests/rest/client/v2_alpha/test_auth.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/tests/rest/client/v2_alpha/test_auth.py b/tests/rest/client/v2_alpha/test_auth.py index 0ca3c4657b..fa833ba5a2 100644 --- a/tests/rest/client/v2_alpha/test_auth.py +++ b/tests/rest/client/v2_alpha/test_auth.py @@ -92,7 +92,14 @@ class FallbackAuthTests(unittest.HomeserverTestCase): self.assertEqual(len(self.recaptcha_attempts), 1) self.assertEqual(self.recaptcha_attempts[0][0]["response"], "a") - # Now we have fufilled the recaptcha fallback step, we can then send a + # also complete the dummy auth + request, channel = self.make_request( + "POST", "register", {"auth": {"session": session, "type": "m.login.dummy"}} + ) + self.render(request) + + # Now we should have fufilled a complete auth flow, including + # the recaptcha fallback step, we can then send a # request to the register API with the session in the authdict. request, channel = self.make_request( "POST", "register", {"auth": {"session": session}} From 822072b1bb8c68006dffff196885202d80099361 Mon Sep 17 00:00:00 2001 From: David Baker Date: Mon, 13 May 2019 16:10:26 +0100 Subject: [PATCH 113/429] Terms might not be the last stage --- tests/test_terms_auth.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_terms_auth.py b/tests/test_terms_auth.py index f412985d2c..52739fbabc 100644 --- a/tests/test_terms_auth.py +++ b/tests/test_terms_auth.py @@ -59,7 +59,7 @@ class TermsTestCase(unittest.HomeserverTestCase): for flow in channel.json_body["flows"]: self.assertIsInstance(flow["stages"], list) self.assertTrue(len(flow["stages"]) > 0) - self.assertEquals(flow["stages"][-1], "m.login.terms") + self.assertTrue("m.login.terms" in flow["stages"]) expected_params = { "m.login.terms": { From 2e1129b5f7aae4f74b23ad063b12ab34a5c82eb3 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 13 May 2019 16:11:21 +0100 Subject: [PATCH 114/429] 0.99.4rc1 --- CHANGES.md | 82 ++++++++++++++++++++++++++++++++++++++++ changelog.d/4339.feature | 1 - changelog.d/4474.misc | 1 - changelog.d/4555.bugfix | 1 - changelog.d/4867.feature | 1 - changelog.d/4942.bugfix | 1 - changelog.d/4947.feature | 1 - changelog.d/4949.misc | 1 - changelog.d/4953.misc | 2 - changelog.d/4954.misc | 1 - changelog.d/4955.bugfix | 1 - changelog.d/4956.bugfix | 1 - changelog.d/4959.misc | 1 - changelog.d/4965.misc | 1 - changelog.d/4967.feature | 1 - changelog.d/4968.misc | 1 - changelog.d/4969.misc | 2 - changelog.d/4972.misc | 1 - changelog.d/4974.misc | 1 - changelog.d/4981.bugfix | 1 - changelog.d/4982.misc | 1 - changelog.d/4985.misc | 1 - changelog.d/4987.misc | 1 - changelog.d/4989.feature | 1 - changelog.d/4990.bugfix | 1 - changelog.d/4991.feature | 1 - changelog.d/4992.misc | 1 - changelog.d/4996.misc | 1 - changelog.d/4998.misc | 1 - changelog.d/4999.bugfix | 1 - 
changelog.d/5001.misc | 1 - changelog.d/5002.feature | 1 - changelog.d/5003.bugfix | 1 - changelog.d/5005.misc | 1 - changelog.d/5007.misc | 1 - changelog.d/5009.bugfix | 1 - changelog.d/5010.feature | 1 - changelog.d/5020.feature | 1 - changelog.d/5023.feature | 3 -- changelog.d/5024.misc | 1 - changelog.d/5027.feature | 1 - changelog.d/5028.misc | 1 - changelog.d/5030.misc | 1 - changelog.d/5032.bugfix | 1 - changelog.d/5033.misc | 1 - changelog.d/5035.bugfix | 1 - changelog.d/5037.bugfix | 1 - changelog.d/5046.misc | 1 - changelog.d/5047.feature | 1 - changelog.d/5063.feature | 1 - changelog.d/5065.feature | 1 - changelog.d/5067.misc | 1 - changelog.d/5070.feature | 1 - changelog.d/5071.bugfix | 1 - changelog.d/5073.feature | 1 - changelog.d/5077.bugfix | 1 - changelog.d/5083.feature | 1 - changelog.d/5098.misc | 1 - changelog.d/5100.misc | 1 - changelog.d/5103.bugfix | 1 - changelog.d/5104.bugfix | 1 - changelog.d/5116.feature | 1 - changelog.d/5119.feature | 1 - changelog.d/5120.misc | 1 - changelog.d/5121.feature | 1 - changelog.d/5122.misc | 1 - changelog.d/5124.bugfix | 1 - changelog.d/5128.bugfix | 1 - changelog.d/5138.bugfix | 1 - changelog.d/5142.feature | 1 - changelog.d/5154.bugfix | 1 - changelog.d/5155.misc | 1 - changelog.d/5170.misc | 1 - changelog.d/5179.misc | 1 - synapse/__init__.py | 2 +- 75 files changed, 83 insertions(+), 78 deletions(-) delete mode 100644 changelog.d/4339.feature delete mode 100644 changelog.d/4474.misc delete mode 100644 changelog.d/4555.bugfix delete mode 100644 changelog.d/4867.feature delete mode 100644 changelog.d/4942.bugfix delete mode 100644 changelog.d/4947.feature delete mode 100644 changelog.d/4949.misc delete mode 100644 changelog.d/4953.misc delete mode 100644 changelog.d/4954.misc delete mode 100644 changelog.d/4955.bugfix delete mode 100644 changelog.d/4956.bugfix delete mode 100644 changelog.d/4959.misc delete mode 100644 changelog.d/4965.misc delete mode 100644 changelog.d/4967.feature delete mode 100644 changelog.d/4968.misc delete mode 100644 changelog.d/4969.misc delete mode 100644 changelog.d/4972.misc delete mode 100644 changelog.d/4974.misc delete mode 100644 changelog.d/4981.bugfix delete mode 100644 changelog.d/4982.misc delete mode 100644 changelog.d/4985.misc delete mode 100644 changelog.d/4987.misc delete mode 100644 changelog.d/4989.feature delete mode 100644 changelog.d/4990.bugfix delete mode 100644 changelog.d/4991.feature delete mode 100644 changelog.d/4992.misc delete mode 100644 changelog.d/4996.misc delete mode 100644 changelog.d/4998.misc delete mode 100644 changelog.d/4999.bugfix delete mode 100644 changelog.d/5001.misc delete mode 100644 changelog.d/5002.feature delete mode 100644 changelog.d/5003.bugfix delete mode 100644 changelog.d/5005.misc delete mode 100644 changelog.d/5007.misc delete mode 100644 changelog.d/5009.bugfix delete mode 100644 changelog.d/5010.feature delete mode 100644 changelog.d/5020.feature delete mode 100644 changelog.d/5023.feature delete mode 100644 changelog.d/5024.misc delete mode 100644 changelog.d/5027.feature delete mode 100644 changelog.d/5028.misc delete mode 100644 changelog.d/5030.misc delete mode 100644 changelog.d/5032.bugfix delete mode 100644 changelog.d/5033.misc delete mode 100644 changelog.d/5035.bugfix delete mode 100644 changelog.d/5037.bugfix delete mode 100644 changelog.d/5046.misc delete mode 100644 changelog.d/5047.feature delete mode 100644 changelog.d/5063.feature delete mode 100644 changelog.d/5065.feature delete mode 100644 changelog.d/5067.misc delete mode 
100644 changelog.d/5070.feature delete mode 100644 changelog.d/5071.bugfix delete mode 100644 changelog.d/5073.feature delete mode 100644 changelog.d/5077.bugfix delete mode 100644 changelog.d/5083.feature delete mode 100644 changelog.d/5098.misc delete mode 100644 changelog.d/5100.misc delete mode 100644 changelog.d/5103.bugfix delete mode 100644 changelog.d/5104.bugfix delete mode 100644 changelog.d/5116.feature delete mode 100644 changelog.d/5119.feature delete mode 100644 changelog.d/5120.misc delete mode 100644 changelog.d/5121.feature delete mode 100644 changelog.d/5122.misc delete mode 100644 changelog.d/5124.bugfix delete mode 100644 changelog.d/5128.bugfix delete mode 100644 changelog.d/5138.bugfix delete mode 100644 changelog.d/5142.feature delete mode 100644 changelog.d/5154.bugfix delete mode 100644 changelog.d/5155.misc delete mode 100644 changelog.d/5170.misc delete mode 100644 changelog.d/5179.misc diff --git a/CHANGES.md b/CHANGES.md index d8cfbbebef..3cacca5a6b 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,85 @@ +Synapse 0.99.4rc1 (2019-05-13) +============================== + +Features +-------- + +- Add systemd-python to the optional dependencies to enable logging to the systemd journal. Install with `pip install matrix-synapse[systemd]`. ([\#4339](https://github.com/matrix-org/synapse/issues/4339)) +- Add a default .m.rule.tombstone push rule. ([\#4867](https://github.com/matrix-org/synapse/issues/4867)) +- Add ability for password provider modules to bind email addresses to users upon registration. ([\#4947](https://github.com/matrix-org/synapse/issues/4947)) +- Implementation of [MSC1711](https://github.com/matrix-org/matrix-doc/pull/1711) including config options for requiring valid TLS certificates for federation traffic, the ability to disable TLS validation for specific domains, and the ability to specify your own list of CA certificates. ([\#4967](https://github.com/matrix-org/synapse/issues/4967)) +- Remove presence list support as per MSC 1819. ([\#4989](https://github.com/matrix-org/synapse/issues/4989)) +- Reduce CPU usage starting pushers during start up. ([\#4991](https://github.com/matrix-org/synapse/issues/4991)) +- Add a delete group admin API. ([\#5002](https://github.com/matrix-org/synapse/issues/5002)) +- Add config option to block users from looking up 3PIDs. ([\#5010](https://github.com/matrix-org/synapse/issues/5010)) +- Add context to phonehome stats. ([\#5020](https://github.com/matrix-org/synapse/issues/5020)) +- Configure the example systemd units to have a log identifier of `matrix-synapse` + instead of the executable name, `python`. + Contributed by Christoph Müller. ([\#5023](https://github.com/matrix-org/synapse/issues/5023)) +- Add time-based account expiration. ([\#5027](https://github.com/matrix-org/synapse/issues/5027), [\#5047](https://github.com/matrix-org/synapse/issues/5047), [\#5073](https://github.com/matrix-org/synapse/issues/5073), [\#5116](https://github.com/matrix-org/synapse/issues/5116)) +- Add support for handling /verions, /voip and /push_rules client endpoints to client_reader worker. ([\#5063](https://github.com/matrix-org/synapse/issues/5063), [\#5065](https://github.com/matrix-org/synapse/issues/5065), [\#5070](https://github.com/matrix-org/synapse/issues/5070)) +- Add an configuration option to require authentication on /publicRooms and /profile endpoints. ([\#5083](https://github.com/matrix-org/synapse/issues/5083)) +- Move admin APIs to `/_synapse/admin/v1`. 
(The old paths are retained for backwards-compatibility, for now). ([\#5119](https://github.com/matrix-org/synapse/issues/5119)) +- Implement an admin API for sending server notices. Many thanks to @krombel who provided a foundation for this work. ([\#5121](https://github.com/matrix-org/synapse/issues/5121), [\#5142](https://github.com/matrix-org/synapse/issues/5142)) + + +Bugfixes +-------- + +- Avoid redundant URL encoding of redirect URL for SSO login in the fallback login page. Fixes a regression introduced in [#4220](https://github.com/matrix-org/synapse/pull/4220). Contributed by Marcel Fabian Krüger ("[zaugin](https://github.com/zauguin)"). ([\#4555](https://github.com/matrix-org/synapse/issues/4555)) +- Fix bug where presence updates were sent to all servers in a room when a new server joined, rather than to just the new server. ([\#4942](https://github.com/matrix-org/synapse/issues/4942), [\#5103](https://github.com/matrix-org/synapse/issues/5103)) +- Fix sync bug which made accepting invites unreliable in worker-mode synapses. ([\#4955](https://github.com/matrix-org/synapse/issues/4955), [\#4956](https://github.com/matrix-org/synapse/issues/4956)) +- start.sh: Fix the --no-rate-limit option for messages and make it bypass rate limit on registration and login too. ([\#4981](https://github.com/matrix-org/synapse/issues/4981)) +- Transfer related groups on room upgrade. ([\#4990](https://github.com/matrix-org/synapse/issues/4990)) +- Prevent the ability to kick users from a room they aren't in. ([\#4999](https://github.com/matrix-org/synapse/issues/4999)) +- Fix issue #4596 so synapse_port_db script works with --curses option on Python 3. Contributed by Anders Jensen-Waud . ([\#5003](https://github.com/matrix-org/synapse/issues/5003)) +- Clients timing out/disappearing while downloading from the media repository will now no longer log a spurious "Producer was not unregistered" message. ([\#5009](https://github.com/matrix-org/synapse/issues/5009)) +- Fix "cannot import name execute_batch" error with postgres. ([\#5032](https://github.com/matrix-org/synapse/issues/5032)) +- Fix disappearing exceptions in manhole. ([\#5035](https://github.com/matrix-org/synapse/issues/5035)) +- Workaround bug in twisted where attempting too many concurrent DNS requests could cause it to hang due to running out of file descriptors. ([\#5037](https://github.com/matrix-org/synapse/issues/5037)) +- Make sure we're not registering the same 3pid twice on registration. ([\#5071](https://github.com/matrix-org/synapse/issues/5071)) +- Don't crash on lack of expiry templates. ([\#5077](https://github.com/matrix-org/synapse/issues/5077)) +- Fix the ratelimting on third party invites. ([\#5104](https://github.com/matrix-org/synapse/issues/5104)) +- Add some missing limitations to room alias creation. ([\#5124](https://github.com/matrix-org/synapse/issues/5124), [\#5128](https://github.com/matrix-org/synapse/issues/5128)) +- Limit the number of EDUs in transactions to 100 as expected by synapse. Thanks to @superboum for this work! ([\#5138](https://github.com/matrix-org/synapse/issues/5138)) +- Fix bogus imports in unit tests. ([\#5154](https://github.com/matrix-org/synapse/issues/5154)) + + +Internal Changes +---------------- + +- Add test to verify threepid auth check added in #4435. ([\#4474](https://github.com/matrix-org/synapse/issues/4474)) +- Fix/improve some docstrings in the replication code. 
([\#4949](https://github.com/matrix-org/synapse/issues/4949)) +- Split synapse.replication.tcp.streams into smaller files. ([\#4953](https://github.com/matrix-org/synapse/issues/4953)) +- Refactor replication row generation/parsing. ([\#4954](https://github.com/matrix-org/synapse/issues/4954)) +- Run `black` to clean up formatting on `synapse/storage/roommember.py` and `synapse/storage/events.py`. ([\#4959](https://github.com/matrix-org/synapse/issues/4959)) +- Remove log line for password via the admin API. ([\#4965](https://github.com/matrix-org/synapse/issues/4965)) +- Fix typo in TLS filenames in docker/README.md. Also add the '-p' commandline option to the 'docker run' example. Contributed by Jurrie Overgoor. ([\#4968](https://github.com/matrix-org/synapse/issues/4968)) +- Refactor room version definitions. ([\#4969](https://github.com/matrix-org/synapse/issues/4969)) +- Reduce log level of .well-known/matrix/client responses. ([\#4972](https://github.com/matrix-org/synapse/issues/4972)) +- Add `config.signing_key_path` that can be read by `synapse.config` utility. ([\#4974](https://github.com/matrix-org/synapse/issues/4974)) +- Track which identity server is used when binding a threepid and use that for unbinding, as per MSC1915. ([\#4982](https://github.com/matrix-org/synapse/issues/4982)) +- Rewrite KeyringTestCase as a HomeserverTestCase. ([\#4985](https://github.com/matrix-org/synapse/issues/4985)) +- README updates: Corrected the default POSTGRES_USER. Added port forwarding hint in TLS section. ([\#4987](https://github.com/matrix-org/synapse/issues/4987)) +- Remove a number of unused tables from the database schema. ([\#4992](https://github.com/matrix-org/synapse/issues/4992), [\#5028](https://github.com/matrix-org/synapse/issues/5028), [\#5033](https://github.com/matrix-org/synapse/issues/5033)) +- Run `black` on the remainder of `synapse/storage/`. ([\#4996](https://github.com/matrix-org/synapse/issues/4996)) +- Fix grammar in get_current_users_in_room and give it a docstring. ([\#4998](https://github.com/matrix-org/synapse/issues/4998)) +- Clean up some code in the server-key Keyring. ([\#5001](https://github.com/matrix-org/synapse/issues/5001)) +- Convert SYNAPSE_NO_TLS Docker variable to boolean for user friendliness. Contributed by Gabriel Eckerson. ([\#5005](https://github.com/matrix-org/synapse/issues/5005)) +- Refactor synapse.storage._base._simple_select_list_paginate. ([\#5007](https://github.com/matrix-org/synapse/issues/5007)) +- Store the notary server name correctly in server_keys_json. ([\#5024](https://github.com/matrix-org/synapse/issues/5024)) +- Rewrite Datastore.get_server_verify_keys to reduce the number of database transactions. ([\#5030](https://github.com/matrix-org/synapse/issues/5030)) +- Remove extraneous period from copyright headers. ([\#5046](https://github.com/matrix-org/synapse/issues/5046)) +- Update documentation for where to get Synapse packages. ([\#5067](https://github.com/matrix-org/synapse/issues/5067)) +- Add workarounds for pep-517 install errors. ([\#5098](https://github.com/matrix-org/synapse/issues/5098)) +- Improve logging when event-signature checks fail. ([\#5100](https://github.com/matrix-org/synapse/issues/5100)) +- Factor out an "assert_requester_is_admin" function. ([\#5120](https://github.com/matrix-org/synapse/issues/5120)) +- Remove the requirement to authenticate for /admin/server_version. 
([\#5122](https://github.com/matrix-org/synapse/issues/5122)) +- Prevent an exception from being raised in a IResolutionReceiver and use a more generic error message for blacklisted URL previews. ([\#5155](https://github.com/matrix-org/synapse/issues/5155)) +- Run `black` on the tests directory. ([\#5170](https://github.com/matrix-org/synapse/issues/5170)) +- Fix CI after new release of isort. ([\#5179](https://github.com/matrix-org/synapse/issues/5179)) + + Synapse 0.99.3.2 (2019-05-03) ============================= diff --git a/changelog.d/4339.feature b/changelog.d/4339.feature deleted file mode 100644 index cecff97b80..0000000000 --- a/changelog.d/4339.feature +++ /dev/null @@ -1 +0,0 @@ -Add systemd-python to the optional dependencies to enable logging to the systemd journal. Install with `pip install matrix-synapse[systemd]`. diff --git a/changelog.d/4474.misc b/changelog.d/4474.misc deleted file mode 100644 index 4b882d60be..0000000000 --- a/changelog.d/4474.misc +++ /dev/null @@ -1 +0,0 @@ -Add test to verify threepid auth check added in #4435. diff --git a/changelog.d/4555.bugfix b/changelog.d/4555.bugfix deleted file mode 100644 index d596022c3f..0000000000 --- a/changelog.d/4555.bugfix +++ /dev/null @@ -1 +0,0 @@ -Avoid redundant URL encoding of redirect URL for SSO login in the fallback login page. Fixes a regression introduced in [#4220](https://github.com/matrix-org/synapse/pull/4220). Contributed by Marcel Fabian Krüger ("[zaugin](https://github.com/zauguin)"). diff --git a/changelog.d/4867.feature b/changelog.d/4867.feature deleted file mode 100644 index f5f9030e22..0000000000 --- a/changelog.d/4867.feature +++ /dev/null @@ -1 +0,0 @@ -Add a default .m.rule.tombstone push rule. diff --git a/changelog.d/4942.bugfix b/changelog.d/4942.bugfix deleted file mode 100644 index 590d80d58f..0000000000 --- a/changelog.d/4942.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where presence updates were sent to all servers in a room when a new server joined, rather than to just the new server. diff --git a/changelog.d/4947.feature b/changelog.d/4947.feature deleted file mode 100644 index b9d27b90f1..0000000000 --- a/changelog.d/4947.feature +++ /dev/null @@ -1 +0,0 @@ -Add ability for password provider modules to bind email addresses to users upon registration. \ No newline at end of file diff --git a/changelog.d/4949.misc b/changelog.d/4949.misc deleted file mode 100644 index 25c4e05a64..0000000000 --- a/changelog.d/4949.misc +++ /dev/null @@ -1 +0,0 @@ -Fix/improve some docstrings in the replication code. diff --git a/changelog.d/4953.misc b/changelog.d/4953.misc deleted file mode 100644 index 06a084e6ef..0000000000 --- a/changelog.d/4953.misc +++ /dev/null @@ -1,2 +0,0 @@ -Split synapse.replication.tcp.streams into smaller files. - diff --git a/changelog.d/4954.misc b/changelog.d/4954.misc deleted file mode 100644 index 91f145950d..0000000000 --- a/changelog.d/4954.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor replication row generation/parsing. diff --git a/changelog.d/4955.bugfix b/changelog.d/4955.bugfix deleted file mode 100644 index e50e67383d..0000000000 --- a/changelog.d/4955.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix sync bug which made accepting invites unreliable in worker-mode synapses. diff --git a/changelog.d/4956.bugfix b/changelog.d/4956.bugfix deleted file mode 100644 index e50e67383d..0000000000 --- a/changelog.d/4956.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix sync bug which made accepting invites unreliable in worker-mode synapses. 
diff --git a/changelog.d/4959.misc b/changelog.d/4959.misc deleted file mode 100644 index dd4275501f..0000000000 --- a/changelog.d/4959.misc +++ /dev/null @@ -1 +0,0 @@ -Run `black` to clean up formatting on `synapse/storage/roommember.py` and `synapse/storage/events.py`. \ No newline at end of file diff --git a/changelog.d/4965.misc b/changelog.d/4965.misc deleted file mode 100644 index 284c58b75e..0000000000 --- a/changelog.d/4965.misc +++ /dev/null @@ -1 +0,0 @@ -Remove log line for password via the admin API. diff --git a/changelog.d/4967.feature b/changelog.d/4967.feature deleted file mode 100644 index 7f9f81f849..0000000000 --- a/changelog.d/4967.feature +++ /dev/null @@ -1 +0,0 @@ -Implementation of [MSC1711](https://github.com/matrix-org/matrix-doc/pull/1711) including config options for requiring valid TLS certificates for federation traffic, the ability to disable TLS validation for specific domains, and the ability to specify your own list of CA certificates. diff --git a/changelog.d/4968.misc b/changelog.d/4968.misc deleted file mode 100644 index 7a7b69771c..0000000000 --- a/changelog.d/4968.misc +++ /dev/null @@ -1 +0,0 @@ -Fix typo in TLS filenames in docker/README.md. Also add the '-p' commandline option to the 'docker run' example. Contributed by Jurrie Overgoor. diff --git a/changelog.d/4969.misc b/changelog.d/4969.misc deleted file mode 100644 index e3a3214e6b..0000000000 --- a/changelog.d/4969.misc +++ /dev/null @@ -1,2 +0,0 @@ -Refactor room version definitions. - diff --git a/changelog.d/4972.misc b/changelog.d/4972.misc deleted file mode 100644 index e104a9bb34..0000000000 --- a/changelog.d/4972.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce log level of .well-known/matrix/client responses. diff --git a/changelog.d/4974.misc b/changelog.d/4974.misc deleted file mode 100644 index 672a18923a..0000000000 --- a/changelog.d/4974.misc +++ /dev/null @@ -1 +0,0 @@ -Add `config.signing_key_path` that can be read by `synapse.config` utility. diff --git a/changelog.d/4981.bugfix b/changelog.d/4981.bugfix deleted file mode 100644 index e51b45eec0..0000000000 --- a/changelog.d/4981.bugfix +++ /dev/null @@ -1 +0,0 @@ -start.sh: Fix the --no-rate-limit option for messages and make it bypass rate limit on registration and login too. \ No newline at end of file diff --git a/changelog.d/4982.misc b/changelog.d/4982.misc deleted file mode 100644 index 067c177d39..0000000000 --- a/changelog.d/4982.misc +++ /dev/null @@ -1 +0,0 @@ -Track which identity server is used when binding a threepid and use that for unbinding, as per MSC1915. diff --git a/changelog.d/4985.misc b/changelog.d/4985.misc deleted file mode 100644 index 50c9ff9e0f..0000000000 --- a/changelog.d/4985.misc +++ /dev/null @@ -1 +0,0 @@ -Rewrite KeyringTestCase as a HomeserverTestCase. diff --git a/changelog.d/4987.misc b/changelog.d/4987.misc deleted file mode 100644 index 33490e146f..0000000000 --- a/changelog.d/4987.misc +++ /dev/null @@ -1 +0,0 @@ -README updates: Corrected the default POSTGRES_USER. Added port forwarding hint in TLS section. diff --git a/changelog.d/4989.feature b/changelog.d/4989.feature deleted file mode 100644 index a5138f5612..0000000000 --- a/changelog.d/4989.feature +++ /dev/null @@ -1 +0,0 @@ -Remove presence list support as per MSC 1819. diff --git a/changelog.d/4990.bugfix b/changelog.d/4990.bugfix deleted file mode 100644 index 1b69d058f6..0000000000 --- a/changelog.d/4990.bugfix +++ /dev/null @@ -1 +0,0 @@ -Transfer related groups on room upgrade. 
\ No newline at end of file diff --git a/changelog.d/4991.feature b/changelog.d/4991.feature deleted file mode 100644 index 034bf3239c..0000000000 --- a/changelog.d/4991.feature +++ /dev/null @@ -1 +0,0 @@ -Reduce CPU usage starting pushers during start up. diff --git a/changelog.d/4992.misc b/changelog.d/4992.misc deleted file mode 100644 index 3ee4228c09..0000000000 --- a/changelog.d/4992.misc +++ /dev/null @@ -1 +0,0 @@ -Remove a number of unused tables from the database schema. diff --git a/changelog.d/4996.misc b/changelog.d/4996.misc deleted file mode 100644 index ecac24e2be..0000000000 --- a/changelog.d/4996.misc +++ /dev/null @@ -1 +0,0 @@ -Run `black` on the remainder of `synapse/storage/`. \ No newline at end of file diff --git a/changelog.d/4998.misc b/changelog.d/4998.misc deleted file mode 100644 index 7caf959139..0000000000 --- a/changelog.d/4998.misc +++ /dev/null @@ -1 +0,0 @@ -Fix grammar in get_current_users_in_room and give it a docstring. diff --git a/changelog.d/4999.bugfix b/changelog.d/4999.bugfix deleted file mode 100644 index acbc191960..0000000000 --- a/changelog.d/4999.bugfix +++ /dev/null @@ -1 +0,0 @@ -Prevent the ability to kick users from a room they aren't in. diff --git a/changelog.d/5001.misc b/changelog.d/5001.misc deleted file mode 100644 index bf590a016d..0000000000 --- a/changelog.d/5001.misc +++ /dev/null @@ -1 +0,0 @@ -Clean up some code in the server-key Keyring. \ No newline at end of file diff --git a/changelog.d/5002.feature b/changelog.d/5002.feature deleted file mode 100644 index d8f50e963f..0000000000 --- a/changelog.d/5002.feature +++ /dev/null @@ -1 +0,0 @@ -Add a delete group admin API. diff --git a/changelog.d/5003.bugfix b/changelog.d/5003.bugfix deleted file mode 100644 index 9955dc871f..0000000000 --- a/changelog.d/5003.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix issue #4596 so synapse_port_db script works with --curses option on Python 3. Contributed by Anders Jensen-Waud . diff --git a/changelog.d/5005.misc b/changelog.d/5005.misc deleted file mode 100644 index f74147b352..0000000000 --- a/changelog.d/5005.misc +++ /dev/null @@ -1 +0,0 @@ -Convert SYNAPSE_NO_TLS Docker variable to boolean for user friendliness. Contributed by Gabriel Eckerson. diff --git a/changelog.d/5007.misc b/changelog.d/5007.misc deleted file mode 100644 index 05b6ce2c26..0000000000 --- a/changelog.d/5007.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor synapse.storage._base._simple_select_list_paginate. \ No newline at end of file diff --git a/changelog.d/5009.bugfix b/changelog.d/5009.bugfix deleted file mode 100644 index e66d3dd1aa..0000000000 --- a/changelog.d/5009.bugfix +++ /dev/null @@ -1 +0,0 @@ -Clients timing out/disappearing while downloading from the media repository will now no longer log a spurious "Producer was not unregistered" message. \ No newline at end of file diff --git a/changelog.d/5010.feature b/changelog.d/5010.feature deleted file mode 100644 index 65ab198b71..0000000000 --- a/changelog.d/5010.feature +++ /dev/null @@ -1 +0,0 @@ -Add config option to block users from looking up 3PIDs. diff --git a/changelog.d/5020.feature b/changelog.d/5020.feature deleted file mode 100644 index 71f7a8db2e..0000000000 --- a/changelog.d/5020.feature +++ /dev/null @@ -1 +0,0 @@ -Add context to phonehome stats. 
diff --git a/changelog.d/5023.feature b/changelog.d/5023.feature deleted file mode 100644 index 98551f79a3..0000000000 --- a/changelog.d/5023.feature +++ /dev/null @@ -1,3 +0,0 @@ -Configure the example systemd units to have a log identifier of `matrix-synapse` -instead of the executable name, `python`. -Contributed by Christoph Müller. diff --git a/changelog.d/5024.misc b/changelog.d/5024.misc deleted file mode 100644 index 07c13f28d0..0000000000 --- a/changelog.d/5024.misc +++ /dev/null @@ -1 +0,0 @@ -Store the notary server name correctly in server_keys_json. diff --git a/changelog.d/5027.feature b/changelog.d/5027.feature deleted file mode 100644 index 12766a82a7..0000000000 --- a/changelog.d/5027.feature +++ /dev/null @@ -1 +0,0 @@ -Add time-based account expiration. diff --git a/changelog.d/5028.misc b/changelog.d/5028.misc deleted file mode 100644 index 3ee4228c09..0000000000 --- a/changelog.d/5028.misc +++ /dev/null @@ -1 +0,0 @@ -Remove a number of unused tables from the database schema. diff --git a/changelog.d/5030.misc b/changelog.d/5030.misc deleted file mode 100644 index 3456eb5381..0000000000 --- a/changelog.d/5030.misc +++ /dev/null @@ -1 +0,0 @@ -Rewrite Datastore.get_server_verify_keys to reduce the number of database transactions. diff --git a/changelog.d/5032.bugfix b/changelog.d/5032.bugfix deleted file mode 100644 index cd71180ce9..0000000000 --- a/changelog.d/5032.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix "cannot import name execute_batch" error with postgres. diff --git a/changelog.d/5033.misc b/changelog.d/5033.misc deleted file mode 100644 index 3ee4228c09..0000000000 --- a/changelog.d/5033.misc +++ /dev/null @@ -1 +0,0 @@ -Remove a number of unused tables from the database schema. diff --git a/changelog.d/5035.bugfix b/changelog.d/5035.bugfix deleted file mode 100644 index 85e154027e..0000000000 --- a/changelog.d/5035.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix disappearing exceptions in manhole. diff --git a/changelog.d/5037.bugfix b/changelog.d/5037.bugfix deleted file mode 100644 index c3af418ddf..0000000000 --- a/changelog.d/5037.bugfix +++ /dev/null @@ -1 +0,0 @@ -Workaround bug in twisted where attempting too many concurrent DNS requests could cause it to hang due to running out of file descriptors. diff --git a/changelog.d/5046.misc b/changelog.d/5046.misc deleted file mode 100644 index eb966a5ae6..0000000000 --- a/changelog.d/5046.misc +++ /dev/null @@ -1 +0,0 @@ -Remove extraneous period from copyright headers. diff --git a/changelog.d/5047.feature b/changelog.d/5047.feature deleted file mode 100644 index 12766a82a7..0000000000 --- a/changelog.d/5047.feature +++ /dev/null @@ -1 +0,0 @@ -Add time-based account expiration. diff --git a/changelog.d/5063.feature b/changelog.d/5063.feature deleted file mode 100644 index fd7b80018e..0000000000 --- a/changelog.d/5063.feature +++ /dev/null @@ -1 +0,0 @@ -Add support for handling /verions, /voip and /push_rules client endpoints to client_reader worker. diff --git a/changelog.d/5065.feature b/changelog.d/5065.feature deleted file mode 100644 index fd7b80018e..0000000000 --- a/changelog.d/5065.feature +++ /dev/null @@ -1 +0,0 @@ -Add support for handling /verions, /voip and /push_rules client endpoints to client_reader worker. diff --git a/changelog.d/5067.misc b/changelog.d/5067.misc deleted file mode 100644 index bbb4337dbf..0000000000 --- a/changelog.d/5067.misc +++ /dev/null @@ -1 +0,0 @@ -Update documentation for where to get Synapse packages. 
diff --git a/changelog.d/5070.feature b/changelog.d/5070.feature deleted file mode 100644 index fd7b80018e..0000000000 --- a/changelog.d/5070.feature +++ /dev/null @@ -1 +0,0 @@ -Add support for handling /verions, /voip and /push_rules client endpoints to client_reader worker. diff --git a/changelog.d/5071.bugfix b/changelog.d/5071.bugfix deleted file mode 100644 index ddf7ab5fa8..0000000000 --- a/changelog.d/5071.bugfix +++ /dev/null @@ -1 +0,0 @@ -Make sure we're not registering the same 3pid twice on registration. diff --git a/changelog.d/5073.feature b/changelog.d/5073.feature deleted file mode 100644 index 12766a82a7..0000000000 --- a/changelog.d/5073.feature +++ /dev/null @@ -1 +0,0 @@ -Add time-based account expiration. diff --git a/changelog.d/5077.bugfix b/changelog.d/5077.bugfix deleted file mode 100644 index f3345a6353..0000000000 --- a/changelog.d/5077.bugfix +++ /dev/null @@ -1 +0,0 @@ -Don't crash on lack of expiry templates. diff --git a/changelog.d/5083.feature b/changelog.d/5083.feature deleted file mode 100644 index 55d114b3fe..0000000000 --- a/changelog.d/5083.feature +++ /dev/null @@ -1 +0,0 @@ -Add an configuration option to require authentication on /publicRooms and /profile endpoints. diff --git a/changelog.d/5098.misc b/changelog.d/5098.misc deleted file mode 100644 index 9cd83bf226..0000000000 --- a/changelog.d/5098.misc +++ /dev/null @@ -1 +0,0 @@ -Add workarounds for pep-517 install errors. diff --git a/changelog.d/5100.misc b/changelog.d/5100.misc deleted file mode 100644 index db5eb1b156..0000000000 --- a/changelog.d/5100.misc +++ /dev/null @@ -1 +0,0 @@ -Improve logging when event-signature checks fail. diff --git a/changelog.d/5103.bugfix b/changelog.d/5103.bugfix deleted file mode 100644 index 590d80d58f..0000000000 --- a/changelog.d/5103.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where presence updates were sent to all servers in a room when a new server joined, rather than to just the new server. diff --git a/changelog.d/5104.bugfix b/changelog.d/5104.bugfix deleted file mode 100644 index f88aca8a40..0000000000 --- a/changelog.d/5104.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix the ratelimting on third party invites. diff --git a/changelog.d/5116.feature b/changelog.d/5116.feature deleted file mode 100644 index dcbf7c1fb8..0000000000 --- a/changelog.d/5116.feature +++ /dev/null @@ -1 +0,0 @@ - Add time-based account expiration. diff --git a/changelog.d/5119.feature b/changelog.d/5119.feature deleted file mode 100644 index a3a73f09d3..0000000000 --- a/changelog.d/5119.feature +++ /dev/null @@ -1 +0,0 @@ -Move admin APIs to `/_synapse/admin/v1`. (The old paths are retained for backwards-compatibility, for now). \ No newline at end of file diff --git a/changelog.d/5120.misc b/changelog.d/5120.misc deleted file mode 100644 index 6575f29322..0000000000 --- a/changelog.d/5120.misc +++ /dev/null @@ -1 +0,0 @@ -Factor out an "assert_requester_is_admin" function. diff --git a/changelog.d/5121.feature b/changelog.d/5121.feature deleted file mode 100644 index 54b228680d..0000000000 --- a/changelog.d/5121.feature +++ /dev/null @@ -1 +0,0 @@ -Implement an admin API for sending server notices. Many thanks to @krombel who provided a foundation for this work. diff --git a/changelog.d/5122.misc b/changelog.d/5122.misc deleted file mode 100644 index e1be8a6210..0000000000 --- a/changelog.d/5122.misc +++ /dev/null @@ -1 +0,0 @@ -Remove the requirement to authenticate for /admin/server_version. 
diff --git a/changelog.d/5124.bugfix b/changelog.d/5124.bugfix deleted file mode 100644 index 46df1e9fd5..0000000000 --- a/changelog.d/5124.bugfix +++ /dev/null @@ -1 +0,0 @@ -Add some missing limitations to room alias creation. diff --git a/changelog.d/5128.bugfix b/changelog.d/5128.bugfix deleted file mode 100644 index 46df1e9fd5..0000000000 --- a/changelog.d/5128.bugfix +++ /dev/null @@ -1 +0,0 @@ -Add some missing limitations to room alias creation. diff --git a/changelog.d/5138.bugfix b/changelog.d/5138.bugfix deleted file mode 100644 index c1945a8eb2..0000000000 --- a/changelog.d/5138.bugfix +++ /dev/null @@ -1 +0,0 @@ -Limit the number of EDUs in transactions to 100 as expected by synapse. Thanks to @superboum for this work! diff --git a/changelog.d/5142.feature b/changelog.d/5142.feature deleted file mode 100644 index 54b228680d..0000000000 --- a/changelog.d/5142.feature +++ /dev/null @@ -1 +0,0 @@ -Implement an admin API for sending server notices. Many thanks to @krombel who provided a foundation for this work. diff --git a/changelog.d/5154.bugfix b/changelog.d/5154.bugfix deleted file mode 100644 index c7bd5ee6cf..0000000000 --- a/changelog.d/5154.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bogus imports in unit tests. diff --git a/changelog.d/5155.misc b/changelog.d/5155.misc deleted file mode 100644 index a81dbae67a..0000000000 --- a/changelog.d/5155.misc +++ /dev/null @@ -1 +0,0 @@ -Prevent an exception from being raised in a IResolutionReceiver and use a more generic error message for blacklisted URL previews. \ No newline at end of file diff --git a/changelog.d/5170.misc b/changelog.d/5170.misc deleted file mode 100644 index 7919dac555..0000000000 --- a/changelog.d/5170.misc +++ /dev/null @@ -1 +0,0 @@ -Run `black` on the tests directory. diff --git a/changelog.d/5179.misc b/changelog.d/5179.misc deleted file mode 100644 index 92369cb2fc..0000000000 --- a/changelog.d/5179.misc +++ /dev/null @@ -1 +0,0 @@ -Fix CI after new release of isort. diff --git a/synapse/__init__.py b/synapse/__init__.py index 315fa96551..cd9cfb2409 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "0.99.3.2" +__version__ = "0.99.4rc1" From 5a4b328f522e9d08248dc03613fb0529f7529dbb Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 13 May 2019 11:05:06 -0700 Subject: [PATCH 115/429] Add ability to blacklist ip ranges for federation traffic (#5043) --- changelog.d/5043.feature | 1 + docs/sample_config.yaml | 18 +++++++ synapse/config/server.py | 38 ++++++++++++++ synapse/http/client.py | 6 +-- synapse/http/matrixfederationclient.py | 48 +++++++++++++---- tests/http/test_fedclient.py | 71 ++++++++++++++++++++++++++ 6 files changed, 168 insertions(+), 14 deletions(-) create mode 100644 changelog.d/5043.feature diff --git a/changelog.d/5043.feature b/changelog.d/5043.feature new file mode 100644 index 0000000000..0f1e0ee30e --- /dev/null +++ b/changelog.d/5043.feature @@ -0,0 +1 @@ +Add ability to blacklist IP ranges for the federation client. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index bdfc34c6bd..c4e5c4cf39 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -115,6 +115,24 @@ pid_file: DATADIR/homeserver.pid # - nyc.example.com # - syd.example.com +# Prevent federation requests from being sent to the following +# blacklist IP address CIDR ranges. 
If this option is not specified, or +# specified with an empty list, no ip range blacklist will be enforced. +# +# (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly +# listed here, since they correspond to unroutable addresses.) +# +federation_ip_range_blacklist: + - '127.0.0.0/8' + - '10.0.0.0/8' + - '172.16.0.0/12' + - '192.168.0.0/16' + - '100.64.0.0/10' + - '169.254.0.0/16' + - '::1/128' + - 'fe80::/64' + - 'fc00::/7' + # List of ports that Synapse should listen on, their purpose and their # configuration. # diff --git a/synapse/config/server.py b/synapse/config/server.py index 8dce75c56a..7874cd9da7 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -17,6 +17,8 @@ import logging import os.path +from netaddr import IPSet + from synapse.http.endpoint import parse_and_validate_server_name from synapse.python_dependencies import DependencyException, check_requirements @@ -137,6 +139,24 @@ class ServerConfig(Config): for domain in federation_domain_whitelist: self.federation_domain_whitelist[domain] = True + self.federation_ip_range_blacklist = config.get( + "federation_ip_range_blacklist", [], + ) + + # Attempt to create an IPSet from the given ranges + try: + self.federation_ip_range_blacklist = IPSet( + self.federation_ip_range_blacklist + ) + + # Always blacklist 0.0.0.0, :: + self.federation_ip_range_blacklist.update(["0.0.0.0", "::"]) + except Exception as e: + raise ConfigError( + "Invalid range(s) provided in " + "federation_ip_range_blacklist: %s" % e + ) + if self.public_baseurl is not None: if self.public_baseurl[-1] != '/': self.public_baseurl += '/' @@ -386,6 +406,24 @@ class ServerConfig(Config): # - nyc.example.com # - syd.example.com + # Prevent federation requests from being sent to the following + # blacklist IP address CIDR ranges. If this option is not specified, or + # specified with an empty list, no ip range blacklist will be enforced. + # + # (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly + # listed here, since they correspond to unroutable addresses.) + # + federation_ip_range_blacklist: + - '127.0.0.0/8' + - '10.0.0.0/8' + - '172.16.0.0/12' + - '192.168.0.0/16' + - '100.64.0.0/10' + - '169.254.0.0/16' + - '::1/128' + - 'fe80::/64' + - 'fc00::/7' + # List of ports that Synapse should listen on, their purpose and their # configuration. # diff --git a/synapse/http/client.py b/synapse/http/client.py index ddbfb72228..77fe68818b 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -165,7 +165,8 @@ class BlacklistingAgentWrapper(Agent): ip_address, self._ip_whitelist, self._ip_blacklist ): logger.info( - "Blocking access to %s because of blacklist" % (ip_address,) + "Blocking access to %s due to blacklist" % + (ip_address,) ) e = SynapseError(403, "IP address blocked by IP blacklist entry") return defer.fail(Failure(e)) @@ -263,9 +264,6 @@ class SimpleHttpClient(object): uri (str): URI to query. data (bytes): Data to send in the request body, if applicable. headers (t.w.http_headers.Headers): Request headers. - - Raises: - SynapseError: If the IP is blacklisted. 
""" # A small wrapper around self.agent.request() so we can easily attach # counters to it diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index ff63d0b2a8..7eefc7b1fc 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -27,9 +27,11 @@ import treq from canonicaljson import encode_canonical_json from prometheus_client import Counter from signedjson.sign import sign_json +from zope.interface import implementer from twisted.internet import defer, protocol from twisted.internet.error import DNSLookupError +from twisted.internet.interfaces import IReactorPluggableNameResolver from twisted.internet.task import _EPSILON, Cooperator from twisted.web._newclient import ResponseDone from twisted.web.http_headers import Headers @@ -44,6 +46,7 @@ from synapse.api.errors import ( SynapseError, ) from synapse.http import QuieterFileBodyProducer +from synapse.http.client import BlacklistingAgentWrapper, IPBlacklistingResolver from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent from synapse.util.async_helpers import timeout_deferred from synapse.util.logcontext import make_deferred_yieldable @@ -172,19 +175,44 @@ class MatrixFederationHttpClient(object): self.hs = hs self.signing_key = hs.config.signing_key[0] self.server_name = hs.hostname - reactor = hs.get_reactor() + + real_reactor = hs.get_reactor() + + # We need to use a DNS resolver which filters out blacklisted IP + # addresses, to prevent DNS rebinding. + nameResolver = IPBlacklistingResolver( + real_reactor, None, hs.config.federation_ip_range_blacklist, + ) + + @implementer(IReactorPluggableNameResolver) + class Reactor(object): + def __getattr__(_self, attr): + if attr == "nameResolver": + return nameResolver + else: + return getattr(real_reactor, attr) + + self.reactor = Reactor() self.agent = MatrixFederationAgent( - hs.get_reactor(), + self.reactor, tls_client_options_factory, ) + + # Use a BlacklistingAgentWrapper to prevent circumventing the IP + # blacklist via IP literals in server names + self.agent = BlacklistingAgentWrapper( + self.agent, self.reactor, + ip_blacklist=hs.config.federation_ip_range_blacklist, + ) + self.clock = hs.get_clock() self._store = hs.get_datastore() self.version_string_bytes = hs.version_string.encode('ascii') self.default_timeout = 60 def schedule(x): - reactor.callLater(_EPSILON, x) + self.reactor.callLater(_EPSILON, x) self._cooperator = Cooperator(scheduler=schedule) @@ -370,7 +398,7 @@ class MatrixFederationHttpClient(object): request_deferred = timeout_deferred( request_deferred, timeout=_sec_timeout, - reactor=self.hs.get_reactor(), + reactor=self.reactor, ) response = yield request_deferred @@ -397,7 +425,7 @@ class MatrixFederationHttpClient(object): d = timeout_deferred( d, timeout=_sec_timeout, - reactor=self.hs.get_reactor(), + reactor=self.reactor, ) try: @@ -586,7 +614,7 @@ class MatrixFederationHttpClient(object): ) body = yield _handle_json_response( - self.hs.get_reactor(), self.default_timeout, request, response, + self.reactor, self.default_timeout, request, response, ) defer.returnValue(body) @@ -645,7 +673,7 @@ class MatrixFederationHttpClient(object): _sec_timeout = self.default_timeout body = yield _handle_json_response( - self.hs.get_reactor(), _sec_timeout, request, response, + self.reactor, _sec_timeout, request, response, ) defer.returnValue(body) @@ -704,7 +732,7 @@ class MatrixFederationHttpClient(object): ) body = yield _handle_json_response( - 
self.hs.get_reactor(), self.default_timeout, request, response, + self.reactor, self.default_timeout, request, response, ) defer.returnValue(body) @@ -753,7 +781,7 @@ class MatrixFederationHttpClient(object): ) body = yield _handle_json_response( - self.hs.get_reactor(), self.default_timeout, request, response, + self.reactor, self.default_timeout, request, response, ) defer.returnValue(body) @@ -801,7 +829,7 @@ class MatrixFederationHttpClient(object): try: d = _readBodyToFile(response, output_stream, max_size) - d.addTimeout(self.default_timeout, self.hs.get_reactor()) + d.addTimeout(self.default_timeout, self.reactor) length = yield make_deferred_yieldable(d) except Exception as e: logger.warn( diff --git a/tests/http/test_fedclient.py b/tests/http/test_fedclient.py index 279e456614..ee767f3a5a 100644 --- a/tests/http/test_fedclient.py +++ b/tests/http/test_fedclient.py @@ -15,6 +15,8 @@ from mock import Mock +from netaddr import IPSet + from twisted.internet import defer from twisted.internet.defer import TimeoutError from twisted.internet.error import ConnectingCancelledError, DNSLookupError @@ -209,6 +211,75 @@ class FederationClientTests(HomeserverTestCase): self.assertIsInstance(f.value, RequestSendFailed) self.assertIsInstance(f.value.inner_exception, ResponseNeverReceived) + def test_client_ip_range_blacklist(self): + """Ensure that Synapse does not try to connect to blacklisted IPs""" + + # Set up the ip_range blacklist + self.hs.config.federation_ip_range_blacklist = IPSet([ + "127.0.0.0/8", + "fe80::/64", + ]) + self.reactor.lookups["internal"] = "127.0.0.1" + self.reactor.lookups["internalv6"] = "fe80:0:0:0:0:8a2e:370:7337" + self.reactor.lookups["fine"] = "10.20.30.40" + cl = MatrixFederationHttpClient(self.hs, None) + + # Try making a GET request to a blacklisted IPv4 address + # ------------------------------------------------------ + # Make the request + d = cl.get_json("internal:8008", "foo/bar", timeout=10000) + + # Nothing happened yet + self.assertNoResult(d) + + self.pump(1) + + # Check that it was unable to resolve the address + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 0) + + f = self.failureResultOf(d) + self.assertIsInstance(f.value, RequestSendFailed) + self.assertIsInstance(f.value.inner_exception, DNSLookupError) + + # Try making a POST request to a blacklisted IPv6 address + # ------------------------------------------------------- + # Make the request + d = cl.post_json("internalv6:8008", "foo/bar", timeout=10000) + + # Nothing has happened yet + self.assertNoResult(d) + + # Move the reactor forwards + self.pump(1) + + # Check that it was unable to resolve the address + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 0) + + # Check that it was due to a blacklisted DNS lookup + f = self.failureResultOf(d, RequestSendFailed) + self.assertIsInstance(f.value.inner_exception, DNSLookupError) + + # Try making a GET request to a non-blacklisted IPv4 address + # ---------------------------------------------------------- + # Make the request + d = cl.post_json("fine:8008", "foo/bar", timeout=10000) + + # Nothing has happened yet + self.assertNoResult(d) + + # Move the reactor forwards + self.pump(1) + + # Check that it was able to resolve the address + clients = self.reactor.tcpClients + self.assertNotEqual(len(clients), 0) + + # Connection will still fail as this IP address does not resolve to anything + f = self.failureResultOf(d, RequestSendFailed) + self.assertIsInstance(f.value.inner_exception, 
ConnectingCancelledError) + def test_client_gets_headers(self): """ Once the client gets the headers, _request returns successfully. From df2ebd75d3abde2bc2262551d8b2fd40c4b4bddf Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 13 May 2019 15:01:14 -0500 Subject: [PATCH 116/429] Migrate all tests to use the dict-based config format instead of hanging items off HomeserverConfig (#5171) --- changelog.d/5171.misc | 1 + synapse/rest/media/v1/storage_provider.py | 1 + tests/handlers/test_register.py | 8 +- tests/handlers/test_user_directory.py | 4 +- .../test_matrix_federation_agent.py | 4 +- tests/push/test_email.py | 36 ++--- tests/push/test_http.py | 2 +- tests/rest/client/test_consent.py | 11 +- tests/rest/client/test_identity.py | 2 +- tests/rest/client/v1/test_directory.py | 2 +- tests/rest/client/v1/test_events.py | 6 +- tests/rest/client/v1/test_profile.py | 2 +- tests/rest/client/v1/test_rooms.py | 2 +- tests/rest/client/v2_alpha/test_auth.py | 6 +- tests/rest/client/v2_alpha/test_register.py | 51 ++++--- tests/rest/media/v1/test_media_storage.py | 17 +-- tests/rest/media/v1/test_url_preview.py | 40 +++--- tests/server.py | 61 ++++---- tests/server_notices/test_consent.py | 30 ++-- .../test_resource_limits_server_notices.py | 9 +- tests/test_state.py | 2 +- tests/unittest.py | 12 +- tests/utils.py | 130 +++++++++--------- 23 files changed, 238 insertions(+), 201 deletions(-) create mode 100644 changelog.d/5171.misc diff --git a/changelog.d/5171.misc b/changelog.d/5171.misc new file mode 100644 index 0000000000..d148b03b51 --- /dev/null +++ b/changelog.d/5171.misc @@ -0,0 +1 @@ +Update tests to consistently be configured via the same code that is used when loading from configuration files. diff --git a/synapse/rest/media/v1/storage_provider.py b/synapse/rest/media/v1/storage_provider.py index 5aa03031f6..d90cbfb56a 100644 --- a/synapse/rest/media/v1/storage_provider.py +++ b/synapse/rest/media/v1/storage_provider.py @@ -108,6 +108,7 @@ class FileStorageProviderBackend(StorageProvider): """ def __init__(self, hs, config): + self.hs = hs self.cache_directory = hs.config.media_store_path self.base_directory = config diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 017ea0385e..1c253d0579 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -37,8 +37,12 @@ class RegistrationTestCase(unittest.HomeserverTestCase): hs_config = self.default_config("test") # some of the tests rely on us having a user consent version - hs_config.user_consent_version = "test_consent_version" - hs_config.max_mau_value = 50 + hs_config["user_consent"] = { + "version": "test_consent_version", + "template_dir": ".", + } + hs_config["max_mau_value"] = 50 + hs_config["limit_usage_by_mau"] = True hs = self.setup_test_homeserver(config=hs_config, expire_access_token=True) return hs diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index 44468f5382..9021e647fe 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -37,7 +37,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): config = self.default_config() - config.update_user_directory = True + config["update_user_directory"] = True return self.setup_test_homeserver(config=config) def prepare(self, reactor, clock, hs): @@ -333,7 +333,7 @@ class TestUserDirSearchDisabled(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): config = 
self.default_config() - config.update_user_directory = True + config["update_user_directory"] = True hs = self.setup_test_homeserver(config=config) self.config = hs.config diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index 7036615041..ed0ca079d9 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -54,7 +54,9 @@ class MatrixFederationAgentTests(TestCase): self.agent = MatrixFederationAgent( reactor=self.reactor, - tls_client_options_factory=ClientTLSOptionsFactory(default_config("test")), + tls_client_options_factory=ClientTLSOptionsFactory( + default_config("test", parse=True) + ), _well_known_tls_policy=TrustingTLSPolicyForHTTPS(), _srv_resolver=self.mock_resolver, _well_known_cache=self.well_known_cache, diff --git a/tests/push/test_email.py b/tests/push/test_email.py index 325ea449ae..9cdde1a9bd 100644 --- a/tests/push/test_email.py +++ b/tests/push/test_email.py @@ -52,22 +52,26 @@ class EmailPusherTests(HomeserverTestCase): return d config = self.default_config() - config.email_enable_notifs = True - config.start_pushers = True - - config.email_template_dir = os.path.abspath( - pkg_resources.resource_filename('synapse', 'res/templates') - ) - config.email_notif_template_html = "notif_mail.html" - config.email_notif_template_text = "notif_mail.txt" - config.email_smtp_host = "127.0.0.1" - config.email_smtp_port = 20 - config.require_transport_security = False - config.email_smtp_user = None - config.email_smtp_pass = None - config.email_app_name = "Matrix" - config.email_notif_from = "test@example.com" - config.email_riot_base_url = None + config["email"] = { + "enable_notifs": True, + "template_dir": os.path.abspath( + pkg_resources.resource_filename('synapse', 'res/templates') + ), + "expiry_template_html": "notice_expiry.html", + "expiry_template_text": "notice_expiry.txt", + "notif_template_html": "notif_mail.html", + "notif_template_text": "notif_mail.txt", + "smtp_host": "127.0.0.1", + "smtp_port": 20, + "require_transport_security": False, + "smtp_user": None, + "smtp_pass": None, + "app_name": "Matrix", + "notif_from": "test@example.com", + "riot_base_url": None, + } + config["public_baseurl"] = "aaa" + config["start_pushers"] = True hs = self.setup_test_homeserver(config=config, sendmail=sendmail) diff --git a/tests/push/test_http.py b/tests/push/test_http.py index 13bd2c8688..aba618b2be 100644 --- a/tests/push/test_http.py +++ b/tests/push/test_http.py @@ -54,7 +54,7 @@ class HTTPPusherTests(HomeserverTestCase): m.post_json_get_json = post_json_get_json config = self.default_config() - config.start_pushers = True + config["start_pushers"] = True hs = self.setup_test_homeserver(config=config, simple_http_client=m) diff --git a/tests/rest/client/test_consent.py b/tests/rest/client/test_consent.py index 5528971190..88f8f1abdc 100644 --- a/tests/rest/client/test_consent.py +++ b/tests/rest/client/test_consent.py @@ -42,15 +42,18 @@ class ConsentResourceTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): config = self.default_config() - config.user_consent_version = "1" - config.public_baseurl = "" - config.form_secret = "123abc" + config["public_baseurl"] = "aaaa" + config["form_secret"] = "123abc" # Make some temporary templates... 
temp_consent_path = self.mktemp() os.mkdir(temp_consent_path) os.mkdir(os.path.join(temp_consent_path, 'en')) - config.user_consent_template_dir = os.path.abspath(temp_consent_path) + + config["user_consent"] = { + "version": "1", + "template_dir": os.path.abspath(temp_consent_path), + } with open(os.path.join(temp_consent_path, "en/1.html"), 'w') as f: f.write("{{version}},{{has_consented}}") diff --git a/tests/rest/client/test_identity.py b/tests/rest/client/test_identity.py index 1a714ff58a..68949307d9 100644 --- a/tests/rest/client/test_identity.py +++ b/tests/rest/client/test_identity.py @@ -32,7 +32,7 @@ class IdentityTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): config = self.default_config() - config.enable_3pid_lookup = False + config["enable_3pid_lookup"] = False self.hs = self.setup_test_homeserver(config=config) return self.hs diff --git a/tests/rest/client/v1/test_directory.py b/tests/rest/client/v1/test_directory.py index 73c5b44b46..633b7dbda0 100644 --- a/tests/rest/client/v1/test_directory.py +++ b/tests/rest/client/v1/test_directory.py @@ -34,7 +34,7 @@ class DirectoryTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): config = self.default_config() - config.require_membership_for_aliases = True + config["require_membership_for_aliases"] = True self.hs = self.setup_test_homeserver(config=config) diff --git a/tests/rest/client/v1/test_events.py b/tests/rest/client/v1/test_events.py index 8a9a55a527..f340b7e851 100644 --- a/tests/rest/client/v1/test_events.py +++ b/tests/rest/client/v1/test_events.py @@ -36,9 +36,9 @@ class EventStreamPermissionsTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): config = self.default_config() - config.enable_registration_captcha = False - config.enable_registration = True - config.auto_join_rooms = [] + config["enable_registration_captcha"] = False + config["enable_registration"] = True + config["auto_join_rooms"] = [] hs = self.setup_test_homeserver( config=config, ratelimiter=NonCallableMock(spec_set=["can_do_action"]) diff --git a/tests/rest/client/v1/test_profile.py b/tests/rest/client/v1/test_profile.py index ed034879cf..769c37ce52 100644 --- a/tests/rest/client/v1/test_profile.py +++ b/tests/rest/client/v1/test_profile.py @@ -171,7 +171,7 @@ class ProfilesRestrictedTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): config = self.default_config() - config.require_auth_for_profile_requests = True + config["require_auth_for_profile_requests"] = True self.hs = self.setup_test_homeserver(config=config) return self.hs diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index 9b191436cc..6220172cde 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -919,7 +919,7 @@ class PublicRoomsRestrictedTestCase(unittest.HomeserverTestCase): self.url = b"/_matrix/client/r0/publicRooms" config = self.default_config() - config.restrict_public_rooms_to_local_users = True + config["restrict_public_rooms_to_local_users"] = True self.hs = self.setup_test_homeserver(config=config) return self.hs diff --git a/tests/rest/client/v2_alpha/test_auth.py b/tests/rest/client/v2_alpha/test_auth.py index 0ca3c4657b..ad7d476401 100644 --- a/tests/rest/client/v2_alpha/test_auth.py +++ b/tests/rest/client/v2_alpha/test_auth.py @@ -36,9 +36,9 @@ class FallbackAuthTests(unittest.HomeserverTestCase): config = self.default_config() - config.enable_registration_captcha = True - 
config.recaptcha_public_key = "brokencake" - config.registrations_require_3pid = [] + config["enable_registration_captcha"] = True + config["recaptcha_public_key"] = "brokencake" + config["registrations_require_3pid"] = [] hs = self.setup_test_homeserver(config=config) return hs diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index be95dc592d..65685883db 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -201,9 +201,11 @@ class AccountValidityTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): config = self.default_config() # Test for account expiring after a week. - config.enable_registration = True - config.account_validity.enabled = True - config.account_validity.period = 604800000 # Time in ms for 1 week + config["enable_registration"] = True + config["account_validity"] = { + "enabled": True, + "period": 604800000, # Time in ms for 1 week + } self.hs = self.setup_test_homeserver(config=config) return self.hs @@ -299,14 +301,17 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): config = self.default_config() + # Test for account expiring after a week and renewal emails being sent 2 # days before expiry. - config.enable_registration = True - config.account_validity.enabled = True - config.account_validity.renew_by_email_enabled = True - config.account_validity.period = 604800000 # Time in ms for 1 week - config.account_validity.renew_at = 172800000 # Time in ms for 2 days - config.account_validity.renew_email_subject = "Renew your account" + config["enable_registration"] = True + config["account_validity"] = { + "enabled": True, + "period": 604800000, # Time in ms for 1 week + "renew_at": 172800000, # Time in ms for 2 days + "renew_by_email_enabled": True, + "renew_email_subject": "Renew your account", + } # Email config. 
self.email_attempts = [] @@ -315,17 +320,23 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): self.email_attempts.append((args, kwargs)) return - config.email_template_dir = os.path.abspath( - pkg_resources.resource_filename('synapse', 'res/templates') - ) - config.email_expiry_template_html = "notice_expiry.html" - config.email_expiry_template_text = "notice_expiry.txt" - config.email_smtp_host = "127.0.0.1" - config.email_smtp_port = 20 - config.require_transport_security = False - config.email_smtp_user = None - config.email_smtp_pass = None - config.email_notif_from = "test@example.com" + config["email"] = { + "enable_notifs": True, + "template_dir": os.path.abspath( + pkg_resources.resource_filename('synapse', 'res/templates') + ), + "expiry_template_html": "notice_expiry.html", + "expiry_template_text": "notice_expiry.txt", + "notif_template_html": "notif_mail.html", + "notif_template_text": "notif_mail.txt", + "smtp_host": "127.0.0.1", + "smtp_port": 20, + "require_transport_security": False, + "smtp_user": None, + "smtp_pass": None, + "notif_from": "test@example.com", + } + config["public_baseurl"] = "aaa" self.hs = self.setup_test_homeserver(config=config, sendmail=sendmail) diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/rest/media/v1/test_media_storage.py index ad5e9a612f..1069a44145 100644 --- a/tests/rest/media/v1/test_media_storage.py +++ b/tests/rest/media/v1/test_media_storage.py @@ -25,13 +25,11 @@ from six.moves.urllib import parse from twisted.internet import defer, reactor from twisted.internet.defer import Deferred -from synapse.config.repository import MediaStorageProviderConfig from synapse.rest.media.v1._base import FileInfo from synapse.rest.media.v1.filepath import MediaFilePaths from synapse.rest.media.v1.media_storage import MediaStorage from synapse.rest.media.v1.storage_provider import FileStorageProviderBackend from synapse.util.logcontext import make_deferred_yieldable -from synapse.util.module_loader import load_module from tests import unittest @@ -120,12 +118,14 @@ class MediaRepoTests(unittest.HomeserverTestCase): client.get_file = get_file self.storage_path = self.mktemp() + self.media_store_path = self.mktemp() os.mkdir(self.storage_path) + os.mkdir(self.media_store_path) config = self.default_config() - config.media_store_path = self.storage_path - config.thumbnail_requirements = {} - config.max_image_pixels = 2000000 + config["media_store_path"] = self.media_store_path + config["thumbnail_requirements"] = {} + config["max_image_pixels"] = 2000000 provider_config = { "module": "synapse.rest.media.v1.storage_provider.FileStorageProviderBackend", @@ -134,12 +134,7 @@ class MediaRepoTests(unittest.HomeserverTestCase): "store_remote": True, "config": {"directory": self.storage_path}, } - - loaded = list(load_module(provider_config)) + [ - MediaStorageProviderConfig(False, False, False) - ] - - config.media_storage_providers = [loaded] + config["media_storage_providers"] = [provider_config] hs = self.setup_test_homeserver(config=config, http_client=client) diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/v1/test_url_preview.py index f696395f3c..1ab0f7293a 100644 --- a/tests/rest/media/v1/test_url_preview.py +++ b/tests/rest/media/v1/test_url_preview.py @@ -16,7 +16,6 @@ import os import attr -from netaddr import IPSet from twisted.internet._resolver import HostResolution from twisted.internet.address import IPv4Address, IPv6Address @@ -25,9 +24,6 @@ from twisted.python.failure import Failure 
from twisted.test.proto_helpers import AccumulatingProtocol from twisted.web._newclient import ResponseDone -from synapse.config.repository import MediaStorageProviderConfig -from synapse.util.module_loader import load_module - from tests import unittest from tests.server import FakeTransport @@ -67,23 +63,23 @@ class URLPreviewTests(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): - self.storage_path = self.mktemp() - os.mkdir(self.storage_path) - config = self.default_config() - config.url_preview_enabled = True - config.max_spider_size = 9999999 - config.url_preview_ip_range_blacklist = IPSet( - ( - "192.168.1.1", - "1.0.0.0/8", - "3fff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", - "2001:800::/21", - ) + config["url_preview_enabled"] = True + config["max_spider_size"] = 9999999 + config["url_preview_ip_range_blacklist"] = ( + "192.168.1.1", + "1.0.0.0/8", + "3fff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", + "2001:800::/21", ) - config.url_preview_ip_range_whitelist = IPSet(("1.1.1.1",)) - config.url_preview_url_blacklist = [] - config.media_store_path = self.storage_path + config["url_preview_ip_range_whitelist"] = ("1.1.1.1",) + config["url_preview_url_blacklist"] = [] + + self.storage_path = self.mktemp() + self.media_store_path = self.mktemp() + os.mkdir(self.storage_path) + os.mkdir(self.media_store_path) + config["media_store_path"] = self.media_store_path provider_config = { "module": "synapse.rest.media.v1.storage_provider.FileStorageProviderBackend", @@ -93,11 +89,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): "config": {"directory": self.storage_path}, } - loaded = list(load_module(provider_config)) + [ - MediaStorageProviderConfig(False, False, False) - ] - - config.media_storage_providers = [loaded] + config["media_storage_providers"] = [provider_config] hs = self.setup_test_homeserver(config=config) diff --git a/tests/server.py b/tests/server.py index fc41345488..c15a47f2a4 100644 --- a/tests/server.py +++ b/tests/server.py @@ -227,6 +227,8 @@ class ThreadedMemoryReactorClock(MemoryReactorClock): """ def __init__(self): + self.threadpool = ThreadPool(self) + self._udp = [] lookups = self.lookups = {} @@ -255,6 +257,37 @@ class ThreadedMemoryReactorClock(MemoryReactorClock): self.callLater(0, d.callback, True) return d + def getThreadPool(self): + return self.threadpool + + +class ThreadPool: + """ + Threadless thread pool. + """ + + def __init__(self, reactor): + self._reactor = reactor + + def start(self): + pass + + def stop(self): + pass + + def callInThreadWithCallback(self, onResult, function, *args, **kwargs): + def _(res): + if isinstance(res, Failure): + onResult(False, res) + else: + onResult(True, res) + + d = Deferred() + d.addCallback(lambda x: function(*args, **kwargs)) + d.addBoth(_) + self._reactor.callLater(0, d.callback, True) + return d + def setup_test_homeserver(cleanup_func, *args, **kwargs): """ @@ -290,36 +323,10 @@ def setup_test_homeserver(cleanup_func, *args, **kwargs): **kwargs ) - class ThreadPool: - """ - Threadless thread pool. 
- """ - - def start(self): - pass - - def stop(self): - pass - - def callInThreadWithCallback(self, onResult, function, *args, **kwargs): - def _(res): - if isinstance(res, Failure): - onResult(False, res) - else: - onResult(True, res) - - d = Deferred() - d.addCallback(lambda x: function(*args, **kwargs)) - d.addBoth(_) - clock._reactor.callLater(0, d.callback, True) - return d - - clock.threadpool = ThreadPool() - if pool: pool.runWithConnection = runWithConnection pool.runInteraction = runInteraction - pool.threadpool = ThreadPool() + pool.threadpool = ThreadPool(clock._reactor) pool.running = True return d diff --git a/tests/server_notices/test_consent.py b/tests/server_notices/test_consent.py index e0b4e0eb63..872039c8f1 100644 --- a/tests/server_notices/test_consent.py +++ b/tests/server_notices/test_consent.py @@ -12,6 +12,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +import os + import synapse.rest.admin from synapse.rest.client.v1 import login, room from synapse.rest.client.v2_alpha import sync @@ -30,20 +33,27 @@ class ConsentNoticesTests(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): + tmpdir = self.mktemp() + os.mkdir(tmpdir) self.consent_notice_message = "consent %(consent_uri)s" config = self.default_config() - config.user_consent_version = "1" - config.user_consent_server_notice_content = { - "msgtype": "m.text", - "body": self.consent_notice_message, + config["user_consent"] = { + "version": "1", + "template_dir": tmpdir, + "server_notice_content": { + "msgtype": "m.text", + "body": self.consent_notice_message, + }, } - config.public_baseurl = "https://example.com/" - config.form_secret = "123abc" + config["public_baseurl"] = "https://example.com/" + config["form_secret"] = "123abc" - config.server_notices_mxid = "@notices:test" - config.server_notices_mxid_display_name = "test display name" - config.server_notices_mxid_avatar_url = None - config.server_notices_room_name = "Server Notices" + config["server_notices"] = { + "system_mxid_localpart": "notices", + "system_mxid_display_name": "test display name", + "system_mxid_avatar_url": None, + "room_name": "Server Notices", + } hs = self.setup_test_homeserver(config=config) diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index a490b81ed4..739ee59ce4 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -29,7 +29,12 @@ from tests import unittest class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): hs_config = self.default_config("test") - hs_config.server_notices_mxid = "@server:test" + hs_config["server_notices"] = { + "system_mxid_localpart": "server", + "system_mxid_display_name": "test display name", + "system_mxid_avatar_url": None, + "room_name": "Server Notices", + } hs = self.setup_test_homeserver(config=hs_config, expire_access_token=True) return hs @@ -79,7 +84,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): self._send_notice.assert_not_called() # Test when mau limiting disabled self.hs.config.hs_disabled = False - self.hs.limit_usage_by_mau = False + self.hs.config.limit_usage_by_mau = False self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id)) 
self._send_notice.assert_not_called() diff --git a/tests/test_state.py b/tests/test_state.py index 5bcc6aaa18..6491a7105a 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -168,7 +168,7 @@ class StateTestCase(unittest.TestCase): "get_state_resolution_handler", ] ) - hs.config = default_config("tesths") + hs.config = default_config("tesths", True) hs.get_datastore.return_value = self.store hs.get_state_handler.return_value = None hs.get_clock.return_value = MockClock() diff --git a/tests/unittest.py b/tests/unittest.py index 94df8cf47e..26204470b1 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -27,6 +27,7 @@ import twisted.logger from twisted.internet.defer import Deferred from twisted.trial import unittest +from synapse.config.homeserver import HomeServerConfig from synapse.http.server import JsonResource from synapse.http.site import SynapseRequest from synapse.server import HomeServer @@ -245,7 +246,7 @@ class HomeserverTestCase(TestCase): def default_config(self, name="test"): """ - Get a default HomeServer config object. + Get a default HomeServer config dict. Args: name (str): The homeserver name/domain. @@ -335,7 +336,14 @@ class HomeserverTestCase(TestCase): kwargs.update(self._hs_args) if "config" not in kwargs: config = self.default_config() - kwargs["config"] = config + else: + config = kwargs["config"] + + # Parse the config from a config dict into a HomeServerConfig + config_obj = HomeServerConfig() + config_obj.parse_config_dict(config) + kwargs["config"] = config_obj + hs = setup_test_homeserver(self.addCleanup, *args, **kwargs) stor = hs.get_datastore() diff --git a/tests/utils.py b/tests/utils.py index c2ef4b0bb5..f21074ae28 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -110,7 +110,7 @@ def setupdb(): atexit.register(_cleanup) -def default_config(name): +def default_config(name, parse=False): """ Create a reasonable test config. 
""" @@ -121,75 +121,69 @@ def default_config(name): # the test signing key is just an arbitrary ed25519 key to keep the config # parser happy "signing_key": "ed25519 a_lPym qvioDNmfExFBRPgdTU+wtFYKq4JfwFRv7sYVgWvmgJg", + "event_cache_size": 1, + "enable_registration": True, + "enable_registration_captcha": False, + "macaroon_secret_key": "not even a little secret", + "expire_access_token": False, + "trusted_third_party_id_servers": [], + "room_invite_state_types": [], + "password_providers": [], + "worker_replication_url": "", + "worker_app": None, + "email_enable_notifs": False, + "block_non_admin_invites": False, + "federation_domain_whitelist": None, + "federation_rc_reject_limit": 10, + "federation_rc_sleep_limit": 10, + "federation_rc_sleep_delay": 100, + "federation_rc_concurrent": 10, + "filter_timeline_limit": 5000, + "user_directory_search_all_users": False, + "user_consent_server_notice_content": None, + "block_events_without_consent_error": None, + "user_consent_at_registration": False, + "user_consent_policy_name": "Privacy Policy", + "media_storage_providers": [], + "autocreate_auto_join_rooms": True, + "auto_join_rooms": [], + "limit_usage_by_mau": False, + "hs_disabled": False, + "hs_disabled_message": "", + "hs_disabled_limit_type": "", + "max_mau_value": 50, + "mau_trial_days": 0, + "mau_stats_only": False, + "mau_limits_reserved_threepids": [], + "admin_contact": None, + "rc_message": {"per_second": 10000, "burst_count": 10000}, + "rc_registration": {"per_second": 10000, "burst_count": 10000}, + "rc_login": { + "address": {"per_second": 10000, "burst_count": 10000}, + "account": {"per_second": 10000, "burst_count": 10000}, + "failed_attempts": {"per_second": 10000, "burst_count": 10000}, + }, + "saml2_enabled": False, + "public_baseurl": None, + "default_identity_server": None, + "key_refresh_interval": 24 * 60 * 60 * 1000, + "old_signing_keys": {}, + "tls_fingerprints": [], + "use_frozen_dicts": False, + # We need a sane default_room_version, otherwise attempts to create + # rooms will fail. + "default_room_version": "1", + # disable user directory updates, because they get done in the + # background, which upsets the test runner. 
+ "update_user_directory": False, } - config = HomeServerConfig() - config.parse_config_dict(config_dict) + if parse: + config = HomeServerConfig() + config.parse_config_dict(config_dict) + return config - # TODO: move this stuff into config_dict or get rid of it - config.event_cache_size = 1 - config.enable_registration = True - config.enable_registration_captcha = False - config.macaroon_secret_key = "not even a little secret" - config.expire_access_token = False - config.trusted_third_party_id_servers = [] - config.room_invite_state_types = [] - config.password_providers = [] - config.worker_replication_url = "" - config.worker_app = None - config.email_enable_notifs = False - config.block_non_admin_invites = False - config.federation_domain_whitelist = None - config.federation_rc_reject_limit = 10 - config.federation_rc_sleep_limit = 10 - config.federation_rc_sleep_delay = 100 - config.federation_rc_concurrent = 10 - config.filter_timeline_limit = 5000 - config.user_directory_search_all_users = False - config.user_consent_server_notice_content = None - config.block_events_without_consent_error = None - config.user_consent_at_registration = False - config.user_consent_policy_name = "Privacy Policy" - config.media_storage_providers = [] - config.autocreate_auto_join_rooms = True - config.auto_join_rooms = [] - config.limit_usage_by_mau = False - config.hs_disabled = False - config.hs_disabled_message = "" - config.hs_disabled_limit_type = "" - config.max_mau_value = 50 - config.mau_trial_days = 0 - config.mau_stats_only = False - config.mau_limits_reserved_threepids = [] - config.admin_contact = None - config.rc_messages_per_second = 10000 - config.rc_message_burst_count = 10000 - config.rc_registration.per_second = 10000 - config.rc_registration.burst_count = 10000 - config.rc_login_address.per_second = 10000 - config.rc_login_address.burst_count = 10000 - config.rc_login_account.per_second = 10000 - config.rc_login_account.burst_count = 10000 - config.rc_login_failed_attempts.per_second = 10000 - config.rc_login_failed_attempts.burst_count = 10000 - config.saml2_enabled = False - config.public_baseurl = None - config.default_identity_server = None - config.key_refresh_interval = 24 * 60 * 60 * 1000 - config.old_signing_keys = {} - config.tls_fingerprints = [] - - config.use_frozen_dicts = False - - # we need a sane default_room_version, otherwise attempts to create rooms will - # fail. - config.default_room_version = "1" - - # disable user directory updates, because they get done in the - # background, which upsets the test runner. 
- config.update_user_directory = False - - return config + return config_dict class TestHomeServer(HomeServer): @@ -223,7 +217,7 @@ def setup_test_homeserver( from twisted.internet import reactor if config is None: - config = default_config(name) + config = default_config(name, parse=True) config.ldap_enabled = False From b54b03f9e1abc1964fe5f00115a165a2b8e10df5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 9 May 2019 13:21:57 +0100 Subject: [PATCH 117/429] Allow client event serialization to be async --- synapse/events/utils.py | 44 +++++++++++++++++ synapse/handlers/events.py | 8 ++-- synapse/handlers/initial_sync.py | 44 ++++++++++------- synapse/handlers/message.py | 7 +-- synapse/handlers/pagination.py | 22 +++++---- synapse/handlers/search.py | 42 +++++++++-------- synapse/rest/client/v1/events.py | 5 +- synapse/rest/client/v1/room.py | 29 +++++++----- synapse/rest/client/v2_alpha/notifications.py | 10 ++-- synapse/rest/client/v2_alpha/sync.py | 47 ++++++++++--------- synapse/server.py | 5 ++ synapse/util/async_helpers.py | 19 ++++++++ 12 files changed, 187 insertions(+), 95 deletions(-) diff --git a/synapse/events/utils.py b/synapse/events/utils.py index 07fccdd8f9..a5454556cc 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -19,7 +19,10 @@ from six import string_types from frozendict import frozendict +from twisted.internet import defer + from synapse.api.constants import EventTypes +from synapse.util.async_helpers import yieldable_gather_results from . import EventBase @@ -311,3 +314,44 @@ def serialize_event(e, time_now_ms, as_client_event=True, d = only_fields(d, only_event_fields) return d + + +class EventClientSerializer(object): + """Serializes events that are to be sent to clients. + + This is used for bundling extra information with any events to be sent to + clients. + """ + + def __init__(self, hs): + pass + + def serialize_event(self, event, time_now, **kwargs): + """Serializes a single event. + + Args: + event (EventBase) + time_now (int): The current time in milliseconds + **kwargs: Arguments to pass to `serialize_event` + + Returns: + Deferred[dict]: The serialized event + """ + event = serialize_event(event, time_now, **kwargs) + return defer.succeed(event) + + def serialize_events(self, events, time_now, **kwargs): + """Serializes multiple events. 
+ + Args: + event (iter[EventBase]) + time_now (int): The current time in milliseconds + **kwargs: Arguments to pass to `serialize_event` + + Returns: + Deferred[list[dict]]: The list of serialized events + """ + return yieldable_gather_results( + self.serialize_event, events, + time_now=time_now, **kwargs + ) diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index 1b4d8c74ae..6003ad9cca 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -21,7 +21,6 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, Membership from synapse.api.errors import AuthError, SynapseError from synapse.events import EventBase -from synapse.events.utils import serialize_event from synapse.types import UserID from synapse.util.logutils import log_function from synapse.visibility import filter_events_for_client @@ -50,6 +49,7 @@ class EventStreamHandler(BaseHandler): self.notifier = hs.get_notifier() self.state = hs.get_state_handler() self._server_notices_sender = hs.get_server_notices_sender() + self._event_serializer = hs.get_event_client_serializer() @defer.inlineCallbacks @log_function @@ -120,9 +120,9 @@ class EventStreamHandler(BaseHandler): time_now = self.clock.time_msec() - chunks = [ - serialize_event(e, time_now, as_client_event) for e in events - ] + chunks = yield self._event_serializer.serialize_events( + events, time_now, as_client_event=as_client_event, + ) chunk = { "chunk": chunks, diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 7dfae78db0..aaee5db0b7 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -19,7 +19,6 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, Membership from synapse.api.errors import AuthError, Codes, SynapseError -from synapse.events.utils import serialize_event from synapse.events.validator import EventValidator from synapse.handlers.presence import format_user_presence_state from synapse.streams.config import PaginationConfig @@ -43,6 +42,7 @@ class InitialSyncHandler(BaseHandler): self.clock = hs.get_clock() self.validator = EventValidator() self.snapshot_cache = SnapshotCache() + self._event_serializer = hs.get_event_client_serializer() def snapshot_all_rooms(self, user_id=None, pagin_config=None, as_client_event=True, include_archived=False): @@ -138,7 +138,9 @@ class InitialSyncHandler(BaseHandler): d["inviter"] = event.sender invite_event = yield self.store.get_event(event.event_id) - d["invite"] = serialize_event(invite_event, time_now, as_client_event) + d["invite"] = yield self._event_serializer.serialize_event( + invite_event, time_now, as_client_event, + ) rooms_ret.append(d) @@ -185,18 +187,21 @@ class InitialSyncHandler(BaseHandler): time_now = self.clock.time_msec() d["messages"] = { - "chunk": [ - serialize_event(m, time_now, as_client_event) - for m in messages - ], + "chunk": ( + yield self._event_serializer.serialize_events( + messages, time_now=time_now, + as_client_event=as_client_event, + ) + ), "start": start_token.to_string(), "end": end_token.to_string(), } - d["state"] = [ - serialize_event(c, time_now, as_client_event) - for c in current_state.values() - ] + d["state"] = yield self._event_serializer.serialize_events( + current_state.values(), + time_now=time_now, + as_client_event=as_client_event + ) account_data_events = [] tags = tags_by_room.get(event.room_id) @@ -337,11 +342,15 @@ class InitialSyncHandler(BaseHandler): "membership": membership, "room_id": room_id, 
"messages": { - "chunk": [serialize_event(m, time_now) for m in messages], + "chunk": (yield self._event_serializer.serialize_events( + messages, time_now, + )), "start": start_token.to_string(), "end": end_token.to_string(), }, - "state": [serialize_event(s, time_now) for s in room_state.values()], + "state": (yield self._event_serializer.serialize_events( + room_state.values(), time_now, + )), "presence": [], "receipts": [], }) @@ -355,10 +364,9 @@ class InitialSyncHandler(BaseHandler): # TODO: These concurrently time_now = self.clock.time_msec() - state = [ - serialize_event(x, time_now) - for x in current_state.values() - ] + state = yield self._event_serializer.serialize_events( + current_state.values(), time_now, + ) now_token = yield self.hs.get_event_sources().get_current_token() @@ -425,7 +433,9 @@ class InitialSyncHandler(BaseHandler): ret = { "room_id": room_id, "messages": { - "chunk": [serialize_event(m, time_now) for m in messages], + "chunk": (yield self._event_serializer.serialize_events( + messages, time_now, + )), "start": start_token.to_string(), "end": end_token.to_string(), }, diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index e5afeadf68..7b2c33a922 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -32,7 +32,6 @@ from synapse.api.errors import ( ) from synapse.api.room_versions import RoomVersions from synapse.api.urls import ConsentURIBuilder -from synapse.events.utils import serialize_event from synapse.events.validator import EventValidator from synapse.replication.http.send_event import ReplicationSendEventRestServlet from synapse.storage.state import StateFilter @@ -57,6 +56,7 @@ class MessageHandler(object): self.clock = hs.get_clock() self.state = hs.get_state_handler() self.store = hs.get_datastore() + self._event_serializer = hs.get_event_client_serializer() @defer.inlineCallbacks def get_room_data(self, user_id=None, room_id=None, @@ -164,9 +164,10 @@ class MessageHandler(object): room_state = room_state[membership_event_id] now = self.clock.time_msec() - defer.returnValue( - [serialize_event(c, now) for c in room_state.values()] + events = yield self._event_serializer.serialize_events( + room_state.values(), now, ) + defer.returnValue(events) @defer.inlineCallbacks def get_joined_members(self, requester, room_id): diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index e4fdae9266..8f811e24fe 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -20,7 +20,6 @@ from twisted.python.failure import Failure from synapse.api.constants import EventTypes, Membership from synapse.api.errors import SynapseError -from synapse.events.utils import serialize_event from synapse.storage.state import StateFilter from synapse.types import RoomStreamToken from synapse.util.async_helpers import ReadWriteLock @@ -78,6 +77,7 @@ class PaginationHandler(object): self._purges_in_progress_by_room = set() # map from purge id to PurgeStatus self._purges_by_id = {} + self._event_serializer = hs.get_event_client_serializer() def start_purge_history(self, room_id, token, delete_local_events=False): @@ -278,18 +278,22 @@ class PaginationHandler(object): time_now = self.clock.time_msec() chunk = { - "chunk": [ - serialize_event(e, time_now, as_client_event) - for e in events - ], + "chunk": ( + yield self._event_serializer.serialize_events( + events, time_now, + as_client_event=as_client_event, + ) + ), "start": pagin_config.from_token.to_string(), "end": next_token.to_string(), } 
if state: - chunk["state"] = [ - serialize_event(e, time_now, as_client_event) - for e in state - ] + chunk["state"] = ( + yield self._event_serializer.serialize_events( + state, time_now, + as_client_event=as_client_event, + ) + ) defer.returnValue(chunk) diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index 49c439313e..9bba74d6c9 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -23,7 +23,6 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, Membership from synapse.api.errors import SynapseError from synapse.api.filtering import Filter -from synapse.events.utils import serialize_event from synapse.storage.state import StateFilter from synapse.visibility import filter_events_for_client @@ -36,6 +35,7 @@ class SearchHandler(BaseHandler): def __init__(self, hs): super(SearchHandler, self).__init__(hs) + self._event_serializer = hs.get_event_client_serializer() @defer.inlineCallbacks def get_old_rooms_from_upgraded_room(self, room_id): @@ -401,14 +401,16 @@ class SearchHandler(BaseHandler): time_now = self.clock.time_msec() for context in contexts.values(): - context["events_before"] = [ - serialize_event(e, time_now) - for e in context["events_before"] - ] - context["events_after"] = [ - serialize_event(e, time_now) - for e in context["events_after"] - ] + context["events_before"] = ( + yield self._event_serializer.serialize_events( + context["events_before"], time_now, + ) + ) + context["events_after"] = ( + yield self._event_serializer.serialize_events( + context["events_after"], time_now, + ) + ) state_results = {} if include_state: @@ -422,14 +424,13 @@ class SearchHandler(BaseHandler): # We're now about to serialize the events. We should not make any # blocking calls after this. 
Otherwise the 'age' will be wrong - results = [ - { + results = [] + for e in allowed_events: + results.append({ "rank": rank_map[e.event_id], - "result": serialize_event(e, time_now), + "result": (yield self._event_serializer.serialize_event(e, time_now)), "context": contexts.get(e.event_id, {}), - } - for e in allowed_events - ] + }) rooms_cat_res = { "results": results, @@ -438,10 +439,13 @@ class SearchHandler(BaseHandler): } if state_results: - rooms_cat_res["state"] = { - room_id: [serialize_event(e, time_now) for e in state] - for room_id, state in state_results.items() - } + s = {} + for room_id, state in state_results.items(): + s[room_id] = yield self._event_serializer.serialize_events( + state, time_now, + ) + + rooms_cat_res["state"] = s if room_groups and "room_id" in group_keys: rooms_cat_res.setdefault("groups", {})["room_id"] = room_groups diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py index cd9b3bdbd1..c3b0a39ab7 100644 --- a/synapse/rest/client/v1/events.py +++ b/synapse/rest/client/v1/events.py @@ -19,7 +19,6 @@ import logging from twisted.internet import defer from synapse.api.errors import SynapseError -from synapse.events.utils import serialize_event from synapse.streams.config import PaginationConfig from .base import ClientV1RestServlet, client_path_patterns @@ -84,6 +83,7 @@ class EventRestServlet(ClientV1RestServlet): super(EventRestServlet, self).__init__(hs) self.clock = hs.get_clock() self.event_handler = hs.get_event_handler() + self._event_serializer = hs.get_event_client_serializer() @defer.inlineCallbacks def on_GET(self, request, event_id): @@ -92,7 +92,8 @@ class EventRestServlet(ClientV1RestServlet): time_now = self.clock.time_msec() if event: - defer.returnValue((200, serialize_event(event, time_now))) + event = yield self._event_serializer.serialize_event(event, time_now) + defer.returnValue((200, event)) else: defer.returnValue((404, "Event not found.")) diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index fab04965cb..255a85c588 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -26,7 +26,7 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, Membership from synapse.api.errors import AuthError, Codes, SynapseError from synapse.api.filtering import Filter -from synapse.events.utils import format_event_for_client_v2, serialize_event +from synapse.events.utils import format_event_for_client_v2 from synapse.http.servlet import ( assert_params_in_dict, parse_integer, @@ -537,6 +537,7 @@ class RoomEventServlet(ClientV1RestServlet): super(RoomEventServlet, self).__init__(hs) self.clock = hs.get_clock() self.event_handler = hs.get_event_handler() + self._event_serializer = hs.get_event_client_serializer() @defer.inlineCallbacks def on_GET(self, request, room_id, event_id): @@ -545,7 +546,8 @@ class RoomEventServlet(ClientV1RestServlet): time_now = self.clock.time_msec() if event: - defer.returnValue((200, serialize_event(event, time_now))) + event = yield self._event_serializer.serialize_event(event, time_now) + defer.returnValue((200, event)) else: defer.returnValue((404, "Event not found.")) @@ -559,6 +561,7 @@ class RoomEventContextServlet(ClientV1RestServlet): super(RoomEventContextServlet, self).__init__(hs) self.clock = hs.get_clock() self.room_context_handler = hs.get_room_context_handler() + self._event_serializer = hs.get_event_client_serializer() @defer.inlineCallbacks def on_GET(self, request, room_id, event_id): @@ 
-588,16 +591,18 @@ class RoomEventContextServlet(ClientV1RestServlet): ) time_now = self.clock.time_msec() - results["events_before"] = [ - serialize_event(event, time_now) for event in results["events_before"] - ] - results["event"] = serialize_event(results["event"], time_now) - results["events_after"] = [ - serialize_event(event, time_now) for event in results["events_after"] - ] - results["state"] = [ - serialize_event(event, time_now) for event in results["state"] - ] + results["events_before"] = yield self._event_serializer.serialize_events( + results["events_before"], time_now, + ) + results["event"] = yield self._event_serializer.serialize_event( + results["event"], time_now, + ) + results["events_after"] = yield self._event_serializer.serialize_events( + results["events_after"], time_now, + ) + results["state"] = yield self._event_serializer.serialize_events( + results["state"], time_now, + ) defer.returnValue((200, results)) diff --git a/synapse/rest/client/v2_alpha/notifications.py b/synapse/rest/client/v2_alpha/notifications.py index 2a6ea3df5f..0a1eb0ae45 100644 --- a/synapse/rest/client/v2_alpha/notifications.py +++ b/synapse/rest/client/v2_alpha/notifications.py @@ -17,10 +17,7 @@ import logging from twisted.internet import defer -from synapse.events.utils import ( - format_event_for_client_v2_without_room_id, - serialize_event, -) +from synapse.events.utils import format_event_for_client_v2_without_room_id from synapse.http.servlet import RestServlet, parse_integer, parse_string from ._base import client_v2_patterns @@ -36,6 +33,7 @@ class NotificationsServlet(RestServlet): self.store = hs.get_datastore() self.auth = hs.get_auth() self.clock = hs.get_clock() + self._event_serializer = hs.get_event_client_serializer() @defer.inlineCallbacks def on_GET(self, request): @@ -69,11 +67,11 @@ class NotificationsServlet(RestServlet): "profile_tag": pa["profile_tag"], "actions": pa["actions"], "ts": pa["received_ts"], - "event": serialize_event( + "event": (yield self._event_serializer.serialize_event( notif_events[pa["event_id"]], self.clock.time_msec(), event_format=format_event_for_client_v2_without_room_id, - ), + )), } if pa["room_id"] not in receipts_by_room: diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index 39d157a44b..078d65969a 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -26,7 +26,6 @@ from synapse.api.filtering import DEFAULT_FILTER_COLLECTION, FilterCollection from synapse.events.utils import ( format_event_for_client_v2_without_room_id, format_event_raw, - serialize_event, ) from synapse.handlers.presence import format_user_presence_state from synapse.handlers.sync import SyncConfig @@ -86,6 +85,7 @@ class SyncRestServlet(RestServlet): self.filtering = hs.get_filtering() self.presence_handler = hs.get_presence_handler() self._server_notices_sender = hs.get_server_notices_sender() + self._event_serializer = hs.get_event_client_serializer() @defer.inlineCallbacks def on_GET(self, request): @@ -168,14 +168,14 @@ class SyncRestServlet(RestServlet): ) time_now = self.clock.time_msec() - response_content = self.encode_response( + response_content = yield self.encode_response( time_now, sync_result, requester.access_token_id, filter ) defer.returnValue((200, response_content)) - @staticmethod - def encode_response(time_now, sync_result, access_token_id, filter): + @defer.inlineCallbacks + def encode_response(self, time_now, sync_result, access_token_id, filter): if 
filter.event_format == 'client': event_formatter = format_event_for_client_v2_without_room_id elif filter.event_format == 'federation': @@ -183,18 +183,18 @@ class SyncRestServlet(RestServlet): else: raise Exception("Unknown event format %s" % (filter.event_format, )) - joined = SyncRestServlet.encode_joined( + joined = yield self.encode_joined( sync_result.joined, time_now, access_token_id, filter.event_fields, event_formatter, ) - invited = SyncRestServlet.encode_invited( + invited = yield self.encode_invited( sync_result.invited, time_now, access_token_id, event_formatter, ) - archived = SyncRestServlet.encode_archived( + archived = yield self.encode_archived( sync_result.archived, time_now, access_token_id, filter.event_fields, event_formatter, @@ -239,8 +239,8 @@ class SyncRestServlet(RestServlet): ] } - @staticmethod - def encode_joined(rooms, time_now, token_id, event_fields, event_formatter): + @defer.inlineCallbacks + def encode_joined(self, rooms, time_now, token_id, event_fields, event_formatter): """ Encode the joined rooms in a sync result @@ -261,15 +261,15 @@ class SyncRestServlet(RestServlet): """ joined = {} for room in rooms: - joined[room.room_id] = SyncRestServlet.encode_room( + joined[room.room_id] = yield self.encode_room( room, time_now, token_id, joined=True, only_fields=event_fields, event_formatter=event_formatter, ) return joined - @staticmethod - def encode_invited(rooms, time_now, token_id, event_formatter): + @defer.inlineCallbacks + def encode_invited(self, rooms, time_now, token_id, event_formatter): """ Encode the invited rooms in a sync result @@ -289,7 +289,7 @@ class SyncRestServlet(RestServlet): """ invited = {} for room in rooms: - invite = serialize_event( + invite = yield self._event_serializer.serialize_event( room.invite, time_now, token_id=token_id, event_format=event_formatter, is_invite=True, @@ -304,8 +304,8 @@ class SyncRestServlet(RestServlet): return invited - @staticmethod - def encode_archived(rooms, time_now, token_id, event_fields, event_formatter): + @defer.inlineCallbacks + def encode_archived(self, rooms, time_now, token_id, event_fields, event_formatter): """ Encode the archived rooms in a sync result @@ -326,7 +326,7 @@ class SyncRestServlet(RestServlet): """ joined = {} for room in rooms: - joined[room.room_id] = SyncRestServlet.encode_room( + joined[room.room_id] = yield self.encode_room( room, time_now, token_id, joined=False, only_fields=event_fields, event_formatter=event_formatter, @@ -334,9 +334,9 @@ class SyncRestServlet(RestServlet): return joined - @staticmethod + @defer.inlineCallbacks def encode_room( - room, time_now, token_id, joined, + self, room, time_now, token_id, joined, only_fields, event_formatter, ): """ @@ -355,9 +355,10 @@ class SyncRestServlet(RestServlet): Returns: dict[str, object]: the room, encoded in our response format """ - def serialize(event): - return serialize_event( - event, time_now, token_id=token_id, + def serialize(events): + return self._event_serializer.serialize_events( + events, time_now=time_now, + token_id=token_id, event_format=event_formatter, only_event_fields=only_fields, ) @@ -376,8 +377,8 @@ class SyncRestServlet(RestServlet): event.event_id, room.room_id, event.room_id, ) - serialized_state = [serialize(e) for e in state_events] - serialized_timeline = [serialize(e) for e in timeline_events] + serialized_state = yield serialize(state_events) + serialized_timeline = yield serialize(timeline_events) account_data = room.account_data diff --git a/synapse/server.py b/synapse/server.py 
index 8c30ac2fa5..80d40b9272 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -35,6 +35,7 @@ from synapse.crypto import context_factory from synapse.crypto.keyring import Keyring from synapse.events.builder import EventBuilderFactory from synapse.events.spamcheck import SpamChecker +from synapse.events.utils import EventClientSerializer from synapse.federation.federation_client import FederationClient from synapse.federation.federation_server import ( FederationHandlerRegistry, @@ -185,6 +186,7 @@ class HomeServer(object): 'sendmail', 'registration_handler', 'account_validity_handler', + 'event_client_serializer', ] REQUIRED_ON_MASTER_STARTUP = [ @@ -511,6 +513,9 @@ class HomeServer(object): def build_account_validity_handler(self): return AccountValidityHandler(self) + def build_event_client_serializer(self): + return EventClientSerializer(self) + def remove_pusher(self, app_id, push_key, user_id): return self.get_pusherpool().remove_pusher(app_id, push_key, user_id) diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 2f16f23d91..9a17dfdab2 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -156,6 +156,25 @@ def concurrently_execute(func, args, limit): ], consumeErrors=True)).addErrback(unwrapFirstError) +def yieldable_gather_results(func, iter, *args, **kwargs): + """Executes the function with each argument concurrently. + + Args: + func (func): Function to execute that returns a Deferred + iter (iter): An iterable that yields items that get passed as the first + argument to the function + *args: Arguments to be passed to each call to func + + Returns + Deferred: Resolved when all functions have been invoked, or errors if + one of the function calls fails. + """ + return logcontext.make_deferred_yieldable(defer.gatherResults([ + run_in_background(func, item, *args, **kwargs) + for item in iter + ], consumeErrors=True)).addErrback(unwrapFirstError) + + class Linearizer(object): """Limits concurrent access to resources based on a key. Useful to ensure only a few things happen at a time on a given resource. From a80e6b53f903d75b053546db5d99c93bffc4495e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 14 May 2019 11:58:45 +0100 Subject: [PATCH 118/429] Newsfile --- changelog.d/5183.misc | 1 + synapse/rest/client/v2_alpha/sync.py | 12 ++++++------ 2 files changed, 7 insertions(+), 6 deletions(-) create mode 100644 changelog.d/5183.misc diff --git a/changelog.d/5183.misc b/changelog.d/5183.misc new file mode 100644 index 0000000000..a8970f29eb --- /dev/null +++ b/changelog.d/5183.misc @@ -0,0 +1 @@ +Allow client event serialization to be async. 
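For reference, the calling convention these serializer patches move handlers towards looks roughly like the following minimal sketch. The handler class and method here are hypothetical, invented only to illustrate the pattern; only hs.get_clock(), hs.get_event_client_serializer() and serialize_events() come from the patches above.

    from twisted.internet import defer

    class ExampleHandler(object):
        # Hypothetical handler: shows how the new async serializer is consumed.

        def __init__(self, hs):
            self.clock = hs.get_clock()
            self._event_serializer = hs.get_event_client_serializer()

        @defer.inlineCallbacks
        def get_serialized_chunk(self, events):
            time_now = self.clock.time_msec()
            # serialize_events returns a Deferred, so it must be yielded rather
            # than called inline the way the old serialize_event() helper was.
            chunk = yield self._event_serializer.serialize_events(
                events, time_now, as_client_event=True,
            )
            defer.returnValue(chunk)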
diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index 078d65969a..c701e534e7 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -200,7 +200,7 @@ class SyncRestServlet(RestServlet): event_formatter, ) - return { + defer.returnValue({ "account_data": {"events": sync_result.account_data}, "to_device": {"events": sync_result.to_device}, "device_lists": { @@ -222,7 +222,7 @@ class SyncRestServlet(RestServlet): }, "device_one_time_keys_count": sync_result.device_one_time_keys_count, "next_batch": sync_result.next_batch.to_string(), - } + }) @staticmethod def encode_presence(events, time_now): @@ -266,7 +266,7 @@ class SyncRestServlet(RestServlet): event_formatter=event_formatter, ) - return joined + defer.returnValue(joined) @defer.inlineCallbacks def encode_invited(self, rooms, time_now, token_id, event_formatter): @@ -302,7 +302,7 @@ class SyncRestServlet(RestServlet): "invite_state": {"events": invited_state} } - return invited + defer.returnValue(invited) @defer.inlineCallbacks def encode_archived(self, rooms, time_now, token_id, event_fields, event_formatter): @@ -332,7 +332,7 @@ class SyncRestServlet(RestServlet): event_formatter=event_formatter, ) - return joined + defer.returnValue(joined) @defer.inlineCallbacks def encode_room( @@ -398,7 +398,7 @@ class SyncRestServlet(RestServlet): result["unread_notifications"] = room.unread_notifications result["summary"] = room.summary - return result + defer.returnValue(result) def register_servlets(hs, http_server): From 4fb44fb5b9f0043ab7bfbc0aa7251c92680cc639 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 14 May 2019 13:37:44 +0100 Subject: [PATCH 119/429] Expose DataStore._get_events as get_events_as_list This is in preparation for reaction work which requires it. --- synapse/storage/appservice.py | 4 +-- synapse/storage/event_federation.py | 6 ++-- synapse/storage/events_worker.py | 50 ++++++++++++++++++++--------- synapse/storage/search.py | 4 +-- synapse/storage/stream.py | 18 +++++++---- tests/storage/test_appservice.py | 2 +- 6 files changed, 54 insertions(+), 30 deletions(-) diff --git a/synapse/storage/appservice.py b/synapse/storage/appservice.py index 6092f600ba..eb329ebd8b 100644 --- a/synapse/storage/appservice.py +++ b/synapse/storage/appservice.py @@ -302,7 +302,7 @@ class ApplicationServiceTransactionWorkerStore( event_ids = json.loads(entry["event_ids"]) - events = yield self._get_events(event_ids) + events = yield self.get_events_as_list(event_ids) defer.returnValue( AppServiceTransaction(service=service, id=entry["txn_id"], events=events) @@ -358,7 +358,7 @@ class ApplicationServiceTransactionWorkerStore( "get_new_events_for_appservice", get_new_events_for_appservice_txn ) - events = yield self._get_events(event_ids) + events = yield self.get_events_as_list(event_ids) defer.returnValue((upper_bound, events)) diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py index 956f876572..09e39c2c28 100644 --- a/synapse/storage/event_federation.py +++ b/synapse/storage/event_federation.py @@ -45,7 +45,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas """ return self.get_auth_chain_ids( event_ids, include_given=include_given - ).addCallback(self._get_events) + ).addCallback(self.get_events_as_list) def get_auth_chain_ids(self, event_ids, include_given=False): """Get auth events for given event_ids. The events *must* be state events. 
@@ -316,7 +316,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas event_list, limit, ) - .addCallback(self._get_events) + .addCallback(self.get_events_as_list) .addCallback(lambda l: sorted(l, key=lambda e: -e.depth)) ) @@ -382,7 +382,7 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas latest_events, limit, ) - events = yield self._get_events(ids) + events = yield self.get_events_as_list(ids) defer.returnValue(events) def _get_missing_events(self, txn, room_id, earliest_events, latest_events, limit): diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index 663991a9b6..5c40056d11 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -103,7 +103,7 @@ class EventsWorkerStore(SQLBaseStore): Returns: Deferred : A FrozenEvent. """ - events = yield self._get_events( + events = yield self.get_events_as_list( [event_id], check_redacted=check_redacted, get_prev_content=get_prev_content, @@ -142,7 +142,7 @@ class EventsWorkerStore(SQLBaseStore): Returns: Deferred : Dict from event_id to event. """ - events = yield self._get_events( + events = yield self.get_events_as_list( event_ids, check_redacted=check_redacted, get_prev_content=get_prev_content, @@ -152,13 +152,32 @@ class EventsWorkerStore(SQLBaseStore): defer.returnValue({e.event_id: e for e in events}) @defer.inlineCallbacks - def _get_events( + def get_events_as_list( self, event_ids, check_redacted=True, get_prev_content=False, allow_rejected=False, ): + """Get events from the database and return in a list in the same order + as given by `event_ids` arg. + + Args: + event_ids (list): The event_ids of the events to fetch + check_redacted (bool): If True, check if event has been redacted + and redact it. + get_prev_content (bool): If True and event is a state event, + include the previous states content in the unsigned field. + allow_rejected (bool): If True return rejected events. + + Returns: + Deferred[list]: List of events fetched from the database. The + events are in the same order as `event_ids` arg. + + Note that the returned list may be smaller than the list of event + IDs if not all events could be fetched. + """ + if not event_ids: defer.returnValue([]) @@ -202,21 +221,22 @@ class EventsWorkerStore(SQLBaseStore): # # The problem is that we end up at this point when an event # which has been redacted is pulled out of the database by - # _enqueue_events, because _enqueue_events needs to check the - # redaction before it can cache the redacted event. So obviously, - # calling get_event to get the redacted event out of the database - # gives us an infinite loop. + # _enqueue_events, because _enqueue_events needs to check + # the redaction before it can cache the redacted event. So + # obviously, calling get_event to get the redacted event out + # of the database gives us an infinite loop. # - # For now (quick hack to fix during 0.99 release cycle), we just - # go and fetch the relevant row from the db, but it would be nice - # to think about how we can cache this rather than hit the db - # every time we access a redaction event. + # For now (quick hack to fix during 0.99 release cycle), we + # just go and fetch the relevant row from the db, but it + # would be nice to think about how we can cache this rather + # than hit the db every time we access a redaction event. # # One thought on how to do this: - # 1. 
split _get_events up so that it is divided into (a) get the - # rawish event from the db/cache, (b) do the redaction/rejection - # filtering - # 2. have _get_event_from_row just call the first half of that + # 1. split get_events_as_list up so that it is divided into + # (a) get the rawish event from the db/cache, (b) do the + # redaction/rejection filtering + # 2. have _get_event_from_row just call the first half of + # that orig_sender = yield self._simple_select_one_onecol( table="events", diff --git a/synapse/storage/search.py b/synapse/storage/search.py index 226f8f1b7e..ff49eaae02 100644 --- a/synapse/storage/search.py +++ b/synapse/storage/search.py @@ -460,7 +460,7 @@ class SearchStore(BackgroundUpdateStore): results = list(filter(lambda row: row["room_id"] in room_ids, results)) - events = yield self._get_events([r["event_id"] for r in results]) + events = yield self.get_events_as_list([r["event_id"] for r in results]) event_map = {ev.event_id: ev for ev in events} @@ -605,7 +605,7 @@ class SearchStore(BackgroundUpdateStore): results = list(filter(lambda row: row["room_id"] in room_ids, results)) - events = yield self._get_events([r["event_id"] for r in results]) + events = yield self.get_events_as_list([r["event_id"] for r in results]) event_map = {ev.event_id: ev for ev in events} diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 9cd1e0f9fe..d105b6b17d 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -319,7 +319,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): rows = yield self.runInteraction("get_room_events_stream_for_room", f) - ret = yield self._get_events([r.event_id for r in rows], get_prev_content=True) + ret = yield self.get_events_as_list([ + r.event_id for r in rows], get_prev_content=True, + ) self._set_before_and_after(ret, rows, topo_order=from_id is None) @@ -367,7 +369,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): rows = yield self.runInteraction("get_membership_changes_for_user", f) - ret = yield self._get_events([r.event_id for r in rows], get_prev_content=True) + ret = yield self.get_events_as_list( + [r.event_id for r in rows], get_prev_content=True, + ) self._set_before_and_after(ret, rows, topo_order=False) @@ -394,7 +398,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): ) logger.debug("stream before") - events = yield self._get_events( + events = yield self.get_events_as_list( [r.event_id for r in rows], get_prev_content=True ) logger.debug("stream after") @@ -580,11 +584,11 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): event_filter, ) - events_before = yield self._get_events( + events_before = yield self.get_events_as_list( [e for e in results["before"]["event_ids"]], get_prev_content=True ) - events_after = yield self._get_events( + events_after = yield self.get_events_as_list( [e for e in results["after"]["event_ids"]], get_prev_content=True ) @@ -697,7 +701,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): "get_all_new_events_stream", get_all_new_events_stream_txn ) - events = yield self._get_events(event_ids) + events = yield self.get_events_as_list(event_ids) defer.returnValue((upper_bound, events)) @@ -849,7 +853,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): event_filter, ) - events = yield self._get_events( + events = yield self.get_events_as_list( [r.event_id for r in rows], get_prev_content=True ) diff --git a/tests/storage/test_appservice.py b/tests/storage/test_appservice.py index 3f0083831b..25a6c89ef5 100644 
--- a/tests/storage/test_appservice.py +++ b/tests/storage/test_appservice.py @@ -340,7 +340,7 @@ class ApplicationServiceTransactionStoreTestCase(unittest.TestCase): other_events = [Mock(event_id="e5"), Mock(event_id="e6")] # we aren't testing store._base stuff here, so mock this out - self.store._get_events = Mock(return_value=events) + self.store.get_events_as_list = Mock(return_value=events) yield self._insert_txn(self.as_list[1]["id"], 9, other_events) yield self._insert_txn(service.id, 10, events) From 53788a447ffc9a2b35dbb0a43e7ac881fc075b17 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 14 May 2019 13:41:36 +0100 Subject: [PATCH 120/429] Newsfile --- changelog.d/5184.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5184.misc diff --git a/changelog.d/5184.misc b/changelog.d/5184.misc new file mode 100644 index 0000000000..1588bdef6c --- /dev/null +++ b/changelog.d/5184.misc @@ -0,0 +1 @@ +Expose DataStore._get_events as get_events_as_list. From dc4f6d1b0104bb6d736deaec454640681bd31360 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 14 May 2019 14:37:40 +0100 Subject: [PATCH 121/429] Use correct config option for ratelimiting in tests --- tests/utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/utils.py b/tests/utils.py index f21074ae28..f38533a0c7 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -156,7 +156,8 @@ def default_config(name, parse=False): "mau_stats_only": False, "mau_limits_reserved_threepids": [], "admin_contact": None, - "rc_message": {"per_second": 10000, "burst_count": 10000}, + "rc_messages_per_second": 10000, + "rc_message_burst_count": 10000, "rc_registration": {"per_second": 10000, "burst_count": 10000}, "rc_login": { "address": {"per_second": 10000, "burst_count": 10000}, From db3046f5657af0f4f512b7f9aeb0d424808c8663 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 14 May 2019 14:39:27 +0100 Subject: [PATCH 122/429] Newsfile --- changelog.d/5185.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5185.misc diff --git a/changelog.d/5185.misc b/changelog.d/5185.misc new file mode 100644 index 0000000000..d148b03b51 --- /dev/null +++ b/changelog.d/5185.misc @@ -0,0 +1 @@ +Update tests to consistently be configured via the same code that is used when loading from configuration files. From daa2fb63179345b993f213f126d9f7bc019f3281 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 14 May 2019 18:53:09 +0100 Subject: [PATCH 123/429] comment about user_joined_room --- synapse/util/distributor.py | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/util/distributor.py b/synapse/util/distributor.py index 194da87639..e14c8bdfda 100644 --- a/synapse/util/distributor.py +++ b/synapse/util/distributor.py @@ -27,6 +27,7 @@ def user_left_room(distributor, user, room_id): distributor.fire("user_left_room", user=user, room_id=room_id) +# XXX: this is no longer used. We should probably kill it. def user_joined_room(distributor, user, room_id): distributor.fire("user_joined_room", user=user, room_id=room_id) From 6ca88c469373a7ef345d05a4b69afe810e240183 Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 14 May 2019 19:04:59 +0100 Subject: [PATCH 124/429] Only check 3pids not in use when registering We checked that 3pids were not already in use before we checked if we were going to return the account previously registered in the same UI auth session, in which case the 3pids will definitely be in use. 
https://github.com/vector-im/riot-web/issues/9586 --- synapse/rest/client/v2_alpha/register.py | 40 +++++++++++++----------- 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index dc3e265bcd..ecec610859 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -391,13 +391,6 @@ class RegisterRestServlet(RestServlet): # the user-facing checks will probably already have happened in # /register/email/requestToken when we requested a 3pid, but that's not # guaranteed. - # - # Also check that we're not trying to register a 3pid that's already - # been registered. - # - # This has probably happened in /register/email/requestToken as well, - # but if a user hits this endpoint twice then clicks on each link from - # the two activation emails, they would register the same 3pid twice. if auth_result: for login_type in [LoginType.EMAIL_IDENTITY, LoginType.MSISDN]: @@ -413,17 +406,6 @@ class RegisterRestServlet(RestServlet): Codes.THREEPID_DENIED, ) - existingUid = yield self.store.get_user_id_by_threepid( - medium, address, - ) - - if existingUid is not None: - raise SynapseError( - 400, - "%s is already in use" % medium, - Codes.THREEPID_IN_USE, - ) - if registered_user_id is not None: logger.info( "Already registered user ID %r for this session", @@ -446,6 +428,28 @@ class RegisterRestServlet(RestServlet): if auth_result: threepid = auth_result.get(LoginType.EMAIL_IDENTITY) + # Also check that we're not trying to register a 3pid that's already + # been registered. + # + # This has probably happened in /register/email/requestToken as well, + # but if a user hits this endpoint twice then clicks on each link from + # the two activation emails, they would register the same 3pid twice. + for login_type in [LoginType.EMAIL_IDENTITY, LoginType.MSISDN]: + if login_type in auth_result: + medium = auth_result[login_type]['medium'] + address = auth_result[login_type]['address'] + + existingUid = yield self.store.get_user_id_by_threepid( + medium, address, + ) + + if existingUid is not None: + raise SynapseError( + 400, + "%s is already in use" % medium, + Codes.THREEPID_IN_USE, + ) + (registered_user_id, _) = yield self.registration_handler.register( localpart=desired_username, password=new_password, From efefb5bda2364de6c205a8e3fa2e8be95b2ae89e Mon Sep 17 00:00:00 2001 From: David Baker Date: Tue, 14 May 2019 19:18:42 +0100 Subject: [PATCH 125/429] Have I got newsfile for you --- changelog.d/5187.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5187.bugfix diff --git a/changelog.d/5187.bugfix b/changelog.d/5187.bugfix new file mode 100644 index 0000000000..03b985265f --- /dev/null +++ b/changelog.d/5187.bugfix @@ -0,0 +1 @@ +Fix register endpoint returning a previously registered account. 
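In outline, the reordering made by the two patches above can be sketched as follows. This is a condensed, illustrative view of the registration flow after the change, not the literal code; unrelated steps and error handling are elided, and the early-return body is left as a comment.

    # Sketch only: the session's previously-registered account is returned
    # *before* the 3pid-in-use check, so retrying the same UI-auth session
    # no longer fails with M_THREEPID_IN_USE on its own (now in-use) 3pid.
    if registered_user_id is not None:
        logger.info(
            "Already registered user ID %r for this session", registered_user_id
        )
        # ... return the existing registration for this session ...

    # Only genuinely new registrations re-check that the validated
    # email/msisdn is not already bound to another account.
    for login_type in [LoginType.EMAIL_IDENTITY, LoginType.MSISDN]:
        if login_type in auth_result:
            medium = auth_result[login_type]['medium']
            address = auth_result[login_type]['address']

            existingUid = yield self.store.get_user_id_by_threepid(
                medium, address,
            )
            if existingUid is not None:
                raise SynapseError(
                    400, "%s is already in use" % medium, Codes.THREEPID_IN_USE,
                )
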
From 52ddc6c0ed40dfc15e8e4abd383a2a5fc12fcfec Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 15 May 2019 09:52:15 +0100 Subject: [PATCH 126/429] Update docstring with correct type Co-Authored-By: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- synapse/storage/events_worker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index 5c40056d11..adc6cf26b5 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -171,7 +171,7 @@ class EventsWorkerStore(SQLBaseStore): allow_rejected (bool): If True return rejected events. Returns: - Deferred[list]: List of events fetched from the database. The + Deferred[list[EventBase]]: List of events fetched from the database. The events are in the same order as `event_ids` arg. Note that the returned list may be smaller than the list of event From 8ed2f182f74b5a99d643dd0ee5bad34211cd1311 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 15 May 2019 09:52:52 +0100 Subject: [PATCH 127/429] Update docstring with correct return type Co-Authored-By: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- synapse/util/async_helpers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 9a17dfdab2..7253ba120f 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -166,7 +166,7 @@ def yieldable_gather_results(func, iter, *args, **kwargs): *args: Arguments to be passed to each call to func Returns - Deferred: Resolved when all functions have been invoked, or errors if + Deferred[list]: Resolved when all functions have been invoked, or errors if one of the function calls fails. """ return logcontext.make_deferred_yieldable(defer.gatherResults([ From 54d77107c1fde88333ecccedfe30f26fb7645847 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 15 May 2019 11:27:50 +0100 Subject: [PATCH 128/429] Make generating SQL bounds for pagination generic This will allow us to reuse the same structure when we paginate e.g. relations --- synapse/storage/stream.py | 175 +++++++++++++++++++++++++------------- 1 file changed, 116 insertions(+), 59 deletions(-) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index d105b6b17d..163363c0c2 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -64,59 +64,120 @@ _EventDictReturn = namedtuple( ) -def lower_bound(token, engine, inclusive=False): - inclusive = "=" if inclusive else "" - if token.topological is None: - return "(%d <%s %s)" % (token.stream, inclusive, "stream_ordering") - else: - if isinstance(engine, PostgresEngine): - # Postgres doesn't optimise ``(x < a) OR (x=a AND y= (topological_ordering, stream_ordering) + AND (5, 3) < (topological_ordering, stream_ordering) + + would be generated for dir=b, from_token=(6, 7) and to_token=(5, 3). + + Args: + direction (str): Whether we're paginating backwards("b") or + forwards ("f"). + column_names (tuple[str, str]): The column names to bound. Must *not* + be user defined as these get inserted directly into the SQL + statement without escapes. 
+ from_token (tuple[int, int]|None) + to_token (tuple[int, int]|None) + engine: The database engine to generate the clauses for + + Returns: + str: The sql expression + """ + assert direction in ("b", "f") + + where_clause = [] + if from_token: + where_clause.append( + _make_generic_sql_bound( + bound=">=" if direction == "b" else "<", + column_names=column_names, + values=from_token, + engine=engine, ) - return "(%d < %s OR (%d = %s AND %d <%s %s))" % ( - token.topological, - "topological_ordering", - token.topological, - "topological_ordering", - token.stream, - inclusive, - "stream_ordering", ) - -def upper_bound(token, engine, inclusive=True): - inclusive = "=" if inclusive else "" - if token.topological is None: - return "(%d >%s %s)" % (token.stream, inclusive, "stream_ordering") - else: - if isinstance(engine, PostgresEngine): - # Postgres doesn't optimise ``(x > a) OR (x=a AND y>b)`` as well - # as it optimises ``(x,y) > (a,b)`` on multicolumn indexes. So we - # use the later form when running against postgres. - return "((%d,%d) >%s (%s,%s))" % ( - token.topological, - token.stream, - inclusive, - "topological_ordering", - "stream_ordering", + if to_token: + where_clause.append( + _make_generic_sql_bound( + bound="<" if direction == "b" else ">=", + column_names=column_names, + values=to_token, + engine=engine, ) - return "(%d > %s OR (%d = %s AND %d >%s %s))" % ( - token.topological, - "topological_ordering", - token.topological, - "topological_ordering", - token.stream, - inclusive, - "stream_ordering", ) + return " AND ".join(where_clause) + + +def _make_generic_sql_bound(bound, column_names, values, engine): + """Create an SQL expression that bounds the given column names by the + values, e.g. create the equivalent of `(1, 2) < (col1, col2)`. + + Only works with two columns. + + Older versions of SQLite don't support that syntax so we have to expand it + out manually. + + Args: + bound (str): The comparison operator to use. One of ">", "<", ">=", + "<=", where the values are on the left and columns on the right. + names (tuple[str, str]): The column names. Must *not* be user defined + as these get inserted directly into the SQL statement without + escapes. + values (tuple[int, int]): The values to bound the columns by. + engine: The database engine to generate the SQL for + + Returns: + str + """ + + assert(bound in (">", "<", ">=", "<=")) + + name1, name2 = column_names + val1, val2 = values + + if val1 is None: + val2 = int(val2) + return "(%d %s %s)" % (val2, bound, name2) + + val1 = int(val1) + val2 = int(val2) + + if isinstance(engine, PostgresEngine): + # Postgres doesn't optimise ``(x < a) OR (x=a AND y Date: Wed, 15 May 2019 11:33:22 +0100 Subject: [PATCH 129/429] Newsfile --- changelog.d/5191.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5191.misc diff --git a/changelog.d/5191.misc b/changelog.d/5191.misc new file mode 100644 index 0000000000..e0615fec9c --- /dev/null +++ b/changelog.d/5191.misc @@ -0,0 +1 @@ +Make generating SQL bounds for pagination generic. 
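To make the behaviour of the new helper concrete, here is a small usage sketch based on the docstring example above. The engine argument is a stand-in for the configured database engine; the variable name database_engine and the exact output strings shown in the comments are illustrative assumptions, not taken verbatim from the patch.

    # Illustrative only: build the WHERE fragment for a backwards ("b")
    # pagination request bounded by the tokens (6, 7) and (5, 3).
    clause = generate_pagination_where_clause(
        direction="b",
        column_names=("topological_ordering", "stream_ordering"),
        from_token=(6, 7),
        to_token=(5, 3),
        engine=database_engine,  # assumed: a PostgresEngine or SQLite engine
    )

    # On Postgres the row-comparison form is used, giving roughly:
    #   ((6,7) >= (topological_ordering,stream_ordering))
    #     AND ((5,3) < (topological_ordering,stream_ordering))
    # On older SQLite the equivalent expanded OR form is generated instead.
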
From efe3c7977a3dc9c4308388f71e82b837df7d09b4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 14 May 2019 16:59:21 +0100 Subject: [PATCH 130/429] Add simple send_relation API and track in DB --- synapse/api/constants.py | 8 ++ synapse/rest/__init__.py | 2 + synapse/rest/client/v2_alpha/relations.py | 110 ++++++++++++++++++ synapse/storage/__init__.py | 2 + synapse/storage/events.py | 2 + synapse/storage/relations.py | 62 ++++++++++ synapse/storage/schema/delta/54/relations.sql | 27 +++++ tests/rest/client/v2_alpha/test_relations.py | 98 ++++++++++++++++ 8 files changed, 311 insertions(+) create mode 100644 synapse/rest/client/v2_alpha/relations.py create mode 100644 synapse/storage/relations.py create mode 100644 synapse/storage/schema/delta/54/relations.sql create mode 100644 tests/rest/client/v2_alpha/test_relations.py diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 8547a63535..30bebd749f 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -116,3 +116,11 @@ class UserTypes(object): """ SUPPORT = "support" ALL_USER_TYPES = (SUPPORT,) + + +class RelationTypes(object): + """The types of relations known to this server. + """ + ANNOTATION = "m.annotation" + REPLACES = "m.replaces" + REFERENCES = "m.references" diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index 3a24d31d1b..e6110ad9b1 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -44,6 +44,7 @@ from synapse.rest.client.v2_alpha import ( read_marker, receipts, register, + relations, report_event, room_keys, room_upgrade_rest_servlet, @@ -115,6 +116,7 @@ class ClientRestResource(JsonResource): room_upgrade_rest_servlet.register_servlets(hs, client_resource) capabilities.register_servlets(hs, client_resource) account_validity.register_servlets(hs, client_resource) + relations.register_servlets(hs, client_resource) # moving to /_synapse/admin synapse.rest.admin.register_servlets_for_client_rest_resource( diff --git a/synapse/rest/client/v2_alpha/relations.py b/synapse/rest/client/v2_alpha/relations.py new file mode 100644 index 0000000000..b504b4a8be --- /dev/null +++ b/synapse/rest/client/v2_alpha/relations.py @@ -0,0 +1,110 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This class implements the proposed relation APIs from MSC 1849. + +Since the MSC has not been approved all APIs here are unstable and may change at +any time to reflect changes in the MSC. +""" + +import logging + +from twisted.internet import defer + +from synapse.api.constants import EventTypes +from synapse.api.errors import SynapseError +from synapse.http.servlet import ( + RestServlet, + parse_json_object_from_request, + parse_string, +) + +from ._base import client_v2_patterns + +logger = logging.getLogger(__name__) + + +class RelationSendServlet(RestServlet): + """Helper API for sending events that have relation data. 
+ + Example API shape to send a 👍 reaction to a room: + + POST /rooms/!foo/send_relation/$bar/m.annotation/m.reaction?key=%F0%9F%91%8D + {} + + { + "event_id": "$foobar" + } + """ + + PATTERN = ( + "/rooms/(?P[^/]*)/send_relation" + "/(?P[^/]*)/(?P[^/]*)/(?P[^/]*)" + ) + + def __init__(self, hs): + super(RelationSendServlet, self).__init__() + self.auth = hs.get_auth() + self.event_creation_handler = hs.get_event_creation_handler() + + def register(self, http_server): + http_server.register_paths( + "POST", + client_v2_patterns(self.PATTERN + "$", releases=()), + self.on_PUT_or_POST, + ) + http_server.register_paths( + "PUT", + client_v2_patterns(self.PATTERN + "/(?P[^/]*)$", releases=()), + self.on_PUT_or_POST, + ) + + @defer.inlineCallbacks + def on_PUT_or_POST( + self, request, room_id, parent_id, relation_type, event_type, txn_id=None + ): + requester = yield self.auth.get_user_by_req(request, allow_guest=True) + + if event_type == EventTypes.Member: + # Add relations to a membership is meaningless, so we just deny it + # at the CS API rather than trying to handle it correctly. + raise SynapseError(400, "Cannot send member events with relations") + + content = parse_json_object_from_request(request) + + aggregation_key = parse_string(request, "key", encoding="utf-8") + + content["m.relates_to"] = { + "event_id": parent_id, + "key": aggregation_key, + "rel_type": relation_type, + } + + event_dict = { + "type": event_type, + "content": content, + "room_id": room_id, + "sender": requester.user.to_string(), + } + + event = yield self.event_creation_handler.create_and_send_nonmember_event( + requester, event_dict=event_dict, txn_id=txn_id + ) + + defer.returnValue((200, {"event_id": event.event_id})) + + +def register_servlets(hs, http_server): + RelationSendServlet(hs).register(http_server) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index c432041b4e..7522d3fd57 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -49,6 +49,7 @@ from .pusher import PusherStore from .receipts import ReceiptsStore from .registration import RegistrationStore from .rejections import RejectionsStore +from .relations import RelationsStore from .room import RoomStore from .roommember import RoomMemberStore from .search import SearchStore @@ -99,6 +100,7 @@ class DataStore( GroupServerStore, UserErasureStore, MonthlyActiveUsersStore, + RelationsStore, ): def __init__(self, db_conn, hs): self.hs = hs diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 7a7f841c6c..6802bf42ce 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -1351,6 +1351,8 @@ class EventsStore( # Insert into the event_search table. self._store_guest_access_txn(txn, event) + self._handle_event_relations(txn, event) + # Insert into the room_memberships table. self._store_room_members_txn( txn, diff --git a/synapse/storage/relations.py b/synapse/storage/relations.py new file mode 100644 index 0000000000..a4905162e0 --- /dev/null +++ b/synapse/storage/relations.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from synapse.api.constants import RelationTypes +from synapse.storage._base import SQLBaseStore + +logger = logging.getLogger(__name__) + + +class RelationsStore(SQLBaseStore): + def _handle_event_relations(self, txn, event): + """Handles inserting relation data during peristence of events + + Args: + txn + event (EventBase) + """ + relation = event.content.get("m.relates_to") + if not relation: + # No relations + return + + rel_type = relation.get("rel_type") + if rel_type not in ( + RelationTypes.ANNOTATION, + RelationTypes.REFERENCES, + RelationTypes.REPLACES, + ): + # Unknown relation type + return + + parent_id = relation.get("event_id") + if not parent_id: + # Invalid relation + return + + aggregation_key = relation.get("key") + + self._simple_insert_txn( + txn, + table="event_relations", + values={ + "event_id": event.event_id, + "relates_to_id": parent_id, + "relation_type": rel_type, + "aggregation_key": aggregation_key, + }, + ) diff --git a/synapse/storage/schema/delta/54/relations.sql b/synapse/storage/schema/delta/54/relations.sql new file mode 100644 index 0000000000..134862b870 --- /dev/null +++ b/synapse/storage/schema/delta/54/relations.sql @@ -0,0 +1,27 @@ +/* Copyright 2019 New Vector Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Tracks related events, like reactions, replies, edits, etc. Note that things +-- in this table are not necessarily "valid", e.g. it may contain edits from +-- people who don't have power to edit other peoples events. +CREATE TABLE IF NOT EXISTS event_relations ( + event_id TEXT NOT NULL, + relates_to_id TEXT NOT NULL, + relation_type TEXT NOT NULL, + aggregation_key TEXT +); + +CREATE UNIQUE INDEX event_relations_id ON event_relations(event_id); +CREATE INDEX event_relations_relates ON event_relations(relates_to_id, relation_type, aggregation_key); diff --git a/tests/rest/client/v2_alpha/test_relations.py b/tests/rest/client/v2_alpha/test_relations.py new file mode 100644 index 0000000000..61163d5b26 --- /dev/null +++ b/tests/rest/client/v2_alpha/test_relations.py @@ -0,0 +1,98 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import six + +from synapse.api.constants import EventTypes, RelationTypes +from synapse.rest.client.v1 import login, room +from synapse.rest.client.v2_alpha import relations + +from tests import unittest + + +class RelationsTestCase(unittest.HomeserverTestCase): + user_id = "@alice:test" + servlets = [ + relations.register_servlets, + room.register_servlets, + login.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + self.room = self.helper.create_room_as(self.user_id) + res = self.helper.send(self.room, body="Hi!") + self.parent_id = res["event_id"] + + def test_send_relation(self): + """Tests that sending a relation using the new /send_relation works + creates the right shape of event. + """ + + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍") + self.assertEquals(200, channel.code, channel.json_body) + + event_id = channel.json_body["event_id"] + + request, channel = self.make_request( + "GET", "/rooms/%s/event/%s" % (self.room, event_id) + ) + self.render(request) + self.assertEquals(200, channel.code, channel.json_body) + + self.assert_dict( + { + "type": "m.reaction", + "sender": self.user_id, + "content": { + "m.relates_to": { + "event_id": self.parent_id, + "key": u"👍", + "rel_type": RelationTypes.ANNOTATION, + } + }, + }, + channel.json_body, + ) + + def test_deny_membership(self): + """Test that we deny relations on membership events + """ + channel = self._send_relation(RelationTypes.ANNOTATION, EventTypes.Member) + self.assertEquals(400, channel.code, channel.json_body) + + def _send_relation(self, relation_type, event_type, key=None): + """Helper function to send a relation pointing at `self.parent_id` + + Args: + relation_type (str): One of `RelationTypes` + event_type (str): The type of the event to create + key (str|None): The aggregation key used for m.annotation relation + type. + + Returns: + FakeChannel + """ + query = "" + if key: + query = "?key=" + six.moves.urllib.parse.quote_plus(key) + + request, channel = self.make_request( + "POST", + "/_matrix/client/unstable/rooms/%s/send_relation/%s/%s/%s%s" + % (self.room, self.parent_id, relation_type, event_type, query), + b"{}", + ) + self.render(request) + return channel From b50641e357f6139b0807df3714440a8ccc21d628 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 14 May 2019 16:59:21 +0100 Subject: [PATCH 131/429] Add simple pagination API --- synapse/rest/client/v2_alpha/relations.py | 50 ++++++++++++ synapse/storage/relations.py | 80 ++++++++++++++++++++ tests/rest/client/v2_alpha/test_relations.py | 30 ++++++++ 3 files changed, 160 insertions(+) diff --git a/synapse/rest/client/v2_alpha/relations.py b/synapse/rest/client/v2_alpha/relations.py index b504b4a8be..bac9e85c21 100644 --- a/synapse/rest/client/v2_alpha/relations.py +++ b/synapse/rest/client/v2_alpha/relations.py @@ -27,6 +27,7 @@ from synapse.api.constants import EventTypes from synapse.api.errors import SynapseError from synapse.http.servlet import ( RestServlet, + parse_integer, parse_json_object_from_request, parse_string, ) @@ -106,5 +107,54 @@ class RelationSendServlet(RestServlet): defer.returnValue((200, {"event_id": event.event_id})) +class RelationPaginationServlet(RestServlet): + """API to paginate relations on an event by topological ordering, optionally + filtered by relation type and event type. 
+ """ + + PATTERNS = client_v2_patterns( + "/rooms/(?P[^/]*)/relations/(?P[^/]*)" + "(/(?P[^/]*)(/(?P[^/]*))?)?$", + releases=(), + ) + + def __init__(self, hs): + super(RelationPaginationServlet, self).__init__() + self.auth = hs.get_auth() + self.store = hs.get_datastore() + self.clock = hs.get_clock() + self._event_serializer = hs.get_event_client_serializer() + + @defer.inlineCallbacks + def on_GET(self, request, room_id, parent_id, relation_type=None, event_type=None): + requester = yield self.auth.get_user_by_req(request, allow_guest=True) + + yield self.auth.check_in_room_or_world_readable( + room_id, requester.user.to_string() + ) + + limit = parse_integer(request, "limit", default=5) + + result = yield self.store.get_relations_for_event( + event_id=parent_id, + relation_type=relation_type, + event_type=event_type, + limit=limit, + ) + + events = yield self.store.get_events_as_list( + [c["event_id"] for c in result.chunk] + ) + + now = self.clock.time_msec() + events = yield self._event_serializer.serialize_events(events, now) + + return_value = result.to_dict() + return_value["chunk"] = events + + defer.returnValue((200, return_value)) + + def register_servlets(hs, http_server): RelationSendServlet(hs).register(http_server) + RelationPaginationServlet(hs).register(http_server) diff --git a/synapse/storage/relations.py b/synapse/storage/relations.py index a4905162e0..f304b8a9a4 100644 --- a/synapse/storage/relations.py +++ b/synapse/storage/relations.py @@ -15,13 +15,93 @@ import logging +import attr + from synapse.api.constants import RelationTypes from synapse.storage._base import SQLBaseStore logger = logging.getLogger(__name__) +@attr.s +class PaginationChunk(object): + """Returned by relation pagination APIs. + + Attributes: + chunk (list): The rows returned by pagination + """ + + chunk = attr.ib() + + def to_dict(self): + d = {"chunk": self.chunk} + + return d + + class RelationsStore(SQLBaseStore): + def get_relations_for_event( + self, event_id, relation_type=None, event_type=None, limit=5, direction="b" + ): + """Get a list of relations for an event, ordered by topological ordering. + + Args: + event_id (str): Fetch events that relate to this event ID. + relation_type (str|None): Only fetch events with this relation + type, if given. + event_type (str|None): Only fetch events with this event type, if + given. + limit (int): Only fetch the most recent `limit` events. + direction (str): Whether to fetch the most recent first (`"b"`) or + the oldest first (`"f"`). + + Returns: + Deferred[PaginationChunk]: List of event IDs that match relations + requested. The rows are of the form `{"event_id": "..."}`. + """ + + # TODO: Pagination tokens + + where_clause = ["relates_to_id = ?"] + where_args = [event_id] + + if relation_type: + where_clause.append("relation_type = ?") + where_args.append(relation_type) + + if event_type: + where_clause.append("type = ?") + where_args.append(event_type) + + order = "ASC" + if direction == "b": + order = "DESC" + + sql = """ + SELECT event_id FROM event_relations + INNER JOIN events USING (event_id) + WHERE %s + ORDER BY topological_ordering %s, stream_ordering %s + LIMIT ? 
+ """ % ( + " AND ".join(where_clause), + order, + order, + ) + + def _get_recent_references_for_event_txn(txn): + txn.execute(sql, where_args + [limit + 1]) + + events = [{"event_id": row[0]} for row in txn] + + return PaginationChunk( + chunk=list(events[:limit]), + ) + + return self.runInteraction( + "get_recent_references_for_event", _get_recent_references_for_event_txn + ) + def _handle_event_relations(self, txn, event): """Handles inserting relation data during peristence of events diff --git a/tests/rest/client/v2_alpha/test_relations.py b/tests/rest/client/v2_alpha/test_relations.py index 61163d5b26..bcc1c1bb85 100644 --- a/tests/rest/client/v2_alpha/test_relations.py +++ b/tests/rest/client/v2_alpha/test_relations.py @@ -72,6 +72,36 @@ class RelationsTestCase(unittest.HomeserverTestCase): channel = self._send_relation(RelationTypes.ANNOTATION, EventTypes.Member) self.assertEquals(400, channel.code, channel.json_body) + def test_paginate(self): + """Tests that calling pagination API corectly the latest relations. + """ + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction") + self.assertEquals(200, channel.code, channel.json_body) + + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction") + self.assertEquals(200, channel.code, channel.json_body) + annotation_id = channel.json_body["event_id"] + + request, channel = self.make_request( + "GET", + "/_matrix/client/unstable/rooms/%s/relations/%s?limit=1" + % (self.room, self.parent_id), + ) + self.render(request) + self.assertEquals(200, channel.code, channel.json_body) + + # We expect to get back a single pagination result, which is the full + # relation event we sent above. + self.assertEquals(len(channel.json_body["chunk"]), 1, channel.json_body) + self.assert_dict( + { + "event_id": annotation_id, + "sender": self.user_id, + "type": "m.reaction", + }, + channel.json_body["chunk"][0], + ) + def _send_relation(self, relation_type, event_type, key=None): """Helper function to send a relation pointing at `self.parent_id` From 5fb72e6888c2dbd54a7b7d76657ea36c0faccedc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 14 May 2019 17:05:45 +0100 Subject: [PATCH 132/429] Newsfile --- changelog.d/5198.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5198.feature diff --git a/changelog.d/5198.feature b/changelog.d/5198.feature new file mode 100644 index 0000000000..c8f624d172 --- /dev/null +++ b/changelog.d/5198.feature @@ -0,0 +1 @@ +Add experimental support for reactions. From 4a926f528e88ae4dc2a3dd2e1fd86166763c4bb1 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 15 May 2019 13:58:45 +0100 Subject: [PATCH 133/429] 0.99.4 --- CHANGES.md | 6 ++++++ debian/changelog | 7 +++++-- synapse/__init__.py | 2 +- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 3cacca5a6b..91ddd07bc3 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,9 @@ +Synapse 0.99.4 (2019-05-15) +=========================== + +No significant changes. 
+ + Synapse 0.99.4rc1 (2019-05-13) ============================== diff --git a/debian/changelog b/debian/changelog index 454fa8eb16..35cf8ffb20 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,9 +1,12 @@ -matrix-synapse-py3 (0.99.3.2+nmu1) UNRELEASED; urgency=medium +matrix-synapse-py3 (0.99.4) stable; urgency=medium [ Christoph Müller ] * Configure the systemd units to have a log identifier of `matrix-synapse` - -- Christoph Müller Wed, 17 Apr 2019 16:17:32 +0200 + [ Synapse Packaging team ] + * New synapse release 0.99.4. + + -- Synapse Packaging team Wed, 15 May 2019 13:58:08 +0100 matrix-synapse-py3 (0.99.3.2) stable; urgency=medium diff --git a/synapse/__init__.py b/synapse/__init__.py index cd9cfb2409..bf9e810da6 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "0.99.4rc1" +__version__ = "0.99.4" From 13018bb997962fc00fc874c2508d8e76d4cb4603 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 15 May 2019 14:04:02 +0100 Subject: [PATCH 134/429] fix some typos in the changelog --- CHANGES.md | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 91ddd07bc3..1e9c3cf953 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -23,8 +23,8 @@ Features instead of the executable name, `python`. Contributed by Christoph Müller. ([\#5023](https://github.com/matrix-org/synapse/issues/5023)) - Add time-based account expiration. ([\#5027](https://github.com/matrix-org/synapse/issues/5027), [\#5047](https://github.com/matrix-org/synapse/issues/5047), [\#5073](https://github.com/matrix-org/synapse/issues/5073), [\#5116](https://github.com/matrix-org/synapse/issues/5116)) -- Add support for handling /verions, /voip and /push_rules client endpoints to client_reader worker. ([\#5063](https://github.com/matrix-org/synapse/issues/5063), [\#5065](https://github.com/matrix-org/synapse/issues/5065), [\#5070](https://github.com/matrix-org/synapse/issues/5070)) -- Add an configuration option to require authentication on /publicRooms and /profile endpoints. ([\#5083](https://github.com/matrix-org/synapse/issues/5083)) +- Add support for handling `/versions`, `/voip` and `/push_rules` client endpoints to client_reader worker. ([\#5063](https://github.com/matrix-org/synapse/issues/5063), [\#5065](https://github.com/matrix-org/synapse/issues/5065), [\#5070](https://github.com/matrix-org/synapse/issues/5070)) +- Add a configuration option to require authentication on /publicRooms and /profile endpoints. ([\#5083](https://github.com/matrix-org/synapse/issues/5083)) - Move admin APIs to `/_synapse/admin/v1`. (The old paths are retained for backwards-compatibility, for now). ([\#5119](https://github.com/matrix-org/synapse/issues/5119)) - Implement an admin API for sending server notices. Many thanks to @krombel who provided a foundation for this work. ([\#5121](https://github.com/matrix-org/synapse/issues/5121), [\#5142](https://github.com/matrix-org/synapse/issues/5142)) @@ -45,11 +45,9 @@ Bugfixes - Workaround bug in twisted where attempting too many concurrent DNS requests could cause it to hang due to running out of file descriptors. ([\#5037](https://github.com/matrix-org/synapse/issues/5037)) - Make sure we're not registering the same 3pid twice on registration. ([\#5071](https://github.com/matrix-org/synapse/issues/5071)) - Don't crash on lack of expiry templates. ([\#5077](https://github.com/matrix-org/synapse/issues/5077)) -- Fix the ratelimting on third party invites. 
([\#5104](https://github.com/matrix-org/synapse/issues/5104)) +- Fix the ratelimiting on third party invites. ([\#5104](https://github.com/matrix-org/synapse/issues/5104)) - Add some missing limitations to room alias creation. ([\#5124](https://github.com/matrix-org/synapse/issues/5124), [\#5128](https://github.com/matrix-org/synapse/issues/5128)) - Limit the number of EDUs in transactions to 100 as expected by synapse. Thanks to @superboum for this work! ([\#5138](https://github.com/matrix-org/synapse/issues/5138)) -- Fix bogus imports in unit tests. ([\#5154](https://github.com/matrix-org/synapse/issues/5154)) - Internal Changes ---------------- @@ -84,6 +82,7 @@ Internal Changes - Prevent an exception from being raised in a IResolutionReceiver and use a more generic error message for blacklisted URL previews. ([\#5155](https://github.com/matrix-org/synapse/issues/5155)) - Run `black` on the tests directory. ([\#5170](https://github.com/matrix-org/synapse/issues/5170)) - Fix CI after new release of isort. ([\#5179](https://github.com/matrix-org/synapse/issues/5179)) +- Fix bogus imports in unit tests. ([\#5154](https://github.com/matrix-org/synapse/issues/5154)) Synapse 0.99.3.2 (2019-05-03) From e6459c26b45866558056bee991024e093e0de059 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 15 May 2019 17:28:33 +0100 Subject: [PATCH 135/429] Actually implement idempotency --- synapse/rest/client/v2_alpha/relations.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/synapse/rest/client/v2_alpha/relations.py b/synapse/rest/client/v2_alpha/relations.py index bac9e85c21..c3ac73b8c7 100644 --- a/synapse/rest/client/v2_alpha/relations.py +++ b/synapse/rest/client/v2_alpha/relations.py @@ -31,6 +31,7 @@ from synapse.http.servlet import ( parse_json_object_from_request, parse_string, ) +from synapse.rest.client.transactions import HttpTransactionCache from ._base import client_v2_patterns @@ -59,6 +60,7 @@ class RelationSendServlet(RestServlet): super(RelationSendServlet, self).__init__() self.auth = hs.get_auth() self.event_creation_handler = hs.get_event_creation_handler() + self.txns = HttpTransactionCache(hs) def register(self, http_server): http_server.register_paths( @@ -69,7 +71,12 @@ class RelationSendServlet(RestServlet): http_server.register_paths( "PUT", client_v2_patterns(self.PATTERN + "/(?P[^/]*)$", releases=()), - self.on_PUT_or_POST, + self.on_PUT, + ) + + def on_PUT(self, request, *args, **kwargs): + return self.txns.fetch_or_execute_request( + request, self.on_PUT_or_POST, request, *args, **kwargs ) @defer.inlineCallbacks From 5be34fc3e3045b3ba5a97b73ba0f25887874e622 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 15 May 2019 17:30:23 +0100 Subject: [PATCH 136/429] Actually check for None rather falsey --- synapse/storage/relations.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/storage/relations.py b/synapse/storage/relations.py index f304b8a9a4..31ef6679af 100644 --- a/synapse/storage/relations.py +++ b/synapse/storage/relations.py @@ -65,11 +65,11 @@ class RelationsStore(SQLBaseStore): where_clause = ["relates_to_id = ?"] where_args = [event_id] - if relation_type: + if relation_type is not None: where_clause.append("relation_type = ?") where_args.append(relation_type) - if event_type: + if event_type is not None: where_clause.append("type = ?") where_args.append(event_type) From 5f027a315fbf010c213ca6f88141404ed86d05ef Mon Sep 17 00:00:00 2001 From: Richard van der Hoff 
<1389908+richvdh@users.noreply.github.com> Date: Wed, 15 May 2019 17:37:46 +0100 Subject: [PATCH 137/429] Drop support for v2_alpha API prefix (#5190) --- changelog.d/5190.feature | 1 + synapse/api/urls.py | 3 +-- synapse/rest/client/v1/base.py | 8 ++++---- synapse/rest/client/v2_alpha/_base.py | 9 +++------ synapse/rest/client/v2_alpha/auth.py | 18 +++++++++--------- synapse/rest/client/v2_alpha/devices.py | 6 +++--- .../v2_alpha/room_upgrade_rest_servlet.py | 1 - synapse/rest/client/v2_alpha/sendtodevice.py | 1 - 8 files changed, 21 insertions(+), 26 deletions(-) create mode 100644 changelog.d/5190.feature diff --git a/changelog.d/5190.feature b/changelog.d/5190.feature new file mode 100644 index 0000000000..34904aa7a8 --- /dev/null +++ b/changelog.d/5190.feature @@ -0,0 +1 @@ +Drop support for the undocumented /_matrix/client/v2_alpha API prefix. diff --git a/synapse/api/urls.py b/synapse/api/urls.py index cb71d80875..3c6bddff7a 100644 --- a/synapse/api/urls.py +++ b/synapse/api/urls.py @@ -22,8 +22,7 @@ from six.moves.urllib.parse import urlencode from synapse.config import ConfigError -CLIENT_PREFIX = "/_matrix/client/api/v1" -CLIENT_V2_ALPHA_PREFIX = "/_matrix/client/v2_alpha" +CLIENT_API_PREFIX = "/_matrix/client" FEDERATION_PREFIX = "/_matrix/federation" FEDERATION_V1_PREFIX = FEDERATION_PREFIX + "/v1" FEDERATION_V2_PREFIX = FEDERATION_PREFIX + "/v2" diff --git a/synapse/rest/client/v1/base.py b/synapse/rest/client/v1/base.py index c77d7aba68..dc63b661c0 100644 --- a/synapse/rest/client/v1/base.py +++ b/synapse/rest/client/v1/base.py @@ -19,7 +19,7 @@ import logging import re -from synapse.api.urls import CLIENT_PREFIX +from synapse.api.urls import CLIENT_API_PREFIX from synapse.http.servlet import RestServlet from synapse.rest.client.transactions import HttpTransactionCache @@ -36,12 +36,12 @@ def client_path_patterns(path_regex, releases=(0,), include_in_unstable=True): Returns: SRE_Pattern """ - patterns = [re.compile("^" + CLIENT_PREFIX + path_regex)] + patterns = [re.compile("^" + CLIENT_API_PREFIX + "/api/v1" + path_regex)] if include_in_unstable: - unstable_prefix = CLIENT_PREFIX.replace("/api/v1", "/unstable") + unstable_prefix = CLIENT_API_PREFIX + "/unstable" patterns.append(re.compile("^" + unstable_prefix + path_regex)) for release in releases: - new_prefix = CLIENT_PREFIX.replace("/api/v1", "/r%d" % release) + new_prefix = CLIENT_API_PREFIX + "/r%d" % (release,) patterns.append(re.compile("^" + new_prefix + path_regex)) return patterns diff --git a/synapse/rest/client/v2_alpha/_base.py b/synapse/rest/client/v2_alpha/_base.py index 77434937ff..24ac26bf03 100644 --- a/synapse/rest/client/v2_alpha/_base.py +++ b/synapse/rest/client/v2_alpha/_base.py @@ -21,13 +21,12 @@ import re from twisted.internet import defer from synapse.api.errors import InteractiveAuthIncompleteError -from synapse.api.urls import CLIENT_V2_ALPHA_PREFIX +from synapse.api.urls import CLIENT_API_PREFIX logger = logging.getLogger(__name__) def client_v2_patterns(path_regex, releases=(0,), - v2_alpha=True, unstable=True): """Creates a regex compiled client path with the correct client path prefix. 
@@ -39,13 +38,11 @@ def client_v2_patterns(path_regex, releases=(0,), SRE_Pattern """ patterns = [] - if v2_alpha: - patterns.append(re.compile("^" + CLIENT_V2_ALPHA_PREFIX + path_regex)) if unstable: - unstable_prefix = CLIENT_V2_ALPHA_PREFIX.replace("/v2_alpha", "/unstable") + unstable_prefix = CLIENT_API_PREFIX + "/unstable" patterns.append(re.compile("^" + unstable_prefix + path_regex)) for release in releases: - new_prefix = CLIENT_V2_ALPHA_PREFIX.replace("/v2_alpha", "/r%d" % release) + new_prefix = CLIENT_API_PREFIX + "/r%d" % (release,) patterns.append(re.compile("^" + new_prefix + path_regex)) return patterns diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index ac035c7735..4c380ab84d 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -19,7 +19,7 @@ from twisted.internet import defer from synapse.api.constants import LoginType from synapse.api.errors import SynapseError -from synapse.api.urls import CLIENT_V2_ALPHA_PREFIX +from synapse.api.urls import CLIENT_API_PREFIX from synapse.http.server import finish_request from synapse.http.servlet import RestServlet, parse_string @@ -139,8 +139,8 @@ class AuthRestServlet(RestServlet): if stagetype == LoginType.RECAPTCHA: html = RECAPTCHA_TEMPLATE % { 'session': session, - 'myurl': "%s/auth/%s/fallback/web" % ( - CLIENT_V2_ALPHA_PREFIX, LoginType.RECAPTCHA + 'myurl': "%s/r0/auth/%s/fallback/web" % ( + CLIENT_API_PREFIX, LoginType.RECAPTCHA ), 'sitekey': self.hs.config.recaptcha_public_key, } @@ -159,8 +159,8 @@ class AuthRestServlet(RestServlet): self.hs.config.public_baseurl, self.hs.config.user_consent_version, ), - 'myurl': "%s/auth/%s/fallback/web" % ( - CLIENT_V2_ALPHA_PREFIX, LoginType.TERMS + 'myurl': "%s/r0/auth/%s/fallback/web" % ( + CLIENT_API_PREFIX, LoginType.TERMS ), } html_bytes = html.encode("utf8") @@ -203,8 +203,8 @@ class AuthRestServlet(RestServlet): else: html = RECAPTCHA_TEMPLATE % { 'session': session, - 'myurl': "%s/auth/%s/fallback/web" % ( - CLIENT_V2_ALPHA_PREFIX, LoginType.RECAPTCHA + 'myurl': "%s/r0/auth/%s/fallback/web" % ( + CLIENT_API_PREFIX, LoginType.RECAPTCHA ), 'sitekey': self.hs.config.recaptcha_public_key, } @@ -240,8 +240,8 @@ class AuthRestServlet(RestServlet): self.hs.config.public_baseurl, self.hs.config.user_consent_version, ), - 'myurl': "%s/auth/%s/fallback/web" % ( - CLIENT_V2_ALPHA_PREFIX, LoginType.TERMS + 'myurl': "%s/r0/auth/%s/fallback/web" % ( + CLIENT_API_PREFIX, LoginType.TERMS ), } html_bytes = html.encode("utf8") diff --git a/synapse/rest/client/v2_alpha/devices.py b/synapse/rest/client/v2_alpha/devices.py index 9b75bb1377..5a5be7c390 100644 --- a/synapse/rest/client/v2_alpha/devices.py +++ b/synapse/rest/client/v2_alpha/devices.py @@ -30,7 +30,7 @@ logger = logging.getLogger(__name__) class DevicesRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/devices$", v2_alpha=False) + PATTERNS = client_v2_patterns("/devices$") def __init__(self, hs): """ @@ -56,7 +56,7 @@ class DeleteDevicesRestServlet(RestServlet): API for bulk deletion of devices. Accepts a JSON object with a devices key which lists the device_ids to delete. Requires user interactive auth. 
""" - PATTERNS = client_v2_patterns("/delete_devices", v2_alpha=False) + PATTERNS = client_v2_patterns("/delete_devices") def __init__(self, hs): super(DeleteDevicesRestServlet, self).__init__() @@ -95,7 +95,7 @@ class DeleteDevicesRestServlet(RestServlet): class DeviceRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/devices/(?P[^/]*)$", v2_alpha=False) + PATTERNS = client_v2_patterns("/devices/(?P[^/]*)$") def __init__(self, hs): """ diff --git a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py index 3db7ff8d1b..62b8de71fa 100644 --- a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py +++ b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py @@ -50,7 +50,6 @@ class RoomUpgradeRestServlet(RestServlet): PATTERNS = client_v2_patterns( # /rooms/$roomid/upgrade "/rooms/(?P[^/]*)/upgrade$", - v2_alpha=False, ) def __init__(self, hs): diff --git a/synapse/rest/client/v2_alpha/sendtodevice.py b/synapse/rest/client/v2_alpha/sendtodevice.py index a9e9a47a0b..21e9cef2d0 100644 --- a/synapse/rest/client/v2_alpha/sendtodevice.py +++ b/synapse/rest/client/v2_alpha/sendtodevice.py @@ -29,7 +29,6 @@ logger = logging.getLogger(__name__) class SendToDeviceRestServlet(servlet.RestServlet): PATTERNS = client_v2_patterns( "/sendToDevice/(?P[^/]*)/(?P[^/]*)$", - v2_alpha=False ) def __init__(self, hs): From f1e5b413886ba4d9d0a16b028dba89c4a5cb56ac Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 15 May 2019 12:06:04 -0500 Subject: [PATCH 138/429] Make all the rate limiting options more consistent (#5181) --- changelog.d/5181.feature | 1 + docs/sample_config.yaml | 53 +++++------ synapse/config/ratelimiting.py | 115 ++++++++++++++--------- synapse/federation/transport/server.py | 6 +- synapse/handlers/_base.py | 4 +- synapse/rest/client/v2_alpha/register.py | 23 +++-- synapse/util/ratelimitutils.py | 47 +++------ tests/utils.py | 20 ++-- 8 files changed, 138 insertions(+), 131 deletions(-) create mode 100644 changelog.d/5181.feature diff --git a/changelog.d/5181.feature b/changelog.d/5181.feature new file mode 100644 index 0000000000..5ce13aa2ea --- /dev/null +++ b/changelog.d/5181.feature @@ -0,0 +1 @@ +Ratelimiting configuration for clients sending messages and the federation server has been altered to match login ratelimiting. The old configuration names will continue working. Check the sample config for details of the new names. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index c4e5c4cf39..09ee0e8984 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -446,21 +446,15 @@ log_config: "CONFDIR/SERVERNAME.log.config" ## Ratelimiting ## -# Number of messages a client can send per second -# -#rc_messages_per_second: 0.2 - -# Number of message a client can send before being throttled -# -#rc_message_burst_count: 10.0 - -# Ratelimiting settings for registration and login. +# Ratelimiting settings for client actions (registration, login, messaging). # # Each ratelimiting configuration is made of two parameters: # - per_second: number of requests a client can send per second. # - burst_count: number of requests a client can send before being throttled. # # Synapse currently uses the following configurations: +# - one for messages that ratelimits sending based on the account the client +# is using # - one for registration that ratelimits registration requests based on the # client's IP address. 
# - one for login that ratelimits login requests based on the client's IP @@ -473,6 +467,10 @@ log_config: "CONFDIR/SERVERNAME.log.config" # # The defaults are as shown below. # +#rc_message: +# per_second: 0.2 +# burst_count: 10 +# #rc_registration: # per_second: 0.17 # burst_count: 3 @@ -488,29 +486,28 @@ log_config: "CONFDIR/SERVERNAME.log.config" # per_second: 0.17 # burst_count: 3 -# The federation window size in milliseconds -# -#federation_rc_window_size: 1000 -# The number of federation requests from a single server in a window -# before the server will delay processing the request. +# Ratelimiting settings for incoming federation # -#federation_rc_sleep_limit: 10 - -# The duration in milliseconds to delay processing events from -# remote servers by if they go over the sleep limit. +# The rc_federation configuration is made up of the following settings: +# - window_size: window size in milliseconds +# - sleep_limit: number of federation requests from a single server in +# a window before the server will delay processing the request. +# - sleep_delay: duration in milliseconds to delay processing events +# from remote servers by if they go over the sleep limit. +# - reject_limit: maximum number of concurrent federation requests +# allowed from a single server +# - concurrent: number of federation requests to concurrently process +# from a single server # -#federation_rc_sleep_delay: 500 - -# The maximum number of concurrent federation requests allowed -# from a single server +# The defaults are as shown below. # -#federation_rc_reject_limit: 50 - -# The number of federation requests to concurrently process from a -# single server -# -#federation_rc_concurrent: 3 +#rc_federation: +# window_size: 1000 +# sleep_limit: 10 +# sleep_delay: 500 +# reject_limit: 50 +# concurrent: 3 # Target outgoing federation transaction frequency for sending read-receipts, # per-room. diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py index 5a68399e63..5a9adac480 100644 --- a/synapse/config/ratelimiting.py +++ b/synapse/config/ratelimiting.py @@ -16,16 +16,56 @@ from ._base import Config class RateLimitConfig(object): - def __init__(self, config): - self.per_second = config.get("per_second", 0.17) - self.burst_count = config.get("burst_count", 3.0) + def __init__(self, config, defaults={"per_second": 0.17, "burst_count": 3.0}): + self.per_second = config.get("per_second", defaults["per_second"]) + self.burst_count = config.get("burst_count", defaults["burst_count"]) + + +class FederationRateLimitConfig(object): + _items_and_default = { + "window_size": 10000, + "sleep_limit": 10, + "sleep_delay": 500, + "reject_limit": 50, + "concurrent": 3, + } + + def __init__(self, **kwargs): + for i in self._items_and_default.keys(): + setattr(self, i, kwargs.get(i) or self._items_and_default[i]) class RatelimitConfig(Config): - def read_config(self, config): - self.rc_messages_per_second = config.get("rc_messages_per_second", 0.2) - self.rc_message_burst_count = config.get("rc_message_burst_count", 10.0) + + # Load the new-style messages config if it exists. Otherwise fall back + # to the old method. + if "rc_message" in config: + self.rc_message = RateLimitConfig( + config["rc_message"], defaults={"per_second": 0.2, "burst_count": 10.0} + ) + else: + self.rc_message = RateLimitConfig( + { + "per_second": config.get("rc_messages_per_second", 0.2), + "burst_count": config.get("rc_message_burst_count", 10.0), + } + ) + + # Load the new-style federation config, if it exists. 
Otherwise, fall + # back to the old method. + if "federation_rc" in config: + self.rc_federation = FederationRateLimitConfig(**config["rc_federation"]) + else: + self.rc_federation = FederationRateLimitConfig( + **{ + "window_size": config.get("federation_rc_window_size"), + "sleep_limit": config.get("federation_rc_sleep_limit"), + "sleep_delay": config.get("federation_rc_sleep_delay"), + "reject_limit": config.get("federation_rc_reject_limit"), + "concurrent": config.get("federation_rc_concurrent"), + } + ) self.rc_registration = RateLimitConfig(config.get("rc_registration", {})) @@ -33,38 +73,26 @@ class RatelimitConfig(Config): self.rc_login_address = RateLimitConfig(rc_login_config.get("address", {})) self.rc_login_account = RateLimitConfig(rc_login_config.get("account", {})) self.rc_login_failed_attempts = RateLimitConfig( - rc_login_config.get("failed_attempts", {}), + rc_login_config.get("failed_attempts", {}) ) - self.federation_rc_window_size = config.get("federation_rc_window_size", 1000) - self.federation_rc_sleep_limit = config.get("federation_rc_sleep_limit", 10) - self.federation_rc_sleep_delay = config.get("federation_rc_sleep_delay", 500) - self.federation_rc_reject_limit = config.get("federation_rc_reject_limit", 50) - self.federation_rc_concurrent = config.get("federation_rc_concurrent", 3) - self.federation_rr_transactions_per_room_per_second = config.get( - "federation_rr_transactions_per_room_per_second", 50, + "federation_rr_transactions_per_room_per_second", 50 ) def default_config(self, **kwargs): return """\ ## Ratelimiting ## - # Number of messages a client can send per second - # - #rc_messages_per_second: 0.2 - - # Number of message a client can send before being throttled - # - #rc_message_burst_count: 10.0 - - # Ratelimiting settings for registration and login. + # Ratelimiting settings for client actions (registration, login, messaging). # # Each ratelimiting configuration is made of two parameters: # - per_second: number of requests a client can send per second. # - burst_count: number of requests a client can send before being throttled. # # Synapse currently uses the following configurations: + # - one for messages that ratelimits sending based on the account the client + # is using # - one for registration that ratelimits registration requests based on the # client's IP address. # - one for login that ratelimits login requests based on the client's IP @@ -77,6 +105,10 @@ class RatelimitConfig(Config): # # The defaults are as shown below. # + #rc_message: + # per_second: 0.2 + # burst_count: 10 + # #rc_registration: # per_second: 0.17 # burst_count: 3 @@ -92,29 +124,28 @@ class RatelimitConfig(Config): # per_second: 0.17 # burst_count: 3 - # The federation window size in milliseconds - # - #federation_rc_window_size: 1000 - # The number of federation requests from a single server in a window - # before the server will delay processing the request. + # Ratelimiting settings for incoming federation # - #federation_rc_sleep_limit: 10 - - # The duration in milliseconds to delay processing events from - # remote servers by if they go over the sleep limit. + # The rc_federation configuration is made up of the following settings: + # - window_size: window size in milliseconds + # - sleep_limit: number of federation requests from a single server in + # a window before the server will delay processing the request. + # - sleep_delay: duration in milliseconds to delay processing events + # from remote servers by if they go over the sleep limit. 
+ # - reject_limit: maximum number of concurrent federation requests + # allowed from a single server + # - concurrent: number of federation requests to concurrently process + # from a single server # - #federation_rc_sleep_delay: 500 - - # The maximum number of concurrent federation requests allowed - # from a single server + # The defaults are as shown below. # - #federation_rc_reject_limit: 50 - - # The number of federation requests to concurrently process from a - # single server - # - #federation_rc_concurrent: 3 + #rc_federation: + # window_size: 1000 + # sleep_limit: 10 + # sleep_delay: 500 + # reject_limit: 50 + # concurrent: 3 # Target outgoing federation transaction frequency for sending read-receipts, # per-room. diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 9030eb18c5..385eda2dca 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -63,11 +63,7 @@ class TransportLayerServer(JsonResource): self.authenticator = Authenticator(hs) self.ratelimiter = FederationRateLimiter( self.clock, - window_size=hs.config.federation_rc_window_size, - sleep_limit=hs.config.federation_rc_sleep_limit, - sleep_msec=hs.config.federation_rc_sleep_delay, - reject_limit=hs.config.federation_rc_reject_limit, - concurrent_requests=hs.config.federation_rc_concurrent, + config=hs.config.rc_federation, ) self.register_servlets() diff --git a/synapse/handlers/_base.py b/synapse/handlers/_base.py index ac09d03ba9..dca337ec61 100644 --- a/synapse/handlers/_base.py +++ b/synapse/handlers/_base.py @@ -90,8 +90,8 @@ class BaseHandler(object): messages_per_second = override.messages_per_second burst_count = override.burst_count else: - messages_per_second = self.hs.config.rc_messages_per_second - burst_count = self.hs.config.rc_message_burst_count + messages_per_second = self.hs.config.rc_message.per_second + burst_count = self.hs.config.rc_message.burst_count allowed, time_allowed = self.ratelimiter.can_do_action( user_id, time_now, diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index dc3e265bcd..3d045880b9 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -31,6 +31,7 @@ from synapse.api.errors import ( SynapseError, UnrecognizedRequestError, ) +from synapse.config.ratelimiting import FederationRateLimitConfig from synapse.config.server import is_threepid_reserved from synapse.http.servlet import ( RestServlet, @@ -153,16 +154,18 @@ class UsernameAvailabilityRestServlet(RestServlet): self.registration_handler = hs.get_registration_handler() self.ratelimiter = FederationRateLimiter( hs.get_clock(), - # Time window of 2s - window_size=2000, - # Artificially delay requests if rate > sleep_limit/window_size - sleep_limit=1, - # Amount of artificial delay to apply - sleep_msec=1000, - # Error with 429 if more than reject_limit requests are queued - reject_limit=1, - # Allow 1 request at a time - concurrent_requests=1, + FederationRateLimitConfig( + # Time window of 2s + window_size=2000, + # Artificially delay requests if rate > sleep_limit/window_size + sleep_limit=1, + # Amount of artificial delay to apply + sleep_msec=1000, + # Error with 429 if more than reject_limit requests are queued + reject_limit=1, + # Allow 1 request at a time + concurrent_requests=1, + ) ) @defer.inlineCallbacks diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index 7deb38f2a7..b146d137f4 100644 --- 
a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -30,31 +30,14 @@ logger = logging.getLogger(__name__) class FederationRateLimiter(object): - def __init__(self, clock, window_size, sleep_limit, sleep_msec, - reject_limit, concurrent_requests): + def __init__(self, clock, config): """ Args: clock (Clock) - window_size (int): The window size in milliseconds. - sleep_limit (int): The number of requests received in the last - `window_size` milliseconds before we artificially start - delaying processing of requests. - sleep_msec (int): The number of milliseconds to delay processing - of incoming requests by. - reject_limit (int): The maximum number of requests that are can be - queued for processing before we start rejecting requests with - a 429 Too Many Requests response. - concurrent_requests (int): The number of concurrent requests to - process. + config (FederationRateLimitConfig) """ self.clock = clock - - self.window_size = window_size - self.sleep_limit = sleep_limit - self.sleep_msec = sleep_msec - self.reject_limit = reject_limit - self.concurrent_requests = concurrent_requests - + self._config = config self.ratelimiters = {} def ratelimit(self, host): @@ -76,25 +59,25 @@ class FederationRateLimiter(object): host, _PerHostRatelimiter( clock=self.clock, - window_size=self.window_size, - sleep_limit=self.sleep_limit, - sleep_msec=self.sleep_msec, - reject_limit=self.reject_limit, - concurrent_requests=self.concurrent_requests, + config=self._config, ) ).ratelimit() class _PerHostRatelimiter(object): - def __init__(self, clock, window_size, sleep_limit, sleep_msec, - reject_limit, concurrent_requests): + def __init__(self, clock, config): + """ + Args: + clock (Clock) + config (FederationRateLimitConfig) + """ self.clock = clock - self.window_size = window_size - self.sleep_limit = sleep_limit - self.sleep_sec = sleep_msec / 1000.0 - self.reject_limit = reject_limit - self.concurrent_requests = concurrent_requests + self.window_size = config.window_size + self.sleep_limit = config.sleep_limit + self.sleep_sec = config.sleep_delay / 1000.0 + self.reject_limit = config.reject_limit + self.concurrent_requests = config.concurrent # request_id objects for requests which have been slept self.sleeping_requests = set() diff --git a/tests/utils.py b/tests/utils.py index f38533a0c7..200c1ceabe 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -134,10 +134,6 @@ def default_config(name, parse=False): "email_enable_notifs": False, "block_non_admin_invites": False, "federation_domain_whitelist": None, - "federation_rc_reject_limit": 10, - "federation_rc_sleep_limit": 10, - "federation_rc_sleep_delay": 100, - "federation_rc_concurrent": 10, "filter_timeline_limit": 5000, "user_directory_search_all_users": False, "user_consent_server_notice_content": None, @@ -156,8 +152,13 @@ def default_config(name, parse=False): "mau_stats_only": False, "mau_limits_reserved_threepids": [], "admin_contact": None, - "rc_messages_per_second": 10000, - "rc_message_burst_count": 10000, + "rc_federation": { + "reject_limit": 10, + "sleep_limit": 10, + "sleep_delay": 10, + "concurrent": 10, + }, + "rc_message": {"per_second": 10000, "burst_count": 10000}, "rc_registration": {"per_second": 10000, "burst_count": 10000}, "rc_login": { "address": {"per_second": 10000, "burst_count": 10000}, @@ -375,12 +376,7 @@ def register_federation_servlets(hs, resource): resource=resource, authenticator=federation_server.Authenticator(hs), ratelimiter=FederationRateLimiter( - hs.get_clock(), - 
window_size=hs.config.federation_rc_window_size, - sleep_limit=hs.config.federation_rc_sleep_limit, - sleep_msec=hs.config.federation_rc_sleep_delay, - reject_limit=hs.config.federation_rc_reject_limit, - concurrent_requests=hs.config.federation_rc_concurrent, + hs.get_clock(), config=hs.config.rc_federation ), ) From cd0faba7cd8d469533342a35882648051b393744 Mon Sep 17 00:00:00 2001 From: David Baker Date: Wed, 15 May 2019 20:53:48 +0100 Subject: [PATCH 139/429] Make newsfile clearer --- changelog.d/5187.bugfix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/5187.bugfix b/changelog.d/5187.bugfix index 03b985265f..df176cf5b2 100644 --- a/changelog.d/5187.bugfix +++ b/changelog.d/5187.bugfix @@ -1 +1 @@ -Fix register endpoint returning a previously registered account. +Fix a bug where the register endpoint would fail with M_THREEPID_IN_USE instead of returning an account previously registered in the same session. From a0603523d2e210cf59f887bd75e1a755720cb7a8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 14 May 2019 16:59:21 +0100 Subject: [PATCH 140/429] Add aggregations API --- synapse/config/server.py | 5 + synapse/events/utils.py | 34 ++- synapse/rest/client/v2_alpha/relations.py | 141 ++++++++++- synapse/storage/relations.py | 225 ++++++++++++++++- tests/rest/client/v2_alpha/test_relations.py | 251 ++++++++++++++++++- 5 files changed, 643 insertions(+), 13 deletions(-) diff --git a/synapse/config/server.py b/synapse/config/server.py index 7874cd9da7..f403477b54 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -100,6 +100,11 @@ class ServerConfig(Config): "block_non_admin_invites", False, ) + # Whether to enable experimental MSC1849 (aka relations) support + self.experimental_msc1849_support_enabled = config.get( + "experimental_msc1849_support_enabled", False, + ) + # Options to control access by tracking MAU self.limit_usage_by_mau = config.get("limit_usage_by_mau", False) self.max_mau_value = 0 diff --git a/synapse/events/utils.py b/synapse/events/utils.py index a5454556cc..16d0c64372 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -21,7 +21,7 @@ from frozendict import frozendict from twisted.internet import defer -from synapse.api.constants import EventTypes +from synapse.api.constants import EventTypes, RelationTypes from synapse.util.async_helpers import yieldable_gather_results from . import EventBase @@ -324,8 +324,12 @@ class EventClientSerializer(object): """ def __init__(self, hs): - pass + self.store = hs.get_datastore() + self.experimental_msc1849_support_enabled = ( + hs.config.experimental_msc1849_support_enabled + ) + @defer.inlineCallbacks def serialize_event(self, event, time_now, **kwargs): """Serializes a single event. 
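The hunk that follows makes `serialize_event` bundle relations into the event's `unsigned` section when MSC1849 support is switched on. As a rough sketch (IDs and counts are illustrative; `RelationTypes` is the constant class imported from `synapse.api.constants` in this patch), a message with some reactions and a reply-reference comes back looking like:

    serialized_event = {
        "type": "m.room.message",
        "event_id": "$parent:example.com",
        "content": {"msgtype": "m.text", "body": "Hi!"},
        "unsigned": {
            "m.relations": {
                RelationTypes.ANNOTATION: {
                    "chunk": [{"type": "m.reaction", "key": "👍", "count": 3}]
                },
                RelationTypes.REFERENCES: {
                    "chunk": [{"event_id": "$reply:example.com"}]
                },
            }
        },
    }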
@@ -337,8 +341,32 @@ class EventClientSerializer(object): Returns: Deferred[dict]: The serialized event """ + # To handle the case of presence events and the like + if not isinstance(event, EventBase): + defer.returnValue(event) + + event_id = event.event_id event = serialize_event(event, time_now, **kwargs) - return defer.succeed(event) + + # If MSC1849 is enabled then we need to look if thre are any relations + # we need to bundle in with the event + if self.experimental_msc1849_support_enabled: + annotations = yield self.store.get_aggregation_groups_for_event( + event_id, + ) + references = yield self.store.get_relations_for_event( + event_id, RelationTypes.REFERENCES, direction="f", + ) + + if annotations.chunk: + r = event["unsigned"].setdefault("m.relations", {}) + r[RelationTypes.ANNOTATION] = annotations.to_dict() + + if references.chunk: + r = event["unsigned"].setdefault("m.relations", {}) + r[RelationTypes.REFERENCES] = references.to_dict() + + defer.returnValue(event) def serialize_events(self, events, time_now, **kwargs): """Serializes multiple events. diff --git a/synapse/rest/client/v2_alpha/relations.py b/synapse/rest/client/v2_alpha/relations.py index c3ac73b8c7..f468409b68 100644 --- a/synapse/rest/client/v2_alpha/relations.py +++ b/synapse/rest/client/v2_alpha/relations.py @@ -23,7 +23,7 @@ import logging from twisted.internet import defer -from synapse.api.constants import EventTypes +from synapse.api.constants import EventTypes, RelationTypes from synapse.api.errors import SynapseError from synapse.http.servlet import ( RestServlet, @@ -141,12 +141,16 @@ class RelationPaginationServlet(RestServlet): ) limit = parse_integer(request, "limit", default=5) + from_token = parse_string(request, "from") + to_token = parse_string(request, "to") result = yield self.store.get_relations_for_event( event_id=parent_id, relation_type=relation_type, event_type=event_type, limit=limit, + from_token=from_token, + to_token=to_token, ) events = yield self.store.get_events_as_list( @@ -162,6 +166,141 @@ class RelationPaginationServlet(RestServlet): defer.returnValue((200, return_value)) +class RelationAggregationPaginationServlet(RestServlet): + """API to paginate aggregation groups of relations, e.g. paginate the + types and counts of the reactions on the events. 
+ + Example request and response: + + GET /rooms/{room_id}/aggregations/{parent_id} + + { + chunk: [ + { + "type": "m.reaction", + "key": "👍", + "count": 3 + } + ] + } + """ + + PATTERNS = client_v2_patterns( + "/rooms/(?P[^/]*)/aggregations/(?P[^/]*)" + "(/(?P[^/]*)(/(?P[^/]*))?)?$", + releases=(), + ) + + def __init__(self, hs): + super(RelationAggregationPaginationServlet, self).__init__() + self.auth = hs.get_auth() + self.store = hs.get_datastore() + + @defer.inlineCallbacks + def on_GET(self, request, room_id, parent_id, relation_type=None, event_type=None): + requester = yield self.auth.get_user_by_req(request, allow_guest=True) + + yield self.auth.check_in_room_or_world_readable( + room_id, requester.user.to_string() + ) + + if relation_type not in (RelationTypes.ANNOTATION, None): + raise SynapseError(400, "Relation type must be 'annotation'") + + limit = parse_integer(request, "limit", default=5) + from_token = parse_string(request, "from") + to_token = parse_string(request, "to") + + res = yield self.store.get_aggregation_groups_for_event( + event_id=parent_id, + event_type=event_type, + limit=limit, + from_token=from_token, + to_token=to_token, + ) + + defer.returnValue((200, res.to_dict())) + + +class RelationAggregationGroupPaginationServlet(RestServlet): + """API to paginate within an aggregation group of relations, e.g. paginate + all the 👍 reactions on an event. + + Example request and response: + + GET /rooms/{room_id}/aggregations/{parent_id}/m.annotation/m.reaction/👍 + + { + chunk: [ + { + "type": "m.reaction", + "content": { + "m.relates_to": { + "rel_type": "m.annotation", + "key": "👍" + } + } + }, + ... + ] + } + """ + + PATTERNS = client_v2_patterns( + "/rooms/(?P[^/]*)/aggregations/(?P[^/]*)" + "/(?P[^/]*)/(?P[^/]*)/(?P[^/]*)$", + releases=(), + ) + + def __init__(self, hs): + super(RelationAggregationGroupPaginationServlet, self).__init__() + self.auth = hs.get_auth() + self.store = hs.get_datastore() + self.clock = hs.get_clock() + self._event_serializer = hs.get_event_client_serializer() + + @defer.inlineCallbacks + def on_GET(self, request, room_id, parent_id, relation_type, event_type, key): + requester = yield self.auth.get_user_by_req(request, allow_guest=True) + + yield self.auth.check_in_room_or_world_readable( + room_id, requester.user.to_string() + ) + + if relation_type != RelationTypes.ANNOTATION: + raise SynapseError(400, "Relation type must be 'annotation'") + + limit = parse_integer(request, "limit", default=5) + from_token = parse_string(request, "from") + to_token = parse_string(request, "to") + + result = yield self.store.get_relations_for_event( + event_id=parent_id, + relation_type=relation_type, + event_type=event_type, + aggregation_key=key, + limit=limit, + from_token=from_token, + to_token=to_token, + ) + + events = yield self.store.get_events_as_list( + [c["event_id"] for c in result.chunk] + ) + + now = self.clock.time_msec() + events = yield self._event_serializer.serialize_events(events, now) + + return_value = result.to_dict() + return_value["chunk"] = events + + defer.returnValue((200, return_value)) + + defer.returnValue((200, return_value)) + + def register_servlets(hs, http_server): RelationSendServlet(hs).register(http_server) RelationPaginationServlet(hs).register(http_server) + RelationAggregationPaginationServlet(hs).register(http_server) + RelationAggregationGroupPaginationServlet(hs).register(http_server) diff --git a/synapse/storage/relations.py b/synapse/storage/relations.py index 31ef6679af..db4b842c97 100644 --- 
a/synapse/storage/relations.py +++ b/synapse/storage/relations.py @@ -18,7 +18,9 @@ import logging import attr from synapse.api.constants import RelationTypes +from synapse.api.errors import SynapseError from synapse.storage._base import SQLBaseStore +from synapse.storage.stream import generate_pagination_where_clause logger = logging.getLogger(__name__) @@ -29,19 +31,94 @@ class PaginationChunk(object): Attributes: chunk (list): The rows returned by pagination + next_batch (Any|None): Token to fetch next set of results with, if + None then there are no more results. + prev_batch (Any|None): Token to fetch previous set of results with, if + None then there are no previous results. """ chunk = attr.ib() + next_batch = attr.ib(default=None) + prev_batch = attr.ib(default=None) def to_dict(self): d = {"chunk": self.chunk} + if self.next_batch: + d["next_batch"] = self.next_batch.to_string() + + if self.prev_batch: + d["prev_batch"] = self.prev_batch.to_string() + return d +@attr.s +class RelationPaginationToken(object): + """Pagination token for relation pagination API. + + As the results are order by topological ordering, we can use the + `topological_ordering` and `stream_ordering` fields of the events at the + boundaries of the chunk as pagination tokens. + + Attributes: + topological (int): The topological ordering of the boundary event + stream (int): The stream ordering of the boundary event. + """ + + topological = attr.ib() + stream = attr.ib() + + @staticmethod + def from_string(string): + try: + t, s = string.split("-") + return RelationPaginationToken(int(t), int(s)) + except ValueError: + raise SynapseError(400, "Invalid token") + + def to_string(self): + return "%d-%d" % (self.topological, self.stream) + + +@attr.s +class AggregationPaginationToken(object): + """Pagination token for relation aggregation pagination API. + + As the results are order by count and then MAX(stream_ordering) of the + aggregation groups, we can just use them as our pagination token. + + Attributes: + count (int): The count of relations in the boundar group. + stream (int): The MAX stream ordering in the boundary group. + """ + + count = attr.ib() + stream = attr.ib() + + @staticmethod + def from_string(string): + try: + c, s = string.split("-") + return AggregationPaginationToken(int(c), int(s)) + except ValueError: + raise SynapseError(400, "Invalid token") + + def to_string(self): + return "%d-%d" % (self.count, self.stream) + + class RelationsStore(SQLBaseStore): def get_relations_for_event( - self, event_id, relation_type=None, event_type=None, limit=5, direction="b" + self, + event_id, + relation_type=None, + event_type=None, + aggregation_key=None, + limit=5, + direction="b", + from_token=None, + to_token=None, ): """Get a list of relations for an event, ordered by topological ordering. @@ -51,16 +128,26 @@ class RelationsStore(SQLBaseStore): type, if given. event_type (str|None): Only fetch events with this event type, if given. + aggregation_key (str|None): Only fetch events with this aggregation + key, if given. limit (int): Only fetch the most recent `limit` events. direction (str): Whether to fetch the most recent first (`"b"`) or the oldest first (`"f"`). + from_token (RelationPaginationToken|None): Fetch rows from the given + token, or from the start if None. + to_token (RelationPaginationToken|None): Fetch rows up to the given + token, or up to the end if None. Returns: Deferred[PaginationChunk]: List of event IDs that match relations requested. 
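The two token classes above are deliberately simple: both serialize to a dash-joined pair of integers, and both are handed to the SQL layer as plain tuples. A quick illustration, assuming the module context of this file (where `attr` is already imported):

    token = RelationPaginationToken.from_string("6-7")
    assert (token.topological, token.stream) == (6, 7)
    assert token.to_string() == "6-7"

    # attr.astuple(token) == (6, 7) is the tuple that get_relations_for_event
    # feeds to generate_pagination_where_clause() below as the from/to bound.
    assert attr.astuple(token) == (6, 7)

    # Malformed client input is rejected:
    # RelationPaginationToken.from_string("not-a-token") raises SynapseError(400, "Invalid token")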
The rows are of the form `{"event_id": "..."}`. """ - # TODO: Pagination tokens + if from_token: + from_token = RelationPaginationToken.from_string(from_token) + + if to_token: + to_token = RelationPaginationToken.from_string(to_token) where_clause = ["relates_to_id = ?"] where_args = [event_id] @@ -73,12 +160,29 @@ class RelationsStore(SQLBaseStore): where_clause.append("type = ?") where_args.append(event_type) - order = "ASC" + if aggregation_key: + where_clause.append("aggregation_key = ?") + where_args.append(aggregation_key) + + pagination_clause = generate_pagination_where_clause( + direction=direction, + column_names=("topological_ordering", "stream_ordering"), + from_token=attr.astuple(from_token) if from_token else None, + to_token=attr.astuple(to_token) if to_token else None, + engine=self.database_engine, + ) + + if pagination_clause: + where_clause.append(pagination_clause) + if direction == "b": order = "DESC" + else: + order = "ASC" sql = """ - SELECT event_id FROM event_relations + SELECT event_id, topological_ordering, stream_ordering + FROM event_relations INNER JOIN events USING (event_id) WHERE %s ORDER BY topological_ordering %s, stream_ordering %s @@ -92,16 +196,125 @@ class RelationsStore(SQLBaseStore): def _get_recent_references_for_event_txn(txn): txn.execute(sql, where_args + [limit + 1]) - events = [{"event_id": row[0]} for row in txn] + last_topo_id = None + last_stream_id = None + events = [] + for row in txn: + events.append({"event_id": row[0]}) + last_topo_id = row[1] + last_stream_id = row[2] + + next_batch = None + if len(events) > limit and last_topo_id and last_stream_id: + next_batch = RelationPaginationToken(last_topo_id, last_stream_id) return PaginationChunk( - chunk=list(events[:limit]), + chunk=list(events[:limit]), next_batch=next_batch, prev_batch=from_token ) return self.runInteraction( "get_recent_references_for_event", _get_recent_references_for_event_txn ) + def get_aggregation_groups_for_event( + self, + event_id, + event_type=None, + limit=5, + direction="b", + from_token=None, + to_token=None, + ): + """Get a list of annotations on the event, grouped by event type and + aggregation key, sorted by count. + + This is used e.g. to get the what and how many reactions have happend + on an event. + + Args: + event_id (str): Fetch events that relate to this event ID. + event_type (str|None): Only fetch events with this event type, if + given. + limit (int): Only fetch the `limit` groups. + direction (str): Whether to fetch the highest count first (`"b"`) or + the lowest count first (`"f"`). + from_token (AggregationPaginationToken|None): Fetch rows from the + given token, or from the start if None. + to_token (AggregationPaginationToken|None): Fetch rows up to the + given token, or up to the end if None. + + + Returns: + Deferred[PaginationChunk]: List of groups of annotations that + match. Each row is a dict with `type`, `key` and `count` fields. 
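For instance, an event with three 👍 reactions and a further page of groups still to fetch would come out of this store method roughly as follows (a sketch using the classes defined earlier in this file; the token values are arbitrary):

    chunk = PaginationChunk(
        chunk=[{"type": "m.reaction", "key": "👍", "count": 3}],
        next_batch=AggregationPaginationToken(count=3, stream=42),
    )
    print(chunk.to_dict())
    # {'chunk': [{'type': 'm.reaction', 'key': '👍', 'count': 3}], 'next_batch': '3-42'}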
+ """ + + if from_token: + from_token = AggregationPaginationToken.from_string(from_token) + + if to_token: + to_token = AggregationPaginationToken.from_string(to_token) + + where_clause = ["relates_to_id = ?", "relation_type = ?"] + where_args = [event_id, RelationTypes.ANNOTATION] + + if event_type: + where_clause.append("type = ?") + where_args.append(event_type) + + having_clause = generate_pagination_where_clause( + direction=direction, + column_names=("COUNT(*)", "MAX(stream_ordering)"), + from_token=attr.astuple(from_token) if from_token else None, + to_token=attr.astuple(to_token) if to_token else None, + engine=self.database_engine, + ) + + if direction == "b": + order = "DESC" + else: + order = "ASC" + + if having_clause: + having_clause = "HAVING " + having_clause + else: + having_clause = "" + + sql = """ + SELECT type, aggregation_key, COUNT(*), MAX(stream_ordering) + FROM event_relations + INNER JOIN events USING (event_id) + WHERE {where_clause} + GROUP BY relation_type, type, aggregation_key + {having_clause} + ORDER BY COUNT(*) {order}, MAX(stream_ordering) {order} + LIMIT ? + """.format( + where_clause=" AND ".join(where_clause), + order=order, + having_clause=having_clause, + ) + + def _get_aggregation_groups_for_event_txn(txn): + txn.execute(sql, where_args + [limit + 1]) + + next_batch = None + events = [] + for row in txn: + events.append({"type": row[0], "key": row[1], "count": row[2]}) + next_batch = AggregationPaginationToken(row[2], row[3]) + + if len(events) <= limit: + next_batch = None + + return PaginationChunk( + chunk=list(events[:limit]), next_batch=next_batch, prev_batch=from_token + ) + + return self.runInteraction( + "get_aggregation_groups_for_event", _get_aggregation_groups_for_event_txn + ) + def _handle_event_relations(self, txn, event): """Handles inserting relation data during peristence of events diff --git a/tests/rest/client/v2_alpha/test_relations.py b/tests/rest/client/v2_alpha/test_relations.py index bcc1c1bb85..661dd45755 100644 --- a/tests/rest/client/v2_alpha/test_relations.py +++ b/tests/rest/client/v2_alpha/test_relations.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +import itertools + import six from synapse.api.constants import EventTypes, RelationTypes @@ -30,6 +32,12 @@ class RelationsTestCase(unittest.HomeserverTestCase): login.register_servlets, ] + def make_homeserver(self, reactor, clock): + # We need to enable msc1849 support for aggregations + config = self.default_config() + config["experimental_msc1849_support_enabled"] = True + return self.setup_test_homeserver(config=config) + def prepare(self, reactor, clock, hs): self.room = self.helper.create_room_as(self.user_id) res = self.helper.send(self.room, body="Hi!") @@ -40,7 +48,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): creates the right shape of event. """ - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍") + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key=u"👍") self.assertEquals(200, channel.code, channel.json_body) event_id = channel.json_body["event_id"] @@ -72,7 +80,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): channel = self._send_relation(RelationTypes.ANNOTATION, EventTypes.Member) self.assertEquals(400, channel.code, channel.json_body) - def test_paginate(self): + def test_basic_paginate_relations(self): """Tests that calling pagination API corectly the latest relations. 
""" channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction") @@ -102,6 +110,243 @@ class RelationsTestCase(unittest.HomeserverTestCase): channel.json_body["chunk"][0], ) + # Make sure next_batch has something in it that looks like it could be a + # valid token. + self.assertIsInstance( + channel.json_body.get("next_batch"), six.string_types, channel.json_body + ) + + def test_repeated_paginate_relations(self): + """Test that if we paginate using a limit and tokens then we get the + expected events. + """ + + expected_event_ids = [] + for _ in range(10): + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction") + self.assertEquals(200, channel.code, channel.json_body) + expected_event_ids.append(channel.json_body["event_id"]) + + prev_token = None + found_event_ids = [] + for _ in range(20): + from_token = "" + if prev_token: + from_token = "&from=" + prev_token + + request, channel = self.make_request( + "GET", + "/_matrix/client/unstable/rooms/%s/relations/%s?limit=1%s" + % (self.room, self.parent_id, from_token), + ) + self.render(request) + self.assertEquals(200, channel.code, channel.json_body) + + found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"]) + next_batch = channel.json_body.get("next_batch") + + self.assertNotEquals(prev_token, next_batch) + prev_token = next_batch + + if not prev_token: + break + + # We paginated backwards, so reverse + found_event_ids.reverse() + self.assertEquals(found_event_ids, expected_event_ids) + + def test_aggregation_pagination_groups(self): + """Test that we can paginate annotation groups correctly. + """ + + sent_groups = {u"👍": 10, u"a": 7, u"b": 5, u"c": 3, u"d": 2, u"e": 1} + for key in itertools.chain.from_iterable( + itertools.repeat(key, num) for key, num in sent_groups.items() + ): + channel = self._send_relation( + RelationTypes.ANNOTATION, "m.reaction", key=key + ) + self.assertEquals(200, channel.code, channel.json_body) + + prev_token = None + found_groups = {} + for _ in range(20): + from_token = "" + if prev_token: + from_token = "&from=" + prev_token + + request, channel = self.make_request( + "GET", + "/_matrix/client/unstable/rooms/%s/aggregations/%s?limit=1%s" + % (self.room, self.parent_id, from_token), + ) + self.render(request) + self.assertEquals(200, channel.code, channel.json_body) + + self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) + + for groups in channel.json_body["chunk"]: + # We only expect reactions + self.assertEqual(groups["type"], "m.reaction", channel.json_body) + + # We should only see each key once + self.assertNotIn(groups["key"], found_groups, channel.json_body) + + found_groups[groups["key"]] = groups["count"] + + next_batch = channel.json_body.get("next_batch") + + self.assertNotEquals(prev_token, next_batch) + prev_token = next_batch + + if not prev_token: + break + + self.assertEquals(sent_groups, found_groups) + + def test_aggregation_pagination_within_group(self): + """Test that we can paginate within an annotation group. 
+ """ + + expected_event_ids = [] + for _ in range(10): + channel = self._send_relation( + RelationTypes.ANNOTATION, "m.reaction", key=u"👍" + ) + self.assertEquals(200, channel.code, channel.json_body) + expected_event_ids.append(channel.json_body["event_id"]) + + # Also send a different type of reaction so that we test we don't see it + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="a") + self.assertEquals(200, channel.code, channel.json_body) + + prev_token = None + found_event_ids = [] + encoded_key = six.moves.urllib.parse.quote_plus(u"👍".encode("utf-8")) + for _ in range(20): + from_token = "" + if prev_token: + from_token = "&from=" + prev_token + + request, channel = self.make_request( + "GET", + "/_matrix/client/unstable/rooms/%s" + "/aggregations/%s/%s/m.reaction/%s?limit=1%s" + % ( + self.room, + self.parent_id, + RelationTypes.ANNOTATION, + encoded_key, + from_token, + ), + ) + self.render(request) + self.assertEquals(200, channel.code, channel.json_body) + + self.assertEqual(len(channel.json_body["chunk"]), 1, channel.json_body) + + found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"]) + + next_batch = channel.json_body.get("next_batch") + + self.assertNotEquals(prev_token, next_batch) + prev_token = next_batch + + if not prev_token: + break + + # We paginated backwards, so reverse + found_event_ids.reverse() + self.assertEquals(found_event_ids, expected_event_ids) + + def test_aggregation(self): + """Test that annotations get correctly aggregated. + """ + + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") + self.assertEquals(200, channel.code, channel.json_body) + + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") + self.assertEquals(200, channel.code, channel.json_body) + + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b") + self.assertEquals(200, channel.code, channel.json_body) + + request, channel = self.make_request( + "GET", + "/_matrix/client/unstable/rooms/%s/aggregations/%s" + % (self.room, self.parent_id), + ) + self.render(request) + self.assertEquals(200, channel.code, channel.json_body) + + self.assertEquals( + channel.json_body, + { + "chunk": [ + {"type": "m.reaction", "key": "a", "count": 2}, + {"type": "m.reaction", "key": "b", "count": 1}, + ] + }, + ) + + def test_aggregation_must_be_annotation(self): + """Test that aggregations must be annotations. + """ + + request, channel = self.make_request( + "GET", + "/_matrix/client/unstable/rooms/%s/aggregations/m.replaces/%s?limit=1" + % (self.room, self.parent_id), + ) + self.render(request) + self.assertEquals(400, channel.code, channel.json_body) + + def test_aggregation_get_event(self): + """Test that annotations and references get correctly bundled when + getting the parent event. 
+ """ + + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") + self.assertEquals(200, channel.code, channel.json_body) + + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") + self.assertEquals(200, channel.code, channel.json_body) + + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b") + self.assertEquals(200, channel.code, channel.json_body) + + channel = self._send_relation(RelationTypes.REFERENCES, "m.room.test") + self.assertEquals(200, channel.code, channel.json_body) + reply_1 = channel.json_body["event_id"] + + channel = self._send_relation(RelationTypes.REFERENCES, "m.room.test") + self.assertEquals(200, channel.code, channel.json_body) + reply_2 = channel.json_body["event_id"] + + request, channel = self.make_request( + "GET", "/rooms/%s/event/%s" % (self.room, self.parent_id) + ) + self.render(request) + self.assertEquals(200, channel.code, channel.json_body) + + self.maxDiff = None + + self.assertEquals( + channel.json_body["unsigned"].get("m.relations"), + { + RelationTypes.ANNOTATION: { + "chunk": [ + {"type": "m.reaction", "key": "a", "count": 2}, + {"type": "m.reaction", "key": "b", "count": 1}, + ] + }, + RelationTypes.REFERENCES: { + "chunk": [{"event_id": reply_1}, {"event_id": reply_2}] + }, + }, + ) + def _send_relation(self, relation_type, event_type, key=None): """Helper function to send a relation pointing at `self.parent_id` @@ -116,7 +361,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): """ query = "" if key: - query = "?key=" + six.moves.urllib.parse.quote_plus(key) + query = "?key=" + six.moves.urllib.parse.quote_plus(key.encode("utf-8")) request, channel = self.make_request( "POST", From 33453419b0cbbe29d3f4b376e9eafab10d5d012a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 16 May 2019 09:56:12 +0100 Subject: [PATCH 141/429] Add cache to relations --- synapse/storage/relations.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/synapse/storage/relations.py b/synapse/storage/relations.py index db4b842c97..1fd3d4fafc 100644 --- a/synapse/storage/relations.py +++ b/synapse/storage/relations.py @@ -21,6 +21,7 @@ from synapse.api.constants import RelationTypes from synapse.api.errors import SynapseError from synapse.storage._base import SQLBaseStore from synapse.storage.stream import generate_pagination_where_clause +from synapse.util.caches.descriptors import cached logger = logging.getLogger(__name__) @@ -109,6 +110,7 @@ class AggregationPaginationToken(object): class RelationsStore(SQLBaseStore): + @cached(tree=True) def get_relations_for_event( self, event_id, @@ -216,6 +218,7 @@ class RelationsStore(SQLBaseStore): "get_recent_references_for_event", _get_recent_references_for_event_txn ) + @cached(tree=True) def get_aggregation_groups_for_event( self, event_id, @@ -353,3 +356,8 @@ class RelationsStore(SQLBaseStore): "aggregation_key": aggregation_key, }, ) + + txn.call_after(self.get_relations_for_event.invalidate_many, (parent_id,)) + txn.call_after( + self.get_aggregation_groups_for_event.invalidate_many, (parent_id,) + ) From b5c62c6b2643a138956af0b04521540c270a3e94 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 16 May 2019 10:18:53 +0100 Subject: [PATCH 142/429] Fix relations in worker mode --- synapse/replication/slave/storage/events.py | 13 ++++++++++--- synapse/replication/tcp/streams/_base.py | 1 + synapse/replication/tcp/streams/events.py | 11 ++++++----- synapse/storage/events.py | 12 ++++++++---- synapse/storage/relations.py | 4 +++- 5 files 
changed, 28 insertions(+), 13 deletions(-) diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py index b457c5563f..797450bc66 100644 --- a/synapse/replication/slave/storage/events.py +++ b/synapse/replication/slave/storage/events.py @@ -23,6 +23,7 @@ from synapse.replication.tcp.streams.events import ( from synapse.storage.event_federation import EventFederationWorkerStore from synapse.storage.event_push_actions import EventPushActionsWorkerStore from synapse.storage.events_worker import EventsWorkerStore +from synapse.storage.relations import RelationsWorkerStore from synapse.storage.roommember import RoomMemberWorkerStore from synapse.storage.signatures import SignatureWorkerStore from synapse.storage.state import StateGroupWorkerStore @@ -52,6 +53,7 @@ class SlavedEventStore(EventFederationWorkerStore, EventsWorkerStore, SignatureWorkerStore, UserErasureWorkerStore, + RelationsWorkerStore, BaseSlavedStore): def __init__(self, db_conn, hs): @@ -89,7 +91,7 @@ class SlavedEventStore(EventFederationWorkerStore, for row in rows: self.invalidate_caches_for_event( -token, row.event_id, row.room_id, row.type, row.state_key, - row.redacts, + row.redacts, row.relates_to, backfilled=True, ) return super(SlavedEventStore, self).process_replication_rows( @@ -102,7 +104,7 @@ class SlavedEventStore(EventFederationWorkerStore, if row.type == EventsStreamEventRow.TypeId: self.invalidate_caches_for_event( token, data.event_id, data.room_id, data.type, data.state_key, - data.redacts, + data.redacts, data.relates_to, backfilled=False, ) elif row.type == EventsStreamCurrentStateRow.TypeId: @@ -114,7 +116,8 @@ class SlavedEventStore(EventFederationWorkerStore, raise Exception("Unknown events stream row type %s" % (row.type, )) def invalidate_caches_for_event(self, stream_ordering, event_id, room_id, - etype, state_key, redacts, backfilled): + etype, state_key, redacts, relates_to, + backfilled): self._invalidate_get_event_cache(event_id) self.get_latest_event_ids_in_room.invalidate((room_id,)) @@ -136,3 +139,7 @@ class SlavedEventStore(EventFederationWorkerStore, state_key, stream_ordering ) self.get_invited_rooms_for_user.invalidate((state_key,)) + + if relates_to: + self.get_relations_for_event.invalidate_many((relates_to,)) + self.get_aggregation_groups_for_event.invalidate_many((relates_to,)) diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py index 8971a6a22e..b6ce7a7bee 100644 --- a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py @@ -32,6 +32,7 @@ BackfillStreamRow = namedtuple("BackfillStreamRow", ( "type", # str "state_key", # str, optional "redacts", # str, optional + "relates_to", # str, optional )) PresenceStreamRow = namedtuple("PresenceStreamRow", ( "user_id", # str diff --git a/synapse/replication/tcp/streams/events.py b/synapse/replication/tcp/streams/events.py index e0f6e29248..f1290d022a 100644 --- a/synapse/replication/tcp/streams/events.py +++ b/synapse/replication/tcp/streams/events.py @@ -80,11 +80,12 @@ class BaseEventsStreamRow(object): class EventsStreamEventRow(BaseEventsStreamRow): TypeId = "ev" - event_id = attr.ib() # str - room_id = attr.ib() # str - type = attr.ib() # str - state_key = attr.ib() # str, optional - redacts = attr.ib() # str, optional + event_id = attr.ib() # str + room_id = attr.ib() # str + type = attr.ib() # str + state_key = attr.ib() # str, optional + redacts = attr.ib() # str, optional + relates_to = attr.ib() # str, optional 
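Both the master-side `txn.call_after(...)` hooks added in the previous commit and the worker-side invalidation above depend on the relation caches being declared with `cached(tree=True)`: entries are keyed on the full argument tuple, but a tree cache can be invalidated by key prefix, so one `invalidate_many((parent_id,))` call clears every cached variant for that parent event. A hedged sketch of the idea, assuming a datastore instance exposing the decorated methods:

    from twisted.internet import defer
    from synapse.api.constants import RelationTypes

    @defer.inlineCallbacks
    def _touch_relations(store):
        # Two differently-parameterised calls populate two cache entries, both
        # keyed under the ("$parent", ...) prefix of the tree cache.
        yield store.get_relations_for_event("$parent", relation_type=RelationTypes.ANNOTATION)
        yield store.get_relations_for_event("$parent", relation_type=RelationTypes.REFERENCES)

        # When a new relation to $parent is persisted (or seen over replication),
        # one prefix invalidation per cache drops all of those entries at once.
        store.get_relations_for_event.invalidate_many(("$parent",))
        store.get_aggregation_groups_for_event.invalidate_many(("$parent",))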
@attr.s(slots=True, frozen=True) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 6802bf42ce..b025ebc926 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -1657,10 +1657,11 @@ class EventsStore( def get_all_new_forward_event_rows(txn): sql = ( "SELECT e.stream_ordering, e.event_id, e.room_id, e.type," - " state_key, redacts" + " state_key, redacts, relates_to_id" " FROM events AS e" " LEFT JOIN redactions USING (event_id)" " LEFT JOIN state_events USING (event_id)" + " LEFT JOIN event_relations USING (event_id)" " WHERE ? < stream_ordering AND stream_ordering <= ?" " ORDER BY stream_ordering ASC" " LIMIT ?" @@ -1675,11 +1676,12 @@ class EventsStore( sql = ( "SELECT event_stream_ordering, e.event_id, e.room_id, e.type," - " state_key, redacts" + " state_key, redacts, relates_to_id" " FROM events AS e" " INNER JOIN ex_outlier_stream USING (event_id)" " LEFT JOIN redactions USING (event_id)" " LEFT JOIN state_events USING (event_id)" + " LEFT JOIN event_relations USING (event_id)" " WHERE ? < event_stream_ordering" " AND event_stream_ordering <= ?" " ORDER BY event_stream_ordering DESC" @@ -1700,10 +1702,11 @@ class EventsStore( def get_all_new_backfill_event_rows(txn): sql = ( "SELECT -e.stream_ordering, e.event_id, e.room_id, e.type," - " state_key, redacts" + " state_key, redacts, relates_to_id" " FROM events AS e" " LEFT JOIN redactions USING (event_id)" " LEFT JOIN state_events USING (event_id)" + " LEFT JOIN event_relations USING (event_id)" " WHERE ? > stream_ordering AND stream_ordering >= ?" " ORDER BY stream_ordering ASC" " LIMIT ?" @@ -1718,11 +1721,12 @@ class EventsStore( sql = ( "SELECT -event_stream_ordering, e.event_id, e.room_id, e.type," - " state_key, redacts" + " state_key, redacts, relates_to_id" " FROM events AS e" " INNER JOIN ex_outlier_stream USING (event_id)" " LEFT JOIN redactions USING (event_id)" " LEFT JOIN state_events USING (event_id)" + " LEFT JOIN event_relations USING (event_id)" " WHERE ? > event_stream_ordering" " AND event_stream_ordering >= ?" " ORDER BY event_stream_ordering DESC" diff --git a/synapse/storage/relations.py b/synapse/storage/relations.py index 1fd3d4fafc..732418ec65 100644 --- a/synapse/storage/relations.py +++ b/synapse/storage/relations.py @@ -109,7 +109,7 @@ class AggregationPaginationToken(object): return "%d-%d" % (self.count, self.stream) -class RelationsStore(SQLBaseStore): +class RelationsWorkerStore(SQLBaseStore): @cached(tree=True) def get_relations_for_event( self, @@ -318,6 +318,8 @@ class RelationsStore(SQLBaseStore): "get_aggregation_groups_for_event", _get_aggregation_groups_for_event_txn ) + +class RelationsStore(RelationsWorkerStore): def _handle_event_relations(self, txn, event): """Handles inserting relation data during peristence of events From 4a6d5de98c7b697d2368521a0bb22b9d721dbb38 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Thu, 16 May 2019 13:23:43 +0100 Subject: [PATCH 143/429] Make /sync attempt to return device updates for both joined and invited users (#3484) --- changelog.d/3484.misc | 1 + synapse/handlers/sync.py | 44 +++++++++++++++++++++++----------------- 2 files changed, 26 insertions(+), 19 deletions(-) create mode 100644 changelog.d/3484.misc diff --git a/changelog.d/3484.misc b/changelog.d/3484.misc new file mode 100644 index 0000000000..3645849844 --- /dev/null +++ b/changelog.d/3484.misc @@ -0,0 +1 @@ +Make /sync attempt to return device updates for both joined and invited users. 
Note that this doesn't currently work correctly due to other bugs. diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 153312e39f..1ee9a6e313 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -934,7 +934,7 @@ class SyncHandler(object): res = yield self._generate_sync_entry_for_rooms( sync_result_builder, account_data_by_room ) - newly_joined_rooms, newly_joined_users, _, _ = res + newly_joined_rooms, newly_joined_or_invited_users, _, _ = res _, _, newly_left_rooms, newly_left_users = res block_all_presence_data = ( @@ -943,7 +943,7 @@ class SyncHandler(object): ) if self.hs_config.use_presence and not block_all_presence_data: yield self._generate_sync_entry_for_presence( - sync_result_builder, newly_joined_rooms, newly_joined_users + sync_result_builder, newly_joined_rooms, newly_joined_or_invited_users ) yield self._generate_sync_entry_for_to_device(sync_result_builder) @@ -951,7 +951,7 @@ class SyncHandler(object): device_lists = yield self._generate_sync_entry_for_device_list( sync_result_builder, newly_joined_rooms=newly_joined_rooms, - newly_joined_users=newly_joined_users, + newly_joined_or_invited_users=newly_joined_or_invited_users, newly_left_rooms=newly_left_rooms, newly_left_users=newly_left_users, ) @@ -1036,7 +1036,8 @@ class SyncHandler(object): @measure_func("_generate_sync_entry_for_device_list") @defer.inlineCallbacks def _generate_sync_entry_for_device_list(self, sync_result_builder, - newly_joined_rooms, newly_joined_users, + newly_joined_rooms, + newly_joined_or_invited_users, newly_left_rooms, newly_left_users): user_id = sync_result_builder.sync_config.user.to_string() since_token = sync_result_builder.since_token @@ -1050,7 +1051,7 @@ class SyncHandler(object): # share a room with? for room_id in newly_joined_rooms: joined_users = yield self.state.get_current_users_in_room(room_id) - newly_joined_users.update(joined_users) + newly_joined_or_invited_users.update(joined_users) for room_id in newly_left_rooms: left_users = yield self.state.get_current_users_in_room(room_id) @@ -1058,7 +1059,7 @@ class SyncHandler(object): # TODO: Check that these users are actually new, i.e. either they # weren't in the previous sync *or* they left and rejoined. - changed.update(newly_joined_users) + changed.update(newly_joined_or_invited_users) if not changed and not newly_left_users: defer.returnValue(DeviceLists( @@ -1176,7 +1177,7 @@ class SyncHandler(object): @defer.inlineCallbacks def _generate_sync_entry_for_presence(self, sync_result_builder, newly_joined_rooms, - newly_joined_users): + newly_joined_or_invited_users): """Generates the presence portion of the sync response. Populates the `sync_result_builder` with the result. 
@@ -1184,8 +1185,9 @@ class SyncHandler(object): sync_result_builder(SyncResultBuilder) newly_joined_rooms(list): List of rooms that the user has joined since the last sync (or empty if an initial sync) - newly_joined_users(list): List of users that have joined rooms - since the last sync (or empty if an initial sync) + newly_joined_or_invited_users(list): List of users that have joined + or been invited to rooms since the last sync (or empty if an initial + sync) """ now_token = sync_result_builder.now_token sync_config = sync_result_builder.sync_config @@ -1211,7 +1213,7 @@ class SyncHandler(object): "presence_key", presence_key ) - extra_users_ids = set(newly_joined_users) + extra_users_ids = set(newly_joined_or_invited_users) for room_id in newly_joined_rooms: users = yield self.state.get_current_users_in_room(room_id) extra_users_ids.update(users) @@ -1243,7 +1245,8 @@ class SyncHandler(object): Returns: Deferred(tuple): Returns a 4-tuple of - `(newly_joined_rooms, newly_joined_users, newly_left_rooms, newly_left_users)` + `(newly_joined_rooms, newly_joined_or_invited_users, + newly_left_rooms, newly_left_users)` """ user_id = sync_result_builder.sync_config.user.to_string() block_all_room_ephemeral = ( @@ -1314,8 +1317,8 @@ class SyncHandler(object): sync_result_builder.invited.extend(invited) - # Now we want to get any newly joined users - newly_joined_users = set() + # Now we want to get any newly joined or invited users + newly_joined_or_invited_users = set() newly_left_users = set() if since_token: for joined_sync in sync_result_builder.joined: @@ -1324,19 +1327,22 @@ class SyncHandler(object): ) for event in it: if event.type == EventTypes.Member: - if event.membership == Membership.JOIN: - newly_joined_users.add(event.state_key) + if ( + event.membership == Membership.JOIN or + event.membership == Membership.INVITE + ): + newly_joined_or_invited_users.add(event.state_key) else: prev_content = event.unsigned.get("prev_content", {}) prev_membership = prev_content.get("membership", None) if prev_membership == Membership.JOIN: newly_left_users.add(event.state_key) - newly_left_users -= newly_joined_users + newly_left_users -= newly_joined_or_invited_users defer.returnValue(( newly_joined_rooms, - newly_joined_users, + newly_joined_or_invited_users, newly_left_rooms, newly_left_users, )) @@ -1381,7 +1387,7 @@ class SyncHandler(object): where: room_entries is a list [RoomSyncResultBuilder] invited_rooms is a list [InvitedSyncResult] - newly_joined rooms is a list[str] of room ids + newly_joined_rooms is a list[str] of room ids newly_left_rooms is a list[str] of room ids """ user_id = sync_result_builder.sync_config.user.to_string() @@ -1422,7 +1428,7 @@ class SyncHandler(object): if room_id in sync_result_builder.joined_room_ids and non_joins: # Always include if the user (re)joined the room, especially # important so that device list changes are calculated correctly. 
- # If there are non join member events, but we are still in the room, + # If there are non-join member events, but we are still in the room, # then the user must have left and joined newly_joined_rooms.append(room_id) From 95f3fcda3c398bc82825694bde143a98940194c6 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 16 May 2019 14:19:06 +0100 Subject: [PATCH 144/429] Check that event is visible in new APIs --- synapse/rest/client/v2_alpha/relations.py | 17 +++++++++++++++-- tests/rest/client/v2_alpha/test_relations.py | 2 +- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/synapse/rest/client/v2_alpha/relations.py b/synapse/rest/client/v2_alpha/relations.py index f468409b68..1b53e638eb 100644 --- a/synapse/rest/client/v2_alpha/relations.py +++ b/synapse/rest/client/v2_alpha/relations.py @@ -131,6 +131,7 @@ class RelationPaginationServlet(RestServlet): self.store = hs.get_datastore() self.clock = hs.get_clock() self._event_serializer = hs.get_event_client_serializer() + self.event_handler = hs.get_event_handler() @defer.inlineCallbacks def on_GET(self, request, room_id, parent_id, relation_type=None, event_type=None): @@ -140,6 +141,10 @@ class RelationPaginationServlet(RestServlet): room_id, requester.user.to_string() ) + # This checks that a) the event exists and b) the user is allowed to + # view it. + yield self.event_handler.get_event(requester.user, room_id, parent_id) + limit = parse_integer(request, "limit", default=5) from_token = parse_string(request, "from") to_token = parse_string(request, "to") @@ -195,6 +200,7 @@ class RelationAggregationPaginationServlet(RestServlet): super(RelationAggregationPaginationServlet, self).__init__() self.auth = hs.get_auth() self.store = hs.get_datastore() + self.event_handler = hs.get_event_handler() @defer.inlineCallbacks def on_GET(self, request, room_id, parent_id, relation_type=None, event_type=None): @@ -204,6 +210,10 @@ class RelationAggregationPaginationServlet(RestServlet): room_id, requester.user.to_string() ) + # This checks that a) the event exists and b) the user is allowed to + # view it. + yield self.event_handler.get_event(requester.user, room_id, parent_id) + if relation_type not in (RelationTypes.ANNOTATION, None): raise SynapseError(400, "Relation type must be 'annotation'") @@ -258,6 +268,7 @@ class RelationAggregationGroupPaginationServlet(RestServlet): self.store = hs.get_datastore() self.clock = hs.get_clock() self._event_serializer = hs.get_event_client_serializer() + self.event_handler = hs.get_event_handler() @defer.inlineCallbacks def on_GET(self, request, room_id, parent_id, relation_type, event_type, key): @@ -267,6 +278,10 @@ class RelationAggregationGroupPaginationServlet(RestServlet): room_id, requester.user.to_string() ) + # This checks that a) the event exists and b) the user is allowed to + # view it. 
+ yield self.event_handler.get_event(requester.user, room_id, parent_id) + if relation_type != RelationTypes.ANNOTATION: raise SynapseError(400, "Relation type must be 'annotation'") @@ -296,8 +311,6 @@ class RelationAggregationGroupPaginationServlet(RestServlet): defer.returnValue((200, return_value)) - defer.returnValue((200, return_value)) - def register_servlets(hs, http_server): RelationSendServlet(hs).register(http_server) diff --git a/tests/rest/client/v2_alpha/test_relations.py b/tests/rest/client/v2_alpha/test_relations.py index 661dd45755..775622bd2b 100644 --- a/tests/rest/client/v2_alpha/test_relations.py +++ b/tests/rest/client/v2_alpha/test_relations.py @@ -296,7 +296,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", - "/_matrix/client/unstable/rooms/%s/aggregations/m.replaces/%s?limit=1" + "/_matrix/client/unstable/rooms/%s/aggregations/%s/m.replace?limit=1" % (self.room, self.parent_id), ) self.render(request) From 2c662ddde4e1277a0dc17295748aa5f0c41fa163 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 16 May 2019 14:21:39 +0100 Subject: [PATCH 145/429] Indirect tuple conversion --- synapse/storage/relations.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/synapse/storage/relations.py b/synapse/storage/relations.py index 732418ec65..996cb6903a 100644 --- a/synapse/storage/relations.py +++ b/synapse/storage/relations.py @@ -81,6 +81,9 @@ class RelationPaginationToken(object): def to_string(self): return "%d-%d" % (self.topological, self.stream) + def as_tuple(self): + return attr.astuple(self) + @attr.s class AggregationPaginationToken(object): @@ -108,6 +111,9 @@ class AggregationPaginationToken(object): def to_string(self): return "%d-%d" % (self.count, self.stream) + def as_tuple(self): + return attr.astuple(self) + class RelationsWorkerStore(SQLBaseStore): @cached(tree=True) From 7a7eba8302d6566c044ca82c8ac0da65aa85e36b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 16 May 2019 14:24:58 +0100 Subject: [PATCH 146/429] Move parsing of tokens out of storage layer --- synapse/rest/client/v2_alpha/relations.py | 19 +++++++++++++++++++ synapse/storage/relations.py | 16 ++-------------- 2 files changed, 21 insertions(+), 14 deletions(-) diff --git a/synapse/rest/client/v2_alpha/relations.py b/synapse/rest/client/v2_alpha/relations.py index 1b53e638eb..41e0a44936 100644 --- a/synapse/rest/client/v2_alpha/relations.py +++ b/synapse/rest/client/v2_alpha/relations.py @@ -32,6 +32,7 @@ from synapse.http.servlet import ( parse_string, ) from synapse.rest.client.transactions import HttpTransactionCache +from synapse.storage.relations import AggregationPaginationToken, RelationPaginationToken from ._base import client_v2_patterns @@ -149,6 +150,12 @@ class RelationPaginationServlet(RestServlet): from_token = parse_string(request, "from") to_token = parse_string(request, "to") + if from_token: + from_token = RelationPaginationToken.from_string(from_token) + + if to_token: + to_token = RelationPaginationToken.from_string(to_token) + result = yield self.store.get_relations_for_event( event_id=parent_id, relation_type=relation_type, @@ -221,6 +228,12 @@ class RelationAggregationPaginationServlet(RestServlet): from_token = parse_string(request, "from") to_token = parse_string(request, "to") + if from_token: + from_token = AggregationPaginationToken.from_string(from_token) + + if to_token: + to_token = AggregationPaginationToken.from_string(to_token) + res = yield self.store.get_aggregation_groups_for_event( 
event_id=parent_id, event_type=event_type, @@ -289,6 +302,12 @@ class RelationAggregationGroupPaginationServlet(RestServlet): from_token = parse_string(request, "from") to_token = parse_string(request, "to") + if from_token: + from_token = RelationPaginationToken.from_string(from_token) + + if to_token: + to_token = RelationPaginationToken.from_string(to_token) + result = yield self.store.get_relations_for_event( event_id=parent_id, relation_type=relation_type, diff --git a/synapse/storage/relations.py b/synapse/storage/relations.py index 996cb6903a..de67e305a1 100644 --- a/synapse/storage/relations.py +++ b/synapse/storage/relations.py @@ -54,7 +54,7 @@ class PaginationChunk(object): return d -@attr.s +@attr.s(frozen=True, slots=True) class RelationPaginationToken(object): """Pagination token for relation pagination API. @@ -85,7 +85,7 @@ class RelationPaginationToken(object): return attr.astuple(self) -@attr.s +@attr.s(frozen=True, slots=True) class AggregationPaginationToken(object): """Pagination token for relation aggregation pagination API. @@ -151,12 +151,6 @@ class RelationsWorkerStore(SQLBaseStore): requested. The rows are of the form `{"event_id": "..."}`. """ - if from_token: - from_token = RelationPaginationToken.from_string(from_token) - - if to_token: - to_token = RelationPaginationToken.from_string(to_token) - where_clause = ["relates_to_id = ?"] where_args = [event_id] @@ -258,12 +252,6 @@ class RelationsWorkerStore(SQLBaseStore): match. Each row is a dict with `type`, `key` and `count` fields. """ - if from_token: - from_token = AggregationPaginationToken.from_string(from_token) - - if to_token: - to_token = AggregationPaginationToken.from_string(to_token) - where_clause = ["relates_to_id = ?", "relation_type = ?"] where_args = [event_id, RelationTypes.ANNOTATION] From cd32375846397ed15f27a4f6602bf20999d2b8b3 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 16 May 2019 14:26:41 +0100 Subject: [PATCH 147/429] Add option to disable per-room profiles --- synapse/config/server.py | 11 +++++++++++ synapse/handlers/room_member.py | 9 +++++++++ 2 files changed, 20 insertions(+) diff --git a/synapse/config/server.py b/synapse/config/server.py index 7874cd9da7..1b8968608e 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2017-2018 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -173,6 +174,10 @@ class ServerConfig(Config): "require_membership_for_aliases", True, ) + # Whether to allow per-room membership profiles through the send of membership + # events with profile information that differ from the target's global profile. + self.allow_per_room_profiles = config.get("allow_per_room_profiles", True) + self.listeners = [] for listener in config.get("listeners", []): if not isinstance(listener.get("port", None), int): @@ -566,6 +571,12 @@ class ServerConfig(Config): # Defaults to 'true'. # #require_membership_for_aliases: false + + # Whether to allow per-room membership profiles through the send of membership + # events with profile information that differ from the target's global profile. + # Defaults to 'true'. 
+ # + #allow_per_room_profiles: false """ % locals() def read_arguments(self, args): diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 3e86b9c690..ffc588d454 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -73,6 +74,7 @@ class RoomMemberHandler(object): self.spam_checker = hs.get_spam_checker() self._server_notices_mxid = self.config.server_notices_mxid self._enable_lookup = hs.config.enable_3pid_lookup + self.allow_per_room_profiles = self.config.allow_per_room_profiles # This is only used to get at ratelimit function, and # maybe_kick_guest_users. It's fine there are multiple of these as @@ -357,6 +359,13 @@ class RoomMemberHandler(object): # later on. content = dict(content) + if not self.allow_per_room_profiles: + # Strip profile data, knowing that new profile data will be added to the + # event's content in event_creation_handler.create_event() using the target's + # global profile. + content.pop("displayname", None) + content.pop("avatar_url", None) + effective_membership_state = action if action in ["kick", "unban"]: effective_membership_state = "leave" From 54a582ed44372a8ecd0d20bf474e5ac81e140220 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 16 May 2019 15:09:16 +0100 Subject: [PATCH 148/429] Add test case --- tests/rest/client/v1/test_rooms.py | 68 +++++++++++++++++++++++++++++- 1 file changed, 67 insertions(+), 1 deletion(-) diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index 6220172cde..7639648752 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -24,7 +24,7 @@ from twisted.internet import defer import synapse.rest.admin from synapse.api.constants import Membership -from synapse.rest.client.v1 import login, room +from synapse.rest.client.v1 import login, profile, room from tests import unittest @@ -936,3 +936,69 @@ class PublicRoomsRestrictedTestCase(unittest.HomeserverTestCase): request, channel = self.make_request("GET", self.url, access_token=tok) self.render(request) self.assertEqual(channel.code, 200, channel.result) + + +class PerRoomProfilesForbiddenTestCase(unittest.HomeserverTestCase): + + servlets = [ + synapse.rest.admin.register_servlets_for_client_rest_resource, + room.register_servlets, + login.register_servlets, + profile.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + config = self.default_config() + config["allow_per_room_profiles"] = False + self.hs = self.setup_test_homeserver(config=config) + + return self.hs + + def prepare(self, reactor, clock, homeserver): + self.user_id = self.register_user("test", "test") + self.tok = self.login("test", "test") + + # Set a profile for the test user + self.displayname = "test user" + data = { + "displayname": self.displayname, + } + request_data = json.dumps(data) + request, channel = self.make_request( + "PUT", + "/_matrix/client/r0/profile/%s/displayname" % (self.user_id,), + request_data, + access_token=self.tok, + ) + self.render(request) + self.assertEqual(channel.code, 200, channel.result) + + self.room_id = self.helper.create_room_as(self.user_id, tok=self.tok) + + def test_per_room_profile_forbidden(self): + data = { + "membership": "join", 
+ "displayname": "other test user" + } + request_data = json.dumps(data) + request, channel = self.make_request( + "PUT", + "/_matrix/client/r0/rooms/%s/state/m.room.member/%s" % (self.room_id, self.user_id), + request_data, + access_token=self.tok, + ) + self.render(request) + self.assertEqual(channel.code, 200, channel.result) + event_id = channel.json_body["event_id"] + + request, channel = self.make_request( + "GET", + "/_matrix/client/r0/rooms/%s/event/%s" % (self.room_id, event_id), + access_token=self.tok, + ) + self.render(request) + self.assertEqual(channel.code, 200, channel.result) + + res_displayname = channel.json_body["content"]["displayname"] + self.assertEqual(res_displayname, self.displayname, channel.result) + From efdc55db7500df778ffa3a63c4864ae0241315f0 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 16 May 2019 15:10:24 +0100 Subject: [PATCH 149/429] Forgot copyright --- tests/rest/client/v1/test_rooms.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index 7639648752..ffd28772b0 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. From a5fe16c5a7cbff5106524791f2d7ab621ca13bb1 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 16 May 2019 15:11:37 +0100 Subject: [PATCH 150/429] Changelog + sample config --- changelog.d/5196.feature | 1 + docs/sample_config.yaml | 6 ++++++ 2 files changed, 7 insertions(+) create mode 100644 changelog.d/5196.feature diff --git a/changelog.d/5196.feature b/changelog.d/5196.feature new file mode 100644 index 0000000000..1ffb928f62 --- /dev/null +++ b/changelog.d/5196.feature @@ -0,0 +1 @@ +Add an option to disable per-room profiles. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 09ee0e8984..d218aefee5 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -276,6 +276,12 @@ listeners: # #require_membership_for_aliases: false +# Whether to allow per-room membership profiles through the send of membership +# events with profile information that differ from the target's global profile. +# Defaults to 'true'. 
+# +#allow_per_room_profiles: false + ## TLS ## From cc8c139a39c6c5e095873372f6c35420d5bd1f99 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 16 May 2019 15:20:59 +0100 Subject: [PATCH 151/429] Lint --- tests/rest/client/v1/test_rooms.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index ffd28772b0..8df0a07824 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -984,7 +984,9 @@ class PerRoomProfilesForbiddenTestCase(unittest.HomeserverTestCase): request_data = json.dumps(data) request, channel = self.make_request( "PUT", - "/_matrix/client/r0/rooms/%s/state/m.room.member/%s" % (self.room_id, self.user_id), + "/_matrix/client/r0/rooms/%s/state/m.room.member/%s" % ( + self.room_id, self.user_id, + ), request_data, access_token=self.tok, ) From 8f9ce1a8a22a6299b62eb0d6ab73cb95a4e01d20 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 16 May 2019 15:25:54 +0100 Subject: [PATCH 152/429] Lint --- tests/rest/client/v1/test_rooms.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index 8df0a07824..5f75ad7579 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -1004,4 +1004,3 @@ class PerRoomProfilesForbiddenTestCase(unittest.HomeserverTestCase): res_displayname = channel.json_body["content"]["displayname"] self.assertEqual(res_displayname, self.displayname, channel.result) - From 895179a4dcc993af9702e3ab36e5d2157fdd0c26 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 16 May 2019 16:41:05 +0100 Subject: [PATCH 153/429] Update docstring --- synapse/storage/stream.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 163363c0c2..824e77c89e 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -77,6 +77,15 @@ def generate_pagination_where_clause( would be generated for dir=b, from_token=(6, 7) and to_token=(5, 3). + Note that tokens are considered to be after the row they are in, e.g. if + a row A has a token T, then we consider A to be before T. This covention + is important when figuring out inequalities for the generated SQL, and + produces the following result: + - If paginatiting forwards then we exclude any rows matching the from + token, but include those that match the to token. + - If paginatiting backwards then we include any rows matching the from + token, but include those that match the to token. + Args: direction (str): Whether we're paginating backwards("b") or forwards ("f"). @@ -131,7 +140,9 @@ def _make_generic_sql_bound(bound, column_names, values, engine): names (tuple[str, str]): The column names. Must *not* be user defined as these get inserted directly into the SQL statement without escapes. - values (tuple[int, int]): The values to bound the columns by. + values (tuple[int|None, int]): The values to bound the columns by. If + the first value is None then only creates a bound on the second + column. 
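Restating the docstring example above in plain Python terms may help: with dir=b, from_token=(6, 7) and to_token=(5, 3), the example clause keeps a row when both tuple comparisons hold. This is only a sketch of the comparison semantics, not the literal SQL emitted by `_make_generic_sql_bound`:

    rows = [(6, 7), (6, 3), (5, 9), (5, 3), (4, 1)]
    kept = [r for r in rows if (6, 7) > r and (5, 3) < r]
    print(kept)  # [(6, 3), (5, 9)]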
engine: The database engine to generate the SQL for Returns: From d46aab3fa8bc02d8db9616490e849b9443fed8e7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 14 May 2019 16:59:21 +0100 Subject: [PATCH 154/429] Add basic editing support --- synapse/events/utils.py | 30 ++++++- synapse/replication/slave/storage/events.py | 1 + synapse/storage/relations.py | 60 ++++++++++++- tests/rest/client/v2_alpha/test_relations.py | 91 ++++++++++++++++++-- 4 files changed, 167 insertions(+), 15 deletions(-) diff --git a/synapse/events/utils.py b/synapse/events/utils.py index 16d0c64372..2019ce9b1c 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -346,7 +346,7 @@ class EventClientSerializer(object): defer.returnValue(event) event_id = event.event_id - event = serialize_event(event, time_now, **kwargs) + serialized_event = serialize_event(event, time_now, **kwargs) # If MSC1849 is enabled then we need to look if thre are any relations # we need to bundle in with the event @@ -359,14 +359,36 @@ class EventClientSerializer(object): ) if annotations.chunk: - r = event["unsigned"].setdefault("m.relations", {}) + r = serialized_event["unsigned"].setdefault("m.relations", {}) r[RelationTypes.ANNOTATION] = annotations.to_dict() if references.chunk: - r = event["unsigned"].setdefault("m.relations", {}) + r = serialized_event["unsigned"].setdefault("m.relations", {}) r[RelationTypes.REFERENCES] = references.to_dict() - defer.returnValue(event) + edit = None + if event.type == EventTypes.Message: + edit = yield self.store.get_applicable_edit( + event.event_id, event.type, event.sender, + ) + + if edit: + # If there is an edit replace the content, preserving existing + # relations. + + relations = event.content.get("m.relates_to") + serialized_event["content"] = edit.content.get("m.new_content", {}) + if relations: + serialized_event["content"]["m.relates_to"] = relations + else: + serialized_event["content"].pop("m.relates_to", None) + + r = serialized_event["unsigned"].setdefault("m.relations", {}) + r[RelationTypes.REPLACES] = { + "event_id": edit.event_id, + } + + defer.returnValue(serialized_event) def serialize_events(self, events, time_now, **kwargs): """Serializes multiple events. 
diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py index 797450bc66..857128b311 100644 --- a/synapse/replication/slave/storage/events.py +++ b/synapse/replication/slave/storage/events.py @@ -143,3 +143,4 @@ class SlavedEventStore(EventFederationWorkerStore, if relates_to: self.get_relations_for_event.invalidate_many((relates_to,)) self.get_aggregation_groups_for_event.invalidate_many((relates_to,)) + self.get_applicable_edit.invalidate_many((relates_to,)) diff --git a/synapse/storage/relations.py b/synapse/storage/relations.py index de67e305a1..aa91e52733 100644 --- a/synapse/storage/relations.py +++ b/synapse/storage/relations.py @@ -17,11 +17,13 @@ import logging import attr -from synapse.api.constants import RelationTypes +from twisted.internet import defer + +from synapse.api.constants import EventTypes, RelationTypes from synapse.api.errors import SynapseError from synapse.storage._base import SQLBaseStore from synapse.storage.stream import generate_pagination_where_clause -from synapse.util.caches.descriptors import cached +from synapse.util.caches.descriptors import cached, cachedInlineCallbacks logger = logging.getLogger(__name__) @@ -312,6 +314,59 @@ class RelationsWorkerStore(SQLBaseStore): "get_aggregation_groups_for_event", _get_aggregation_groups_for_event_txn ) + @cachedInlineCallbacks(tree=True) + def get_applicable_edit(self, event_id, event_type, sender): + """Get the most recent edit (if any) that has happened for the given + event. + + Correctly handles checking whether edits were allowed to happen. + + Args: + event_id (str): The original event ID + event_type (str): The original event type + sender (str): The original event sender + + Returns: + Deferred[EventBase|None]: Returns the most recent edit, if any. + """ + + # We only allow edits for `m.room.message` events that have the same sender + # and event type. We can't assert these things during regular event auth so + # we have to do the post hoc. + + if event_type != EventTypes.Message: + return + + sql = """ + SELECT event_id, origin_server_ts FROM events + INNER JOIN event_relations USING (event_id) + WHERE + relates_to_id = ? + AND relation_type = ? + AND type = ? + AND sender = ? + ORDER by origin_server_ts DESC, event_id DESC + LIMIT 1 + """ + + def _get_applicable_edit_txn(txn): + txn.execute( + sql, (event_id, RelationTypes.REPLACES, event_type, sender) + ) + row = txn.fetchone() + if row: + return row[0] + + edit_id = yield self.runInteraction( + "get_applicable_edit", _get_applicable_edit_txn + ) + + if not edit_id: + return + + edit_event = yield self.get_event(edit_id, allow_none=True) + defer.returnValue(edit_event) + class RelationsStore(RelationsWorkerStore): def _handle_event_relations(self, txn, event): @@ -357,3 +412,4 @@ class RelationsStore(RelationsWorkerStore): txn.call_after( self.get_aggregation_groups_for_event.invalidate_many, (parent_id,) ) + txn.call_after(self.get_applicable_edit.invalidate_many, (parent_id,)) diff --git a/tests/rest/client/v2_alpha/test_relations.py b/tests/rest/client/v2_alpha/test_relations.py index 775622bd2b..b0e4c47ae3 100644 --- a/tests/rest/client/v2_alpha/test_relations.py +++ b/tests/rest/client/v2_alpha/test_relations.py @@ -14,6 +14,7 @@ # limitations under the License. import itertools +import json import six @@ -102,11 +103,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): # relation event we sent above. 
self.assertEquals(len(channel.json_body["chunk"]), 1, channel.json_body) self.assert_dict( - { - "event_id": annotation_id, - "sender": self.user_id, - "type": "m.reaction", - }, + {"event_id": annotation_id, "sender": self.user_id, "type": "m.reaction"}, channel.json_body["chunk"][0], ) @@ -330,8 +327,6 @@ class RelationsTestCase(unittest.HomeserverTestCase): self.render(request) self.assertEquals(200, channel.code, channel.json_body) - self.maxDiff = None - self.assertEquals( channel.json_body["unsigned"].get("m.relations"), { @@ -347,7 +342,84 @@ class RelationsTestCase(unittest.HomeserverTestCase): }, ) - def _send_relation(self, relation_type, event_type, key=None): + def test_edit(self): + """Test that a simple edit works. + """ + + new_body = {"msgtype": "m.text", "body": "I've been edited!"} + channel = self._send_relation( + RelationTypes.REPLACES, + "m.room.message", + content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body}, + ) + self.assertEquals(200, channel.code, channel.json_body) + + edit_event_id = channel.json_body["event_id"] + + request, channel = self.make_request( + "GET", "/rooms/%s/event/%s" % (self.room, self.parent_id) + ) + self.render(request) + self.assertEquals(200, channel.code, channel.json_body) + + self.assertEquals(channel.json_body["content"], new_body) + + self.assertEquals( + channel.json_body["unsigned"].get("m.relations"), + {RelationTypes.REPLACES: {"event_id": edit_event_id}}, + ) + + def test_multi_edit(self): + """Test that multiple edits, including attempts by people who + shouldn't be allowed, are correctly handled. + """ + + channel = self._send_relation( + RelationTypes.REPLACES, + "m.room.message", + content={ + "msgtype": "m.text", + "body": "Wibble", + "m.new_content": {"msgtype": "m.text", "body": "First edit"}, + }, + ) + self.assertEquals(200, channel.code, channel.json_body) + + new_body = {"msgtype": "m.text", "body": "I've been edited!"} + channel = self._send_relation( + RelationTypes.REPLACES, + "m.room.message", + content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body}, + ) + self.assertEquals(200, channel.code, channel.json_body) + + edit_event_id = channel.json_body["event_id"] + + channel = self._send_relation( + RelationTypes.REPLACES, + "m.room.message.WRONG_TYPE", + content={ + "msgtype": "m.text", + "body": "Wibble", + "m.new_content": {"msgtype": "m.text", "body": "Edit, but wrong type"}, + }, + ) + self.assertEquals(200, channel.code, channel.json_body) + + request, channel = self.make_request( + "GET", "/rooms/%s/event/%s" % (self.room, self.parent_id) + ) + self.render(request) + self.assertEquals(200, channel.code, channel.json_body) + + self.assertEquals(channel.json_body["content"], new_body) + + self.assertEquals( + channel.json_body["unsigned"].get("m.relations"), + {RelationTypes.REPLACES: {"event_id": edit_event_id}}, + ) + + def _send_relation(self, relation_type, event_type, key=None, content={}): """Helper function to send a relation pointing at `self.parent_id` Args: @@ -355,6 +427,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): event_type (str): The type of the event to create key (str|None): The aggregation key used for m.annotation relation type. + content(dict|None): The content of the created event. 
Returns: FakeChannel @@ -367,7 +440,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): "POST", "/_matrix/client/unstable/rooms/%s/send_relation/%s/%s/%s%s" % (self.room, self.parent_id, relation_type, event_type, query), - b"{}", + json.dumps(content).encode("utf-8"), ) self.render(request) return channel From f89f688a55e1ddffbfa3613ac333f43e81d4b995 Mon Sep 17 00:00:00 2001 From: PauRE Date: Thu, 16 May 2019 20:04:26 +0200 Subject: [PATCH 155/429] Fix image orientation when generating thumbnail (#5039) --- changelog.d/5039.bugfix | 1 + synapse/python_dependencies.py | 2 +- synapse/rest/media/v1/media_repository.py | 9 ++++++ synapse/rest/media/v1/thumbnailer.py | 35 +++++++++++++++++++++++ 4 files changed, 46 insertions(+), 1 deletion(-) create mode 100644 changelog.d/5039.bugfix diff --git a/changelog.d/5039.bugfix b/changelog.d/5039.bugfix new file mode 100644 index 0000000000..212cff7ae8 --- /dev/null +++ b/changelog.d/5039.bugfix @@ -0,0 +1 @@ +Fix image orientation when generating thumbnails (needs pillow>=4.3.0). Contributed by Pau Rodriguez-Estivill. diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 2708f5e820..fdcfb90a7e 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -53,7 +53,7 @@ REQUIREMENTS = [ "pyasn1-modules>=0.0.7", "daemonize>=2.3.1", "bcrypt>=3.1.0", - "pillow>=3.1.2", + "pillow>=4.3.0", "sortedcontainers>=1.4.4", "psutil>=2.0.0", "pymacaroons>=0.13.0", diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index bdffa97805..8569677355 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -444,6 +444,9 @@ class MediaRepository(object): ) return + if thumbnailer.transpose_method is not None: + m_width, m_height = thumbnailer.transpose() + if t_method == "crop": t_byte_source = thumbnailer.crop(t_width, t_height, t_type) elif t_method == "scale": @@ -578,6 +581,12 @@ class MediaRepository(object): ) return + if thumbnailer.transpose_method is not None: + m_width, m_height = yield logcontext.defer_to_thread( + self.hs.get_reactor(), + thumbnailer.transpose + ) + # We deduplicate the thumbnail sizes by ignoring the cropped versions if # they have the same dimensions of a scaled one. 
thumbnails = {} diff --git a/synapse/rest/media/v1/thumbnailer.py b/synapse/rest/media/v1/thumbnailer.py index a4b26c2587..3efd0d80fc 100644 --- a/synapse/rest/media/v1/thumbnailer.py +++ b/synapse/rest/media/v1/thumbnailer.py @@ -20,6 +20,17 @@ import PIL.Image as Image logger = logging.getLogger(__name__) +EXIF_ORIENTATION_TAG = 0x0112 +EXIF_TRANSPOSE_MAPPINGS = { + 2: Image.FLIP_LEFT_RIGHT, + 3: Image.ROTATE_180, + 4: Image.FLIP_TOP_BOTTOM, + 5: Image.TRANSPOSE, + 6: Image.ROTATE_270, + 7: Image.TRANSVERSE, + 8: Image.ROTATE_90 +} + class Thumbnailer(object): @@ -31,6 +42,30 @@ class Thumbnailer(object): def __init__(self, input_path): self.image = Image.open(input_path) self.width, self.height = self.image.size + self.transpose_method = None + try: + # We don't use ImageOps.exif_transpose since it crashes with big EXIF + image_exif = self.image._getexif() + if image_exif is not None: + image_orientation = image_exif.get(EXIF_ORIENTATION_TAG) + self.transpose_method = EXIF_TRANSPOSE_MAPPINGS.get(image_orientation) + except Exception as e: + # A lot of parsing errors can happen when parsing EXIF + logger.info("Error parsing image EXIF information: %s", e) + + def transpose(self): + """Transpose the image using its EXIF Orientation tag + + Returns: + Tuple[int, int]: (width, height) containing the new image size in pixels. + """ + if self.transpose_method is not None: + self.image = self.image.transpose(self.transpose_method) + self.width, self.height = self.image.size + self.transpose_method = None + # We don't need EXIF any more + self.image.info["exif"] = None + return self.image.size def aspect(self, max_width, max_height): """Calculate the largest size that preserves aspect ratio which From 7ce1f97a1353e7bd9232c22a20835e40fa5662e0 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 17 May 2019 12:38:03 +0100 Subject: [PATCH 156/429] Stop telling people to install the optional dependencies. (#5197) * Stop telling people to install the optional dependencies. They're optional. Also update the postgres docs a bit for clarity(?) --- INSTALL.md | 4 ++-- changelog.d/5197.misc | 1 + docs/postgres.rst | 45 +++++++++++++++++++++---------------------- 3 files changed, 25 insertions(+), 25 deletions(-) create mode 100644 changelog.d/5197.misc diff --git a/INSTALL.md b/INSTALL.md index b88d826f6c..1934593148 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -35,7 +35,7 @@ virtualenv -p python3 ~/synapse/env source ~/synapse/env/bin/activate pip install --upgrade pip pip install --upgrade setuptools -pip install matrix-synapse[all] +pip install matrix-synapse ``` This will download Synapse from [PyPI](https://pypi.org/project/matrix-synapse) @@ -48,7 +48,7 @@ update flag: ``` source ~/synapse/env/bin/activate -pip install -U matrix-synapse[all] +pip install -U matrix-synapse ``` Before you can start Synapse, you will need to generate a configuration diff --git a/changelog.d/5197.misc b/changelog.d/5197.misc new file mode 100644 index 0000000000..fca1d86b2e --- /dev/null +++ b/changelog.d/5197.misc @@ -0,0 +1 @@ +Stop telling people to install the optional dependencies by default. diff --git a/docs/postgres.rst b/docs/postgres.rst index f7ebbed0c3..e81e10403f 100644 --- a/docs/postgres.rst +++ b/docs/postgres.rst @@ -3,6 +3,28 @@ Using Postgres Postgres version 9.4 or later is known to work. 
+Install postgres client libraries +================================= + +Synapse will require the python postgres client library in order to connect to +a postgres database. + +* If you are using the `matrix.org debian/ubuntu + packages <../INSTALL.md#matrixorg-packages>`_, + the necessary libraries will already be installed. + +* For other pre-built packages, please consult the documentation from the + relevant package. + +* If you installed synapse `in a virtualenv + <../INSTALL.md#installing-from-source>`_, you can install the library with:: + + ~/synapse/env/bin/pip install matrix-synapse[postgres] + + (substituting the path to your virtualenv for ``~/synapse/env``, if you used a + different path). You will require the postgres development files. These are in + the ``libpq-dev`` package on Debian-derived distributions. + Set up database =============== @@ -26,29 +48,6 @@ encoding use, e.g.:: This would create an appropriate database named ``synapse`` owned by the ``synapse_user`` user (which must already exist). -Set up client in Debian/Ubuntu -=========================== - -Postgres support depends on the postgres python connector ``psycopg2``. In the -virtual env:: - - sudo apt-get install libpq-dev - pip install psycopg2 - -Set up client in RHEL/CentOs 7 -============================== - -Make sure you have the appropriate version of postgres-devel installed. For a -postgres 9.4, use the postgres 9.4 packages from -[here](https://wiki.postgresql.org/wiki/YUM_Installation). - -As with Debian/Ubuntu, postgres support depends on the postgres python connector -``psycopg2``. In the virtual env:: - - sudo yum install postgresql-devel libpqxx-devel.x86_64 - export PATH=/usr/pgsql-9.4/bin/:$PATH - pip install psycopg2 - Tuning Postgres =============== From afb463fb7a1878cca5e96feec4e883ae500421ab Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 17 May 2019 12:56:46 +0100 Subject: [PATCH 157/429] Some vagrant hackery for testing the debs --- debian/test/.gitignore | 2 ++ debian/test/provision.sh | 23 +++++++++++++++++++++++ debian/test/stretch/Vagrantfile | 13 +++++++++++++ debian/test/xenial/Vagrantfile | 10 ++++++++++ 4 files changed, 48 insertions(+) create mode 100644 debian/test/.gitignore create mode 100644 debian/test/provision.sh create mode 100644 debian/test/stretch/Vagrantfile create mode 100644 debian/test/xenial/Vagrantfile diff --git a/debian/test/.gitignore b/debian/test/.gitignore new file mode 100644 index 0000000000..95eda73fcc --- /dev/null +++ b/debian/test/.gitignore @@ -0,0 +1,2 @@ +.vagrant +*.log diff --git a/debian/test/provision.sh b/debian/test/provision.sh new file mode 100644 index 0000000000..a5c7f59712 --- /dev/null +++ b/debian/test/provision.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# +# provisioning script for vagrant boxes for testing the matrix-synapse debs. +# +# Will install the most recent matrix-synapse-py3 deb for this platform from +# the /debs directory. 
+ +set -e + +apt-get update +apt-get install -y lsb-release + +deb=`ls /debs/matrix-synapse-py3_*+$(lsb_release -cs)*.deb | sort | tail -n1` + +debconf-set-selections < Date: Fri, 17 May 2019 13:27:19 +0100 Subject: [PATCH 158/429] expose SlavedProfileStore to ClientReaderSlavedStore (#5200) * expose SlavedProfileStore to ClientReaderSlavedStore --- changelog.d/5200.bugfix | 1 + synapse/app/client_reader.py | 2 ++ 2 files changed, 3 insertions(+) create mode 100644 changelog.d/5200.bugfix diff --git a/changelog.d/5200.bugfix b/changelog.d/5200.bugfix new file mode 100644 index 0000000000..f346c7b0cc --- /dev/null +++ b/changelog.d/5200.bugfix @@ -0,0 +1 @@ +Fix worker registration bug caused by ClientReaderSlavedStore being unable to see get_profileinfo. diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index 864f1eac48..707f7c2f07 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -29,6 +29,7 @@ from synapse.http.server import JsonResource from synapse.http.site import SynapseSite from synapse.metrics import RegistryProxy from synapse.metrics.resource import METRICS_PREFIX, MetricsResource +from synapse.replication.slave.storage import SlavedProfileStore from synapse.replication.slave.storage._base import BaseSlavedStore from synapse.replication.slave.storage.account_data import SlavedAccountDataStore from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore @@ -83,6 +84,7 @@ class ClientReaderSlavedStore( SlavedTransactionStore, SlavedClientIpStore, BaseSlavedStore, + SlavedProfileStore, ): pass From 5dbff345094327e61fa9c1425ba0f955c9530ba7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 17 May 2019 15:48:04 +0100 Subject: [PATCH 159/429] Fixup bsaed on review comments --- synapse/events/utils.py | 4 +-- synapse/replication/slave/storage/events.py | 2 +- synapse/storage/relations.py | 32 +++++++++++---------- 3 files changed, 19 insertions(+), 19 deletions(-) diff --git a/synapse/events/utils.py b/synapse/events/utils.py index 2019ce9b1c..bf3c8f8dc1 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -368,9 +368,7 @@ class EventClientSerializer(object): edit = None if event.type == EventTypes.Message: - edit = yield self.store.get_applicable_edit( - event.event_id, event.type, event.sender, - ) + edit = yield self.store.get_applicable_edit(event_id) if edit: # If there is an edit replace the content, preserving existing diff --git a/synapse/replication/slave/storage/events.py b/synapse/replication/slave/storage/events.py index 857128b311..a3952506c1 100644 --- a/synapse/replication/slave/storage/events.py +++ b/synapse/replication/slave/storage/events.py @@ -143,4 +143,4 @@ class SlavedEventStore(EventFederationWorkerStore, if relates_to: self.get_relations_for_event.invalidate_many((relates_to,)) self.get_aggregation_groups_for_event.invalidate_many((relates_to,)) - self.get_applicable_edit.invalidate_many((relates_to,)) + self.get_applicable_edit.invalidate((relates_to,)) diff --git a/synapse/storage/relations.py b/synapse/storage/relations.py index aa91e52733..6e216066ab 100644 --- a/synapse/storage/relations.py +++ b/synapse/storage/relations.py @@ -19,7 +19,7 @@ import attr from twisted.internet import defer -from synapse.api.constants import EventTypes, RelationTypes +from synapse.api.constants import RelationTypes from synapse.api.errors import SynapseError from synapse.storage._base import SQLBaseStore from synapse.storage.stream import generate_pagination_where_clause @@ 
-314,8 +314,8 @@ class RelationsWorkerStore(SQLBaseStore): "get_aggregation_groups_for_event", _get_aggregation_groups_for_event_txn ) - @cachedInlineCallbacks(tree=True) - def get_applicable_edit(self, event_id, event_type, sender): + @cachedInlineCallbacks() + def get_applicable_edit(self, event_id): """Get the most recent edit (if any) that has happened for the given event. @@ -323,8 +323,6 @@ class RelationsWorkerStore(SQLBaseStore): Args: event_id (str): The original event ID - event_type (str): The original event type - sender (str): The original event sender Returns: Deferred[EventBase|None]: Returns the most recent edit, if any. @@ -332,26 +330,28 @@ class RelationsWorkerStore(SQLBaseStore): # We only allow edits for `m.room.message` events that have the same sender # and event type. We can't assert these things during regular event auth so - # we have to do the post hoc. - - if event_type != EventTypes.Message: - return + # we have to do the checks post hoc. + # Fetches latest edit that has the same type and sender as the + # original, and is an `m.room.message`. sql = """ - SELECT event_id, origin_server_ts FROM events + SELECT edit.event_id FROM events AS edit INNER JOIN event_relations USING (event_id) + INNER JOIN events AS original ON + original.event_id = relates_to_id + AND edit.type = original.type + AND edit.sender = original.sender WHERE relates_to_id = ? AND relation_type = ? - AND type = ? - AND sender = ? - ORDER by origin_server_ts DESC, event_id DESC + AND edit.type = 'm.room.message' + ORDER by edit.origin_server_ts DESC, edit.event_id DESC LIMIT 1 """ def _get_applicable_edit_txn(txn): txn.execute( - sql, (event_id, RelationTypes.REPLACES, event_type, sender) + sql, (event_id, RelationTypes.REPLACES,) ) row = txn.fetchone() if row: @@ -412,4 +412,6 @@ class RelationsStore(RelationsWorkerStore): txn.call_after( self.get_aggregation_groups_for_event.invalidate_many, (parent_id,) ) - txn.call_after(self.get_applicable_edit.invalidate_many, (parent_id,)) + + if rel_type == RelationTypes.REPLACES: + txn.call_after(self.get_applicable_edit.invalidate, (parent_id,)) From 8dd9cca8ead9fc0cf0789edf9fcfce04814c5eda Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 17 May 2019 16:40:51 +0100 Subject: [PATCH 160/429] Spelling and clarifications --- synapse/storage/stream.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 824e77c89e..529ad4ea79 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -78,12 +78,12 @@ def generate_pagination_where_clause( would be generated for dir=b, from_token=(6, 7) and to_token=(5, 3). Note that tokens are considered to be after the row they are in, e.g. if - a row A has a token T, then we consider A to be before T. This covention + a row A has a token T, then we consider A to be before T. This convention is important when figuring out inequalities for the generated SQL, and produces the following result: - - If paginatiting forwards then we exclude any rows matching the from + - If paginating forwards then we exclude any rows matching the from token, but include those that match the to token. - - If paginatiting backwards then we include any rows matching the from + - If paginating backwards then we include any rows matching the from token, but include those that match the to token. Args: @@ -92,8 +92,12 @@ def generate_pagination_where_clause( column_names (tuple[str, str]): The column names to bound. 
Must *not* be user defined as these get inserted directly into the SQL statement without escapes. - from_token (tuple[int, int]|None) - to_token (tuple[int, int]|None) + from_token (tuple[int, int]|None): The start point for the pagination. + This is an exclusive minimum bound if direction is "f", and an + inclusive maximum bound if direction is "b". + to_token (tuple[int, int]|None): The endpoint point for the pagination. + This is an inclusive maximum bound if direction is "f", and an + exclusive minimum bound if direction is "b". engine: The database engine to generate the clauses for Returns: From 291e1eea5eab04df79ad607ca23fb421f11a63ff Mon Sep 17 00:00:00 2001 From: bytepoets-blo Date: Fri, 17 May 2019 18:27:14 +0200 Subject: [PATCH 161/429] fix mapping of return values for get_or_register_3pid_guest (#5177) * fix mapping of return values for get_or_register_3pid_guest --- changelog.d/5177.bugfix | 1 + synapse/handlers/room_member.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/5177.bugfix diff --git a/changelog.d/5177.bugfix b/changelog.d/5177.bugfix new file mode 100644 index 0000000000..c2f1644ae5 --- /dev/null +++ b/changelog.d/5177.bugfix @@ -0,0 +1 @@ +Fix 3pid guest invites. diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index ffc588d454..93ac986c86 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -944,7 +944,7 @@ class RoomMemberHandler(object): } if self.config.invite_3pid_guest: - guest_access_token, guest_user_id = yield self.get_or_register_3pid_guest( + guest_user_id, guest_access_token = yield self.get_or_register_3pid_guest( requester=requester, medium=medium, address=address, From d4ca533d702e9518244b8ca6e1861a7c8f64adf1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 17 May 2019 17:45:51 +0100 Subject: [PATCH 162/429] Make tests use different user for each reaction it sends As users aren't allowed to react with the same emoji more than once. 
--- tests/rest/client/v2_alpha/test_relations.py | 80 +++++++++++++++++--- 1 file changed, 68 insertions(+), 12 deletions(-) diff --git a/tests/rest/client/v2_alpha/test_relations.py b/tests/rest/client/v2_alpha/test_relations.py index b0e4c47ae3..cd965167f8 100644 --- a/tests/rest/client/v2_alpha/test_relations.py +++ b/tests/rest/client/v2_alpha/test_relations.py @@ -19,19 +19,22 @@ import json import six from synapse.api.constants import EventTypes, RelationTypes +from synapse.rest import admin from synapse.rest.client.v1 import login, room -from synapse.rest.client.v2_alpha import relations +from synapse.rest.client.v2_alpha import register, relations from tests import unittest class RelationsTestCase(unittest.HomeserverTestCase): - user_id = "@alice:test" servlets = [ relations.register_servlets, room.register_servlets, login.register_servlets, + register.register_servlets, + admin.register_servlets_for_client_rest_resource, ] + hijack_auth = False def make_homeserver(self, reactor, clock): # We need to enable msc1849 support for aggregations @@ -40,8 +43,12 @@ class RelationsTestCase(unittest.HomeserverTestCase): return self.setup_test_homeserver(config=config) def prepare(self, reactor, clock, hs): - self.room = self.helper.create_room_as(self.user_id) - res = self.helper.send(self.room, body="Hi!") + self.user_id, self.user_token = self._create_user("alice") + self.user2_id, self.user2_token = self._create_user("bob") + + self.room = self.helper.create_room_as(self.user_id, tok=self.user_token) + self.helper.join(self.room, user=self.user2_id, tok=self.user2_token) + res = self.helper.send(self.room, body="Hi!", tok=self.user_token) self.parent_id = res["event_id"] def test_send_relation(self): @@ -55,7 +62,9 @@ class RelationsTestCase(unittest.HomeserverTestCase): event_id = channel.json_body["event_id"] request, channel = self.make_request( - "GET", "/rooms/%s/event/%s" % (self.room, event_id) + "GET", + "/rooms/%s/event/%s" % (self.room, event_id), + access_token=self.user_token, ) self.render(request) self.assertEquals(200, channel.code, channel.json_body) @@ -95,6 +104,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): "GET", "/_matrix/client/unstable/rooms/%s/relations/%s?limit=1" % (self.room, self.parent_id), + access_token=self.user_token, ) self.render(request) self.assertEquals(200, channel.code, channel.json_body) @@ -135,6 +145,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): "GET", "/_matrix/client/unstable/rooms/%s/relations/%s?limit=1%s" % (self.room, self.parent_id, from_token), + access_token=self.user_token, ) self.render(request) self.assertEquals(200, channel.code, channel.json_body) @@ -156,15 +167,32 @@ class RelationsTestCase(unittest.HomeserverTestCase): """Test that we can paginate annotation groups correctly. """ + # We need to create ten separate users to send each reaction. 
+ access_tokens = [self.user_token, self.user2_token] + idx = 0 + while len(access_tokens) < 10: + user_id, token = self._create_user("test" + str(idx)) + idx += 1 + + self.helper.join(self.room, user=user_id, tok=token) + access_tokens.append(token) + + idx = 0 sent_groups = {u"👍": 10, u"a": 7, u"b": 5, u"c": 3, u"d": 2, u"e": 1} for key in itertools.chain.from_iterable( itertools.repeat(key, num) for key, num in sent_groups.items() ): channel = self._send_relation( - RelationTypes.ANNOTATION, "m.reaction", key=key + RelationTypes.ANNOTATION, + "m.reaction", + key=key, + access_token=access_tokens[idx], ) self.assertEquals(200, channel.code, channel.json_body) + idx += 1 + idx %= len(access_tokens) + prev_token = None found_groups = {} for _ in range(20): @@ -176,6 +204,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): "GET", "/_matrix/client/unstable/rooms/%s/aggregations/%s?limit=1%s" % (self.room, self.parent_id, from_token), + access_token=self.user_token, ) self.render(request) self.assertEquals(200, channel.code, channel.json_body) @@ -236,6 +265,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): encoded_key, from_token, ), + access_token=self.user_token, ) self.render(request) self.assertEquals(200, channel.code, channel.json_body) @@ -263,7 +293,9 @@ class RelationsTestCase(unittest.HomeserverTestCase): channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") self.assertEquals(200, channel.code, channel.json_body) - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") + channel = self._send_relation( + RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token + ) self.assertEquals(200, channel.code, channel.json_body) channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b") @@ -273,6 +305,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): "GET", "/_matrix/client/unstable/rooms/%s/aggregations/%s" % (self.room, self.parent_id), + access_token=self.user_token, ) self.render(request) self.assertEquals(200, channel.code, channel.json_body) @@ -295,6 +328,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): "GET", "/_matrix/client/unstable/rooms/%s/aggregations/%s/m.replace?limit=1" % (self.room, self.parent_id), + access_token=self.user_token, ) self.render(request) self.assertEquals(400, channel.code, channel.json_body) @@ -307,7 +341,9 @@ class RelationsTestCase(unittest.HomeserverTestCase): channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") self.assertEquals(200, channel.code, channel.json_body) - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") + channel = self._send_relation( + RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token + ) self.assertEquals(200, channel.code, channel.json_body) channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b") @@ -322,7 +358,9 @@ class RelationsTestCase(unittest.HomeserverTestCase): reply_2 = channel.json_body["event_id"] request, channel = self.make_request( - "GET", "/rooms/%s/event/%s" % (self.room, self.parent_id) + "GET", + "/rooms/%s/event/%s" % (self.room, self.parent_id), + access_token=self.user_token, ) self.render(request) self.assertEquals(200, channel.code, channel.json_body) @@ -357,7 +395,9 @@ class RelationsTestCase(unittest.HomeserverTestCase): edit_event_id = channel.json_body["event_id"] request, channel = self.make_request( - "GET", "/rooms/%s/event/%s" % (self.room, self.parent_id) + "GET", + "/rooms/%s/event/%s" % (self.room, 
self.parent_id), + access_token=self.user_token, ) self.render(request) self.assertEquals(200, channel.code, channel.json_body) @@ -407,7 +447,9 @@ class RelationsTestCase(unittest.HomeserverTestCase): self.assertEquals(200, channel.code, channel.json_body) request, channel = self.make_request( - "GET", "/rooms/%s/event/%s" % (self.room, self.parent_id) + "GET", + "/rooms/%s/event/%s" % (self.room, self.parent_id), + access_token=self.user_token, ) self.render(request) self.assertEquals(200, channel.code, channel.json_body) @@ -419,7 +461,9 @@ class RelationsTestCase(unittest.HomeserverTestCase): {RelationTypes.REPLACES: {"event_id": edit_event_id}}, ) - def _send_relation(self, relation_type, event_type, key=None, content={}): + def _send_relation( + self, relation_type, event_type, key=None, content={}, access_token=None + ): """Helper function to send a relation pointing at `self.parent_id` Args: @@ -428,10 +472,15 @@ class RelationsTestCase(unittest.HomeserverTestCase): key (str|None): The aggregation key used for m.annotation relation type. content(dict|None): The content of the created event. + access_token (str|None): The access token used to send the relation, + defaults to `self.user_token` Returns: FakeChannel """ + if not access_token: + access_token = self.user_token + query = "" if key: query = "?key=" + six.moves.urllib.parse.quote_plus(key.encode("utf-8")) @@ -441,6 +490,13 @@ class RelationsTestCase(unittest.HomeserverTestCase): "/_matrix/client/unstable/rooms/%s/send_relation/%s/%s/%s%s" % (self.room, self.parent_id, relation_type, event_type, query), json.dumps(content).encode("utf-8"), + access_token=access_token, ) self.render(request) return channel + + def _create_user(self, localpart): + user_id = self.register_user(localpart, "abc123") + access_token = self.login(localpart, "abc123") + + return user_id, access_token From b63cc325a9362874a13f0079589afccd7d108f1f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 17 May 2019 18:01:39 +0100 Subject: [PATCH 163/429] Only count aggregations from distinct senders As a user isn't allowed to send a single emoji more than once. --- synapse/storage/relations.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/relations.py b/synapse/storage/relations.py index 6e216066ab..42f587a7d8 100644 --- a/synapse/storage/relations.py +++ b/synapse/storage/relations.py @@ -280,7 +280,7 @@ class RelationsWorkerStore(SQLBaseStore): having_clause = "" sql = """ - SELECT type, aggregation_key, COUNT(*), MAX(stream_ordering) + SELECT type, aggregation_key, COUNT(DISTINCT sender), MAX(stream_ordering) FROM event_relations INNER JOIN events USING (event_id) WHERE {where_clause} From ad5b4074e1a84b1e50a97144fbf1902ddc89db73 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 17 May 2019 19:37:31 +0100 Subject: [PATCH 164/429] Add startup background job for account validity If account validity is enabled in the server's configuration, this job will run at startup as a background job and will stick an expiration date to any registered account missing one. 
--- synapse/storage/_base.py | 62 +++++++++++++++++++++ synapse/storage/registration.py | 16 ++---- tests/rest/client/v2_alpha/test_register.py | 55 ++++++++++++++++++ 3 files changed, 121 insertions(+), 12 deletions(-) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 983ce026e1..06d516c9e2 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -1,5 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd +# Copyright 2017-2018 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -227,6 +229,8 @@ class SQLBaseStore(object): # A set of tables that are not safe to use native upserts in. self._unsafe_to_upsert_tables = set(UNIQUE_INDEX_BACKGROUND_UPDATES.keys()) + self._account_validity = self.hs.config.account_validity + # We add the user_directory_search table to the blacklist on SQLite # because the existing search table does not have an index, making it # unsafe to use native upserts. @@ -243,6 +247,14 @@ class SQLBaseStore(object): self._check_safe_to_upsert, ) + if self._account_validity.enabled: + self._clock.call_later( + 0.0, + run_as_background_process, + "account_validity_set_expiration_dates", + self._set_expiration_date_when_missing, + ) + @defer.inlineCallbacks def _check_safe_to_upsert(self): """ @@ -275,6 +287,56 @@ class SQLBaseStore(object): self._check_safe_to_upsert, ) + @defer.inlineCallbacks + def _set_expiration_date_when_missing(self): + """ + Retrieves the list of registered users that don't have an expiration date, and + adds an expiration date for each of them. + """ + + def select_users_with_no_expiration_date_txn(txn): + """Retrieves the list of registered users with no expiration date from the + database. + """ + sql = ( + "SELECT users.name FROM users" + " LEFT JOIN account_validity ON (users.name = account_validity.user_id)" + " WHERE account_validity.user_id is NULL;" + ) + txn.execute(sql, []) + return self.cursor_to_dict(txn) + + res = yield self.runInteraction( + "get_users_with_no_expiration_date", + select_users_with_no_expiration_date_txn, + ) + + if res: + for user in res: + self.runInteraction( + "set_expiration_date_for_user_background", + self.set_expiration_date_for_user_txn, + user["name"], + ) + + def set_expiration_date_for_user_txn(self, txn, user_id): + """Sets an expiration date to the account with the given user ID. + + Args: + user_id (str): User ID to set an expiration date for. + """ + now_ms = self._clock.time_msec() + expiration_ts = now_ms + self._account_validity.period + self._simple_insert_txn( + txn, + "account_validity", + values={ + "user_id": user_id, + "expiration_ts_ms": expiration_ts, + "email_sent": False, + }, + ) + def start_profiling(self): self._previous_loop_ts = self._clock.time_msec() diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 03a06a83d6..4cf159ba81 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -1,5 +1,7 @@ # -*- coding: utf-8 -*- -# Copyright 2014 - 2016 OpenMarket Ltd +# Copyright 2014-2016 OpenMarket Ltd +# Copyright 2017-2018 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -725,17 +727,7 @@ class RegistrationStore( raise StoreError(400, "User ID already taken.", errcode=Codes.USER_IN_USE) if self._account_validity.enabled: - now_ms = self.clock.time_msec() - expiration_ts = now_ms + self._account_validity.period - self._simple_insert_txn( - txn, - "account_validity", - values={ - "user_id": user_id, - "expiration_ts_ms": expiration_ts, - "email_sent": False, - } - ) + self.set_expiration_date_for_user_txn(txn, user_id) if token: # it's possible for this to get a conflict, but only for a single user diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 65685883db..d4a1d4d50c 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -1,3 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2014-2016 OpenMarket Ltd +# Copyright 2017-2018 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + import datetime import json import os @@ -409,3 +426,41 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): self.assertEquals(channel.result["code"], b"200", channel.result) self.assertEqual(len(self.email_attempts), 1) + + +class AccountValidityBackgroundJobTestCase(unittest.HomeserverTestCase): + + servlets = [ + synapse.rest.admin.register_servlets_for_client_rest_resource, + ] + + def make_homeserver(self, reactor, clock): + self.validity_period = 10 + + config = self.default_config() + + config["enable_registration"] = True + config["account_validity"] = { + "enabled": False, + } + + self.hs = self.setup_test_homeserver(config=config) + self.hs.config.account_validity.period = self.validity_period + + self.store = self.hs.get_datastore() + + return self.hs + + def test_background_job(self): + """ + Tests whether the account validity startup background job does the right thing, + which is sticking an expiration date to every account that doesn't already have + one. + """ + user_id = self.register_user("kermit", "user") + + now_ms = self.hs.clock.time_msec() + self.get_success(self.store._set_expiration_date_when_missing()) + + res = self.get_success(self.store.get_expiration_ts_for_user(user_id)) + self.assertEqual(res, now_ms + self.validity_period) From 99c4ec1eefa7266a7b6475a23284104f5792a1b6 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 17 May 2019 19:38:41 +0100 Subject: [PATCH 165/429] Changelog --- changelog.d/5204.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5204.feature diff --git a/changelog.d/5204.feature b/changelog.d/5204.feature new file mode 100644 index 0000000000..2a7212ca18 --- /dev/null +++ b/changelog.d/5204.feature @@ -0,0 +1 @@ +Stick an expiration date to any registered user missing one at startup if account validity is enabled. 
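As an aside on the two account-validity patches above: the new startup job reduces to a single calculation, "now plus the configured validity period", written into the `expiration_ts_ms` column for every registered user that lacks an entry. The sketch below is not taken from any patch in this series; the config dict only mirrors the shape used by `AccountValidityBackgroundJobTestCase` above (period in milliseconds), and the one-week period is an arbitrary example rather than a Synapse default.

```
import time

# Config shaped like the test case above; "period" is in milliseconds, matching
# the value the test assigns directly to the parsed config object.
account_validity = {"enabled": True, "period": 7 * 24 * 60 * 60 * 1000}

now_ms = int(time.time() * 1000)
expiration_ts_ms = now_ms + account_validity["period"]

# This is the value the background job stores in account_validity.expiration_ts_ms
# (alongside email_sent=False) for each user that was missing an expiration date.
print(expiration_ts_ms)
```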
From 3787133c9e3fcf0e9b85700418bf03c48ec86ab3 Mon Sep 17 00:00:00 2001 From: ReidAnderson Date: Mon, 20 May 2019 05:20:08 -0500 Subject: [PATCH 166/429] Limit UserIds to a length that fits in a state key (#5198) --- changelog.d/5198.bugfix | 1 + synapse/api/constants.py | 3 +++ synapse/handlers/register.py | 11 ++++++++++- tests/handlers/test_register.py | 7 +++++++ 4 files changed, 21 insertions(+), 1 deletion(-) create mode 100644 changelog.d/5198.bugfix diff --git a/changelog.d/5198.bugfix b/changelog.d/5198.bugfix new file mode 100644 index 0000000000..c6b156f17d --- /dev/null +++ b/changelog.d/5198.bugfix @@ -0,0 +1 @@ +Prevent registration for user ids that are to long to fit into a state key. Contributed by Reid Anderson. \ No newline at end of file diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 8547a63535..c7bf95b426 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -23,6 +23,9 @@ MAX_DEPTH = 2**63 - 1 # the maximum length for a room alias is 255 characters MAX_ALIAS_LENGTH = 255 +# the maximum length for a user id is 255 characters +MAX_USERID_LENGTH = 255 + class Membership(object): diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index a51d11a257..e83ee24f10 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -19,7 +19,7 @@ import logging from twisted.internet import defer from synapse import types -from synapse.api.constants import LoginType +from synapse.api.constants import MAX_USERID_LENGTH, LoginType from synapse.api.errors import ( AuthError, Codes, @@ -123,6 +123,15 @@ class RegistrationHandler(BaseHandler): self.check_user_id_not_appservice_exclusive(user_id) + if len(user_id) > MAX_USERID_LENGTH: + raise SynapseError( + 400, + "User ID may not be longer than %s characters" % ( + MAX_USERID_LENGTH, + ), + Codes.INVALID_USERNAME + ) + users = yield self.store.get_users_by_id_case_insensitive(user_id) if users: if not guest_access_token: diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 1c253d0579..5ffba2ca7a 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -228,3 +228,10 @@ class RegistrationTestCase(unittest.HomeserverTestCase): def test_register_not_support_user(self): res = self.get_success(self.handler.register(localpart='user')) self.assertFalse(self.store.is_support_user(res[0])) + + def test_invalid_user_id_length(self): + invalid_user_id = "x" * 256 + self.get_failure( + self.handler.register(localpart=invalid_user_id), + SynapseError + ) From 935af0da380f39ba284b78054270331bdbad7712 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 20 May 2019 10:13:05 +0100 Subject: [PATCH 167/429] Correctly update aggregation counts after redaction --- synapse/storage/events.py | 3 ++ synapse/storage/relations.py | 17 +++++++++ tests/rest/client/v2_alpha/test_relations.py | 37 ++++++++++++++++++++ 3 files changed, 57 insertions(+) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index b025ebc926..881d6d0126 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -1325,6 +1325,9 @@ class EventsStore( txn, event.room_id, event.redacts ) + # Remove from relations table. + self._handle_redaction(txn, event.redacts) + # Update the event_forward_extremities, event_backward_extremities and # event_edges tables. 
self._handle_mult_prev_events( diff --git a/synapse/storage/relations.py b/synapse/storage/relations.py index 6e216066ab..63e6185ee3 100644 --- a/synapse/storage/relations.py +++ b/synapse/storage/relations.py @@ -415,3 +415,20 @@ class RelationsStore(RelationsWorkerStore): if rel_type == RelationTypes.REPLACES: txn.call_after(self.get_applicable_edit.invalidate, (parent_id,)) + + def _handle_redaction(self, txn, redacted_event_id): + """Handles receiving a redaction and checking whether we need to remove + any redacted relations from the database. + + Args: + txn + redacted_event_id (str): The event that was redacted. + """ + + self._simple_delete_txn( + txn, + table="event_relations", + keyvalues={ + "event_id": redacted_event_id, + } + ) diff --git a/tests/rest/client/v2_alpha/test_relations.py b/tests/rest/client/v2_alpha/test_relations.py index cd965167f8..3737cdc396 100644 --- a/tests/rest/client/v2_alpha/test_relations.py +++ b/tests/rest/client/v2_alpha/test_relations.py @@ -320,6 +320,43 @@ class RelationsTestCase(unittest.HomeserverTestCase): }, ) + def test_aggregation_redactions(self): + """Test that annotations get correctly aggregated after a redactions. + """ + + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") + self.assertEquals(200, channel.code, channel.json_body) + to_redact_event_id = channel.json_body["event_id"] + + channel = self._send_relation( + RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token + ) + self.assertEquals(200, channel.code, channel.json_body) + + # Now lets redact the 'a' reaction + request, channel = self.make_request( + "POST", + "/_matrix/client/r0/rooms/%s/redact/%s" % (self.room, to_redact_event_id), + access_token=self.user_token, + content={}, + ) + self.render(request) + self.assertEquals(200, channel.code, channel.json_body) + + request, channel = self.make_request( + "GET", + "/_matrix/client/unstable/rooms/%s/aggregations/%s" + % (self.room, self.parent_id), + access_token=self.user_token, + ) + self.render(request) + self.assertEquals(200, channel.code, channel.json_body) + + self.assertEquals( + channel.json_body, + {"chunk": [{"type": "m.reaction", "key": "a", "count": 1}]}, + ) + def test_aggregation_must_be_annotation(self): """Test that aggregations must be annotations. """ From 2ac9c965ddfb049251d0406f93c1104b227009e3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 20 May 2019 12:32:26 +0100 Subject: [PATCH 168/429] Fixup comments --- tests/rest/client/v2_alpha/test_relations.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/rest/client/v2_alpha/test_relations.py b/tests/rest/client/v2_alpha/test_relations.py index 3737cdc396..b3dc4bd5bc 100644 --- a/tests/rest/client/v2_alpha/test_relations.py +++ b/tests/rest/client/v2_alpha/test_relations.py @@ -321,7 +321,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): ) def test_aggregation_redactions(self): - """Test that annotations get correctly aggregated after a redactions. + """Test that annotations get correctly aggregated after a redaction. 
""" channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") @@ -333,7 +333,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): ) self.assertEquals(200, channel.code, channel.json_body) - # Now lets redact the 'a' reaction + # Now lets redact one of the 'a' reactions request, channel = self.make_request( "POST", "/_matrix/client/r0/rooms/%s/redact/%s" % (self.room, to_redact_event_id), From 06671057b626042ca4645a370cd62edcf4d6211b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 20 May 2019 12:38:20 +0100 Subject: [PATCH 169/429] Newsfile --- changelog.d/5198.feature | 1 - changelog.d/5209.feature | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) delete mode 100644 changelog.d/5198.feature create mode 100644 changelog.d/5209.feature diff --git a/changelog.d/5198.feature b/changelog.d/5198.feature deleted file mode 100644 index c8f624d172..0000000000 --- a/changelog.d/5198.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental support for reactions. diff --git a/changelog.d/5209.feature b/changelog.d/5209.feature new file mode 100644 index 0000000000..747098c166 --- /dev/null +++ b/changelog.d/5209.feature @@ -0,0 +1 @@ +Add experimental support for relations (aka reactions and edits). From 1dff859d6aed58561a2b5913e5c9b897bbd3599c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 20 May 2019 14:31:19 +0100 Subject: [PATCH 170/429] Rename relation types to match MSC --- synapse/api/constants.py | 4 ++-- synapse/events/utils.py | 6 +++--- synapse/storage/relations.py | 8 +++---- tests/rest/client/v2_alpha/test_relations.py | 22 ++++++++++---------- 4 files changed, 20 insertions(+), 20 deletions(-) diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 28862a1d25..6b347b1749 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -125,5 +125,5 @@ class RelationTypes(object): """The types of relations known to this server. 
""" ANNOTATION = "m.annotation" - REPLACES = "m.replaces" - REFERENCES = "m.references" + REPLACE = "m.replace" + REFERENCE = "m.reference" diff --git a/synapse/events/utils.py b/synapse/events/utils.py index bf3c8f8dc1..27a2a9ef98 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -355,7 +355,7 @@ class EventClientSerializer(object): event_id, ) references = yield self.store.get_relations_for_event( - event_id, RelationTypes.REFERENCES, direction="f", + event_id, RelationTypes.REFERENCE, direction="f", ) if annotations.chunk: @@ -364,7 +364,7 @@ class EventClientSerializer(object): if references.chunk: r = serialized_event["unsigned"].setdefault("m.relations", {}) - r[RelationTypes.REFERENCES] = references.to_dict() + r[RelationTypes.REFERENCE] = references.to_dict() edit = None if event.type == EventTypes.Message: @@ -382,7 +382,7 @@ class EventClientSerializer(object): serialized_event["content"].pop("m.relates_to", None) r = serialized_event["unsigned"].setdefault("m.relations", {}) - r[RelationTypes.REPLACES] = { + r[RelationTypes.REPLACE] = { "event_id": edit.event_id, } diff --git a/synapse/storage/relations.py b/synapse/storage/relations.py index 63e6185ee3..493abe405e 100644 --- a/synapse/storage/relations.py +++ b/synapse/storage/relations.py @@ -351,7 +351,7 @@ class RelationsWorkerStore(SQLBaseStore): def _get_applicable_edit_txn(txn): txn.execute( - sql, (event_id, RelationTypes.REPLACES,) + sql, (event_id, RelationTypes.REPLACE,) ) row = txn.fetchone() if row: @@ -384,8 +384,8 @@ class RelationsStore(RelationsWorkerStore): rel_type = relation.get("rel_type") if rel_type not in ( RelationTypes.ANNOTATION, - RelationTypes.REFERENCES, - RelationTypes.REPLACES, + RelationTypes.REFERENCE, + RelationTypes.REPLACE, ): # Unknown relation type return @@ -413,7 +413,7 @@ class RelationsStore(RelationsWorkerStore): self.get_aggregation_groups_for_event.invalidate_many, (parent_id,) ) - if rel_type == RelationTypes.REPLACES: + if rel_type == RelationTypes.REPLACE: txn.call_after(self.get_applicable_edit.invalidate, (parent_id,)) def _handle_redaction(self, txn, redacted_event_id): diff --git a/tests/rest/client/v2_alpha/test_relations.py b/tests/rest/client/v2_alpha/test_relations.py index b3dc4bd5bc..3d040cf118 100644 --- a/tests/rest/client/v2_alpha/test_relations.py +++ b/tests/rest/client/v2_alpha/test_relations.py @@ -363,8 +363,8 @@ class RelationsTestCase(unittest.HomeserverTestCase): request, channel = self.make_request( "GET", - "/_matrix/client/unstable/rooms/%s/aggregations/%s/m.replace?limit=1" - % (self.room, self.parent_id), + "/_matrix/client/unstable/rooms/%s/aggregations/%s/%s?limit=1" + % (self.room, self.parent_id, RelationTypes.REPLACE), access_token=self.user_token, ) self.render(request) @@ -386,11 +386,11 @@ class RelationsTestCase(unittest.HomeserverTestCase): channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b") self.assertEquals(200, channel.code, channel.json_body) - channel = self._send_relation(RelationTypes.REFERENCES, "m.room.test") + channel = self._send_relation(RelationTypes.REFERENCE, "m.room.test") self.assertEquals(200, channel.code, channel.json_body) reply_1 = channel.json_body["event_id"] - channel = self._send_relation(RelationTypes.REFERENCES, "m.room.test") + channel = self._send_relation(RelationTypes.REFERENCE, "m.room.test") self.assertEquals(200, channel.code, channel.json_body) reply_2 = channel.json_body["event_id"] @@ -411,7 +411,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): {"type": 
"m.reaction", "key": "b", "count": 1}, ] }, - RelationTypes.REFERENCES: { + RelationTypes.REFERENCE: { "chunk": [{"event_id": reply_1}, {"event_id": reply_2}] }, }, @@ -423,7 +423,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): new_body = {"msgtype": "m.text", "body": "I've been edited!"} channel = self._send_relation( - RelationTypes.REPLACES, + RelationTypes.REPLACE, "m.room.message", content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body}, ) @@ -443,7 +443,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): self.assertEquals( channel.json_body["unsigned"].get("m.relations"), - {RelationTypes.REPLACES: {"event_id": edit_event_id}}, + {RelationTypes.REPLACE: {"event_id": edit_event_id}}, ) def test_multi_edit(self): @@ -452,7 +452,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): """ channel = self._send_relation( - RelationTypes.REPLACES, + RelationTypes.REPLACE, "m.room.message", content={ "msgtype": "m.text", @@ -464,7 +464,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): new_body = {"msgtype": "m.text", "body": "I've been edited!"} channel = self._send_relation( - RelationTypes.REPLACES, + RelationTypes.REPLACE, "m.room.message", content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body}, ) @@ -473,7 +473,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): edit_event_id = channel.json_body["event_id"] channel = self._send_relation( - RelationTypes.REPLACES, + RelationTypes.REPLACE, "m.room.message.WRONG_TYPE", content={ "msgtype": "m.text", @@ -495,7 +495,7 @@ class RelationsTestCase(unittest.HomeserverTestCase): self.assertEquals( channel.json_body["unsigned"].get("m.relations"), - {RelationTypes.REPLACES: {"event_id": edit_event_id}}, + {RelationTypes.REPLACE: {"event_id": edit_event_id}}, ) def _send_relation( From d642178654a025664fd4ff984c1259a1b1271845 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 20 May 2019 14:32:16 +0100 Subject: [PATCH 171/429] Newsfile --- changelog.d/5211.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5211.feature diff --git a/changelog.d/5211.feature b/changelog.d/5211.feature new file mode 100644 index 0000000000..747098c166 --- /dev/null +++ b/changelog.d/5211.feature @@ -0,0 +1 @@ +Add experimental support for relations (aka reactions and edits). From 5206648a4a2c94543d46e5c22da6fd595b120eeb Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 20 May 2019 15:54:42 +0100 Subject: [PATCH 172/429] Add a test room version which updates event ID format (#5210) Implements MSC1884 --- changelog.d/5210.feature | 1 + synapse/api/room_versions.py | 13 +++++++++++-- synapse/events/__init__.py | 23 ++++++++++++++++++++++- 3 files changed, 34 insertions(+), 3 deletions(-) create mode 100644 changelog.d/5210.feature diff --git a/changelog.d/5210.feature b/changelog.d/5210.feature new file mode 100644 index 0000000000..a7476bf9b9 --- /dev/null +++ b/changelog.d/5210.feature @@ -0,0 +1 @@ +Add a new room version which uses a new event ID format. diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index e77abe1040..485b3d0237 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -19,13 +19,15 @@ class EventFormatVersions(object): """This is an internal enum for tracking the version of the event format, independently from the room version. 
""" - V1 = 1 # $id:server format - V2 = 2 # MSC1659-style $hash format: introduced for room v3 + V1 = 1 # $id:server event id format + V2 = 2 # MSC1659-style $hash event id format: introduced for room v3 + V3 = 3 # MSC1884-style $hash format: introduced for room v4 KNOWN_EVENT_FORMAT_VERSIONS = { EventFormatVersions.V1, EventFormatVersions.V2, + EventFormatVersions.V3, } @@ -75,6 +77,12 @@ class RoomVersions(object): EventFormatVersions.V2, StateResolutionVersions.V2, ) + EVENTID_NOSLASH_TEST = RoomVersion( + "eventid-noslash-test", + RoomDisposition.UNSTABLE, + EventFormatVersions.V3, + StateResolutionVersions.V2, + ) # the version we will give rooms which are created on this server @@ -87,5 +95,6 @@ KNOWN_ROOM_VERSIONS = { RoomVersions.V2, RoomVersions.V3, RoomVersions.STATE_V2_TEST, + RoomVersions.EVENTID_NOSLASH_TEST, ) } # type: dict[str, RoomVersion] diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index 12056d5be2..badeb903fc 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -335,13 +335,32 @@ class FrozenEventV2(EventBase): return self.__repr__() def __repr__(self): - return "" % ( + return "<%s event_id='%s', type='%s', state_key='%s'>" % ( + self.__class__.__name__, self.event_id, self.get("type", None), self.get("state_key", None), ) +class FrozenEventV3(FrozenEventV2): + """FrozenEventV3, which differs from FrozenEventV2 only in the event_id format""" + format_version = EventFormatVersions.V3 # All events of this type are V3 + + @property + def event_id(self): + # We have to import this here as otherwise we get an import loop which + # is hard to break. + from synapse.crypto.event_signing import compute_event_reference_hash + + if self._event_id: + return self._event_id + self._event_id = "$" + encode_base64( + compute_event_reference_hash(self)[1], urlsafe=True + ) + return self._event_id + + def room_version_to_event_format(room_version): """Converts a room version string to the event format @@ -376,6 +395,8 @@ def event_type_from_format_version(format_version): return FrozenEvent elif format_version == EventFormatVersions.V2: return FrozenEventV2 + elif format_version == EventFormatVersions.V3: + return FrozenEventV3 else: raise Exception( "No event format %r" % (format_version,) From 24b93b9c76e0623c231abf37b84e0b841879b890 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 20 May 2019 16:21:34 +0100 Subject: [PATCH 173/429] Revert "expose SlavedProfileStore to ClientReaderSlavedStore (#5200)" This reverts commit ce5bcefc609db40740c692bd53a1ef84ab675e8c. 
This caused: ``` Traceback (most recent call last): File "/usr/local/lib/python3.7/runpy.py", line 193, in _run_module_as_main "__main__", mod_spec) File "/usr/local/lib/python3.7/runpy.py", line 85, in _run_code exec(code, run_globals) File "/home/synapse/src/synapse/app/client_reader.py", line 32, in from synapse.replication.slave.storage import SlavedProfileStore ImportError: cannot import name 'SlavedProfileStore' from 'synapse.replication.slave.storage' (/home/synapse/src/synapse/replication/slave/storage/__init__.py) error starting synapse.app.client_reader('/home/synapse/config/workers/client_reader.yaml') (exit code: 1); see above for logs ``` --- changelog.d/5200.bugfix | 1 - synapse/app/client_reader.py | 2 -- 2 files changed, 3 deletions(-) delete mode 100644 changelog.d/5200.bugfix diff --git a/changelog.d/5200.bugfix b/changelog.d/5200.bugfix deleted file mode 100644 index f346c7b0cc..0000000000 --- a/changelog.d/5200.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix worker registration bug caused by ClientReaderSlavedStore being unable to see get_profileinfo. diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index 707f7c2f07..864f1eac48 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -29,7 +29,6 @@ from synapse.http.server import JsonResource from synapse.http.site import SynapseSite from synapse.metrics import RegistryProxy from synapse.metrics.resource import METRICS_PREFIX, MetricsResource -from synapse.replication.slave.storage import SlavedProfileStore from synapse.replication.slave.storage._base import BaseSlavedStore from synapse.replication.slave.storage.account_data import SlavedAccountDataStore from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore @@ -84,7 +83,6 @@ class ClientReaderSlavedStore( SlavedTransactionStore, SlavedClientIpStore, BaseSlavedStore, - SlavedProfileStore, ): pass From c7ec06e8a6909343f5860a5eecbe3aa69f03c151 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 20 May 2019 17:39:05 +0100 Subject: [PATCH 174/429] Block attempts to annotate the same event twice --- synapse/handlers/message.py | 16 ++++++- synapse/storage/relations.py | 48 ++++++++++++++++++-- tests/rest/client/v2_alpha/test_relations.py | 27 ++++++++++- 3 files changed, 86 insertions(+), 5 deletions(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 7b2c33a922..0c892c8dba 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -22,7 +22,7 @@ from canonicaljson import encode_canonical_json, json from twisted.internet import defer from twisted.internet.defer import succeed -from synapse.api.constants import EventTypes, Membership +from synapse.api.constants import EventTypes, Membership, RelationTypes from synapse.api.errors import ( AuthError, Codes, @@ -601,6 +601,20 @@ class EventCreationHandler(object): self.validator.validate_new(event) + # We now check that if this event is an annotation that the can't + # annotate the same way twice (e.g. stops users from liking an event + # multiple times). 
+ relation = event.content.get("m.relates_to", {}) + if relation.get("rel_type") == RelationTypes.ANNOTATION: + relates_to = relation["event_id"] + aggregation_key = relation["key"] + + already_exists = yield self.store.has_user_annotated_event( + relates_to, event.type, aggregation_key, event.sender, + ) + if already_exists: + raise SynapseError(400, "Can't send same reaction twice") + logger.debug( "Created event %s", event.event_id, diff --git a/synapse/storage/relations.py b/synapse/storage/relations.py index 493abe405e..7d51b38d77 100644 --- a/synapse/storage/relations.py +++ b/synapse/storage/relations.py @@ -350,9 +350,7 @@ class RelationsWorkerStore(SQLBaseStore): """ def _get_applicable_edit_txn(txn): - txn.execute( - sql, (event_id, RelationTypes.REPLACE,) - ) + txn.execute(sql, (event_id, RelationTypes.REPLACE)) row = txn.fetchone() if row: return row[0] @@ -367,6 +365,50 @@ class RelationsWorkerStore(SQLBaseStore): edit_event = yield self.get_event(edit_id, allow_none=True) defer.returnValue(edit_event) + def has_user_annotated_event(self, parent_id, event_type, aggregation_key, sender): + """Check if a user has already annotated an event with the same key + (e.g. already liked an event). + + Args: + parent_id (str): The event being annotated + event_type (str): The event type of the annotation + aggregation_key (str): The aggregation key of the annotation + sender (str): The sender of the annotation + + Returns: + Deferred[bool] + """ + + sql = """ + SELECT 1 FROM event_relations + INNER JOIN events USING (event_id) + WHERE + relates_to_id = ? + AND relation_type = ? + AND type = ? + AND sender = ? + AND aggregation_key = ? + LIMIT 1; + """ + + def _get_if_user_has_annotated_event(txn): + txn.execute( + sql, + ( + parent_id, + RelationTypes.ANNOTATION, + event_type, + sender, + aggregation_key, + ), + ) + + return bool(txn.fetchone()) + + return self.runInteraction( + "get_if_user_has_annotated_event", _get_if_user_has_annotated_event + ) + class RelationsStore(RelationsWorkerStore): def _handle_event_relations(self, txn, event): diff --git a/tests/rest/client/v2_alpha/test_relations.py b/tests/rest/client/v2_alpha/test_relations.py index 3d040cf118..43b3049daa 100644 --- a/tests/rest/client/v2_alpha/test_relations.py +++ b/tests/rest/client/v2_alpha/test_relations.py @@ -90,6 +90,15 @@ class RelationsTestCase(unittest.HomeserverTestCase): channel = self._send_relation(RelationTypes.ANNOTATION, EventTypes.Member) self.assertEquals(400, channel.code, channel.json_body) + def test_deny_double_react(self): + """Test that we deny relations on membership events + """ + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") + self.assertEquals(200, channel.code, channel.json_body) + + channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") + self.assertEquals(400, channel.code, channel.json_body) + def test_basic_paginate_relations(self): """Tests that calling pagination API corectly the latest relations. """ @@ -234,14 +243,30 @@ class RelationsTestCase(unittest.HomeserverTestCase): """Test that we can paginate within an annotation group. """ + # We need to create ten separate users to send each reaction. 
+ access_tokens = [self.user_token, self.user2_token] + idx = 0 + while len(access_tokens) < 10: + user_id, token = self._create_user("test" + str(idx)) + idx += 1 + + self.helper.join(self.room, user=user_id, tok=token) + access_tokens.append(token) + + idx = 0 expected_event_ids = [] for _ in range(10): channel = self._send_relation( - RelationTypes.ANNOTATION, "m.reaction", key=u"👍" + RelationTypes.ANNOTATION, + "m.reaction", + key=u"👍", + access_token=access_tokens[idx], ) self.assertEquals(200, channel.code, channel.json_body) expected_event_ids.append(channel.json_body["event_id"]) + idx += 1 + # Also send a different type of reaction so that we test we don't see it channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="a") self.assertEquals(200, channel.code, channel.json_body) From 0620dd49db505a7830486c8798eabc455764c79c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 20 May 2019 17:40:24 +0100 Subject: [PATCH 175/429] Newsfile --- changelog.d/5212.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5212.feature diff --git a/changelog.d/5212.feature b/changelog.d/5212.feature new file mode 100644 index 0000000000..747098c166 --- /dev/null +++ b/changelog.d/5212.feature @@ -0,0 +1 @@ +Add experimental support for relations (aka reactions and edits). From 5ceee46c6bd55376ff2188b6e674035d7da95f24 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 21 May 2019 13:38:51 +0100 Subject: [PATCH 176/429] Do the select and insert in a single transaction --- synapse/storage/_base.py | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 06d516c9e2..fa6839ceca 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -304,21 +304,17 @@ class SQLBaseStore(object): " WHERE account_validity.user_id is NULL;" ) txn.execute(sql, []) - return self.cursor_to_dict(txn) - res = yield self.runInteraction( + res = self.cursor_to_dict(txn) + if res: + for user in res: + self.set_expiration_date_for_user_txn(txn, user["name"]) + + yield self.runInteraction( "get_users_with_no_expiration_date", select_users_with_no_expiration_date_txn, ) - if res: - for user in res: - self.runInteraction( - "set_expiration_date_for_user_background", - self.set_expiration_date_for_user_txn, - user["name"], - ) - def set_expiration_date_for_user_txn(self, txn, user_id): """Sets an expiration date to the account with the given user ID. From 04d53794d6848cca2567d67f494ba8405d0bf1cf Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 21 May 2019 13:47:25 +0100 Subject: [PATCH 177/429] Fix error handling for rooms whose versions are unknown. (#5219) If we remove support for a particular room version, we should behave more gracefully. This should make client requests fail with a 400 rather than a 500, and will ignore individiual PDUs in a federation transaction, rather than the whole transaction. 
--- changelog.d/5219.bugfix | 1 + synapse/api/errors.py | 18 ++++++++++++++++-- synapse/events/__init__.py | 8 ++++++-- synapse/events/builder.py | 6 +++--- synapse/federation/federation_server.py | 14 +++++++++++++- 5 files changed, 39 insertions(+), 8 deletions(-) create mode 100644 changelog.d/5219.bugfix diff --git a/changelog.d/5219.bugfix b/changelog.d/5219.bugfix new file mode 100644 index 0000000000..c1e17adc5d --- /dev/null +++ b/changelog.d/5219.bugfix @@ -0,0 +1 @@ +Fix error handling for rooms whose versions are unknown. diff --git a/synapse/api/errors.py b/synapse/api/errors.py index ff89259dec..e91697049c 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -328,9 +328,23 @@ class RoomKeysVersionError(SynapseError): self.current_version = current_version -class IncompatibleRoomVersionError(SynapseError): - """A server is trying to join a room whose version it does not support.""" +class UnsupportedRoomVersionError(SynapseError): + """The client's request to create a room used a room version that the server does + not support.""" + def __init__(self): + super(UnsupportedRoomVersionError, self).__init__( + code=400, + msg="Homeserver does not support this room version", + errcode=Codes.UNSUPPORTED_ROOM_VERSION, + ) + +class IncompatibleRoomVersionError(SynapseError): + """A server is trying to join a room whose version it does not support. + + Unlike UnsupportedRoomVersionError, it is specific to the case of the make_join + failing. + """ def __init__(self, room_version): super(IncompatibleRoomVersionError, self).__init__( code=400, diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index badeb903fc..1edd19cc13 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -21,6 +21,7 @@ import six from unpaddedbase64 import encode_base64 +from synapse.api.errors import UnsupportedRoomVersionError from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, EventFormatVersions from synapse.util.caches import intern_dict from synapse.util.frozenutils import freeze @@ -369,12 +370,15 @@ def room_version_to_event_format(room_version): Returns: int + + Raises: + UnsupportedRoomVersionError if the room version is unknown """ v = KNOWN_ROOM_VERSIONS.get(room_version) if not v: - # We should have already checked version, so this should not happen - raise RuntimeError("Unrecognized room version %s" % (room_version,)) + # this can happen if support is withdrawn for a room version + raise UnsupportedRoomVersionError() return v.event_format diff --git a/synapse/events/builder.py b/synapse/events/builder.py index fba27177c7..1fe995f212 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -18,6 +18,7 @@ import attr from twisted.internet import defer from synapse.api.constants import MAX_DEPTH +from synapse.api.errors import UnsupportedRoomVersionError from synapse.api.room_versions import ( KNOWN_EVENT_FORMAT_VERSIONS, KNOWN_ROOM_VERSIONS, @@ -178,9 +179,8 @@ class EventBuilderFactory(object): """ v = KNOWN_ROOM_VERSIONS.get(room_version) if not v: - raise Exception( - "No event format defined for version %r" % (room_version,) - ) + # this can happen if support is withdrawn for a room version + raise UnsupportedRoomVersionError() return self.for_room_version(v, key_values) def for_room_version(self, room_version, key_values): diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index df60828dba..4c28c1dc3c 100644 --- a/synapse/federation/federation_server.py +++ 
b/synapse/federation/federation_server.py @@ -33,6 +33,7 @@ from synapse.api.errors import ( IncompatibleRoomVersionError, NotFoundError, SynapseError, + UnsupportedRoomVersionError, ) from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.crypto.event_signing import compute_event_signature @@ -198,11 +199,22 @@ class FederationServer(FederationBase): try: room_version = yield self.store.get_room_version(room_id) - format_ver = room_version_to_event_format(room_version) except NotFoundError: logger.info("Ignoring PDU for unknown room_id: %s", room_id) continue + try: + format_ver = room_version_to_event_format(room_version) + except UnsupportedRoomVersionError: + # this can happen if support for a given room version is withdrawn, + # so that we still get events for said room. + logger.info( + "Ignoring PDU for room %s with unknown version %s", + room_id, + room_version, + ) + continue + event = event_from_pdu_json(p, format_ver) pdus_by_room.setdefault(room_id, []).append(event) From de7672b78f4dc250f7cfe151f4185bf186f3728c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 21 May 2019 13:54:09 +0100 Subject: [PATCH 178/429] Don't bundle events in /sync or /events As we'll send down the annotations too anyway, so this just ends up confusing clients. --- synapse/events/utils.py | 5 +++-- synapse/handlers/events.py | 3 +++ synapse/rest/client/v2_alpha/sync.py | 3 +++ 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/synapse/events/utils.py b/synapse/events/utils.py index 27a2a9ef98..e2d4384de1 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -330,12 +330,13 @@ class EventClientSerializer(object): ) @defer.inlineCallbacks - def serialize_event(self, event, time_now, **kwargs): + def serialize_event(self, event, time_now, bundle_aggregations=True, **kwargs): """Serializes a single event. Args: event (EventBase) time_now (int): The current time in milliseconds + bundle_aggregations (bool): Whether to bundle in related events **kwargs: Arguments to pass to `serialize_event` Returns: @@ -350,7 +351,7 @@ class EventClientSerializer(object): # If MSC1849 is enabled then we need to look if thre are any relations # we need to bundle in with the event - if self.experimental_msc1849_support_enabled: + if self.experimental_msc1849_support_enabled and bundle_aggregations: annotations = yield self.store.get_aggregation_groups_for_event( event_id, ) diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index 6003ad9cca..eb525070cf 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -122,6 +122,9 @@ class EventStreamHandler(BaseHandler): chunks = yield self._event_serializer.serialize_events( events, time_now, as_client_event=as_client_event, + # We don't bundle "live" events, as otherwise clients + # will end up double counting annotations. + bundle_aggregations=False, ) chunk = { diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index c701e534e7..d3025025e3 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -358,6 +358,9 @@ class SyncRestServlet(RestServlet): def serialize(events): return self._event_serializer.serialize_events( events, time_now=time_now, + # We don't bundle "live" events, as otherwise clients + # will end up double counting annotations. 
+ bundle_aggregations=False, token_id=token_id, event_format=event_formatter, only_event_fields=only_fields, From ef13dc4846793b8969fbe76ebc7b7f3c607c8c1a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 21 May 2019 13:59:09 +0100 Subject: [PATCH 179/429] Newsfile --- changelog.d/5220.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5220.feature diff --git a/changelog.d/5220.feature b/changelog.d/5220.feature new file mode 100644 index 0000000000..747098c166 --- /dev/null +++ b/changelog.d/5220.feature @@ -0,0 +1 @@ +Add experimental support for relations (aka reactions and edits). From 384122efa84ed419e4e18f6156ded3d4ea4f43da Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 21 May 2019 14:39:36 +0100 Subject: [PATCH 180/429] Doc --- docs/sample_config.yaml | 8 ++++++++ synapse/config/registration.py | 8 ++++++++ 2 files changed, 16 insertions(+) diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index d218aefee5..f658ec8ecd 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -747,6 +747,14 @@ uploads_path: "DATADIR/uploads" # link. ``%(app)s`` can be used as a placeholder for the ``app_name`` parameter # from the ``email`` section. # +# Once this feature is enabled, Synapse will look for registered users without an +# expiration date at startup and will add one to every account it found using the +# current settings at that time. +# This means that, if a validity period is set, and Synapse is restarted (it will +# then derive an expiration date from the current validity period), and some time +# after that the validity period changes and Synapse is restarted, the users' +# expiration dates won't be updated unless their account is manually renewed. +# #account_validity: # enabled: True # period: 6w diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 1309bce3ee..693288f938 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -123,6 +123,14 @@ class RegistrationConfig(Config): # link. ``%%(app)s`` can be used as a placeholder for the ``app_name`` parameter # from the ``email`` section. # + # Once this feature is enabled, Synapse will look for registered users without an + # expiration date at startup and will add one to every account it found using the + # current settings at that time. + # This means that, if a validity period is set, and Synapse is restarted (it will + # then derive an expiration date from the current validity period), and some time + # after that the validity period changes and Synapse is restarted, the users' + # expiration dates won't be updated unless their account is manually renewed. + # #account_validity: # enabled: True # period: 6w From 7b0e804a4a684a210abf5107e720582f68f464e7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 21 May 2019 15:21:38 +0100 Subject: [PATCH 181/429] Fix get_max_topological_token to never return None --- synapse/storage/stream.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 529ad4ea79..0b5f5f9663 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -592,8 +592,18 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): ) def get_max_topological_token(self, room_id, stream_key): + """Get the max topological token in a room that before given stream + ordering. 
+ + Args: + room_id (str) + stream_key (int) + + Returns: + Deferred[int] + """ sql = ( - "SELECT max(topological_ordering) FROM events" + "SELECT coalesce(max(topological_ordering), 0) FROM events" " WHERE room_id = ? AND stream_ordering < ?" ) return self._execute( From c448f35de21119c877e31d2fc6c31de4dbe7db23 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 21 May 2019 15:35:13 +0100 Subject: [PATCH 182/429] Newsfile --- changelog.d/5221.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5221.bugfix diff --git a/changelog.d/5221.bugfix b/changelog.d/5221.bugfix new file mode 100644 index 0000000000..03aa363d15 --- /dev/null +++ b/changelog.d/5221.bugfix @@ -0,0 +1 @@ +Fix race when backfilling in rooms with worker mode. From bab3eddac46a0d7bdfccfc980eececbf8f60f721 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 21 May 2019 15:58:01 +0100 Subject: [PATCH 183/429] Pin eliot to <1.8 on python 3.5.2 (#5218) * Pin eliot to <1.8 on python 3.5.2 Fixes https://github.com/matrix-org/synapse/issues/5199 * Add support for 'markers' to python_dependencies * tell xargs not to strip quotes --- changelog.d/5218.bugfix | 1 + synapse/python_dependencies.py | 40 +++++++++++++++++++++++++++++----- tox.ini | 2 +- 3 files changed, 36 insertions(+), 7 deletions(-) create mode 100644 changelog.d/5218.bugfix diff --git a/changelog.d/5218.bugfix b/changelog.d/5218.bugfix new file mode 100644 index 0000000000..cd624ecfd0 --- /dev/null +++ b/changelog.d/5218.bugfix @@ -0,0 +1 @@ +Fix incompatibility between ACME support and Python 3.5.2. diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index fdcfb90a7e..e3f828c4bb 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -16,7 +16,12 @@ import logging -from pkg_resources import DistributionNotFound, VersionConflict, get_distribution +from pkg_resources import ( + DistributionNotFound, + Requirement, + VersionConflict, + get_provider, +) logger = logging.getLogger(__name__) @@ -91,7 +96,13 @@ CONDITIONAL_REQUIREMENTS = { # ACME support is required to provision TLS certificates from authorities # that use the protocol, such as Let's Encrypt. - "acme": ["txacme>=0.9.2"], + "acme": [ + "txacme>=0.9.2", + + # txacme depends on eliot. 
Eliot 1.8.0 is incompatible with + # python 3.5.2, as per https://github.com/itamarst/eliot/issues/418 + 'eliot<1.8.0;python_version<"3.5.3"', + ], "saml2": ["pysaml2>=4.5.0"], "systemd": ["systemd-python>=231"], @@ -125,10 +136,10 @@ class DependencyException(Exception): @property def dependencies(self): for i in self.args[0]: - yield '"' + i + '"' + yield "'" + i + "'" -def check_requirements(for_feature=None, _get_distribution=get_distribution): +def check_requirements(for_feature=None): deps_needed = [] errors = [] @@ -139,7 +150,7 @@ def check_requirements(for_feature=None, _get_distribution=get_distribution): for dependency in reqs: try: - _get_distribution(dependency) + _check_requirement(dependency) except VersionConflict as e: deps_needed.append(dependency) errors.append( @@ -157,7 +168,7 @@ def check_requirements(for_feature=None, _get_distribution=get_distribution): for dependency in OPTS: try: - _get_distribution(dependency) + _check_requirement(dependency) except VersionConflict as e: deps_needed.append(dependency) errors.append( @@ -175,6 +186,23 @@ def check_requirements(for_feature=None, _get_distribution=get_distribution): raise DependencyException(deps_needed) +def _check_requirement(dependency_string): + """Parses a dependency string, and checks if the specified requirement is installed + + Raises: + VersionConflict if the requirement is installed, but with the the wrong version + DistributionNotFound if nothing is found to provide the requirement + """ + req = Requirement.parse(dependency_string) + + # first check if the markers specify that this requirement needs installing + if req.marker is not None and not req.marker.evaluate(): + # not required for this environment + return + + get_provider(req) + + if __name__ == "__main__": import sys diff --git a/tox.ini b/tox.ini index d0e519ce46..543b232ae7 100644 --- a/tox.ini +++ b/tox.ini @@ -94,7 +94,7 @@ commands = # Make all greater-thans equals so we test the oldest version of our direct # dependencies, but make the pyopenssl 17.0, which can work against an # OpenSSL 1.1 compiled cryptography (as older ones don't compile on Travis). - /bin/sh -c 'python -m synapse.python_dependencies | sed -e "s/>=/==/g" -e "s/psycopg2==2.6//" -e "s/pyopenssl==16.0.0/pyopenssl==17.0.0/" | xargs pip install' + /bin/sh -c 'python -m synapse.python_dependencies | sed -e "s/>=/==/g" -e "s/psycopg2==2.6//" -e "s/pyopenssl==16.0.0/pyopenssl==17.0.0/" | xargs -d"\n" pip install' # Add this so that coverage will run on subprocesses /bin/sh -c 'echo "import coverage; coverage.process_startup()" > {envsitepackagesdir}/../sitecustomize.py' From c4aef549ad1f55661675a789f89fe9e041fac874 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 21 May 2019 16:10:54 +0100 Subject: [PATCH 184/429] Exclude soft-failed events from fwd-extremity candidates. (#5146) When considering the candidates to be forward-extremities, we must exclude soft failures. Hopefully fixes #5090. --- changelog.d/5146.bugfix | 1 + synapse/handlers/federation.py | 7 ++++++- synapse/storage/events.py | 9 +++++++-- 3 files changed, 14 insertions(+), 3 deletions(-) create mode 100644 changelog.d/5146.bugfix diff --git a/changelog.d/5146.bugfix b/changelog.d/5146.bugfix new file mode 100644 index 0000000000..a54abed92b --- /dev/null +++ b/changelog.d/5146.bugfix @@ -0,0 +1 @@ +Exclude soft-failed events from forward-extremity candidates: fixes "No forward extremities left!" error. 
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 0684778882..2202ed699a 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1916,6 +1916,11 @@ class FederationHandler(BaseHandler): event.room_id, latest_event_ids=extrem_ids, ) + logger.debug( + "Doing soft-fail check for %s: state %s", + event.event_id, current_state_ids, + ) + # Now check if event pass auth against said current state auth_types = auth_types_for_event(event) current_state_ids = [ @@ -1932,7 +1937,7 @@ class FederationHandler(BaseHandler): self.auth.check(room_version, event, auth_events=current_auth_events) except AuthError as e: logger.warn( - "Failed current state auth resolution for %r because %s", + "Soft-failing %r because %s", event, e, ) event.internal_metadata.soft_failed = True diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 881d6d0126..2ffc27ff41 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -575,10 +575,11 @@ class EventsStore( def _get_events(txn, batch): sql = """ - SELECT prev_event_id + SELECT prev_event_id, internal_metadata FROM event_edges INNER JOIN events USING (event_id) LEFT JOIN rejections USING (event_id) + LEFT JOIN event_json USING (event_id) WHERE prev_event_id IN (%s) AND NOT events.outlier @@ -588,7 +589,11 @@ class EventsStore( ) txn.execute(sql, batch) - results.extend(r[0] for r in txn) + results.extend( + r[0] + for r in txn + if not json.loads(r[1]).get("soft_failed") + ) for chunk in batch_iter(event_ids, 100): yield self.runInteraction("_get_events_which_are_prevs", _get_events, chunk) From 17f68048374cda8bc639d7c048ae21624a124635 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 21 May 2019 16:22:54 +0100 Subject: [PATCH 185/429] Introduce room v4 which updates event ID format. (#5217) Implements https://github.com/matrix-org/matrix-doc/pull/2002. --- changelog.d/5210.feature | 2 +- changelog.d/5217.feature | 1 + synapse/api/room_versions.py | 8 ++++---- 3 files changed, 6 insertions(+), 5 deletions(-) create mode 100644 changelog.d/5217.feature diff --git a/changelog.d/5210.feature b/changelog.d/5210.feature index a7476bf9b9..c78325a6ac 100644 --- a/changelog.d/5210.feature +++ b/changelog.d/5210.feature @@ -1 +1 @@ -Add a new room version which uses a new event ID format. +Add a room version 4 which uses a new event ID format, as per [MSC2002](https://github.com/matrix-org/matrix-doc/pull/2002). diff --git a/changelog.d/5217.feature b/changelog.d/5217.feature new file mode 100644 index 0000000000..c78325a6ac --- /dev/null +++ b/changelog.d/5217.feature @@ -0,0 +1 @@ +Add a room version 4 which uses a new event ID format, as per [MSC2002](https://github.com/matrix-org/matrix-doc/pull/2002). 
diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index 485b3d0237..b2895355a8 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -77,9 +77,9 @@ class RoomVersions(object): EventFormatVersions.V2, StateResolutionVersions.V2, ) - EVENTID_NOSLASH_TEST = RoomVersion( - "eventid-noslash-test", - RoomDisposition.UNSTABLE, + V4 = RoomVersion( + "4", + RoomDisposition.STABLE, EventFormatVersions.V3, StateResolutionVersions.V2, ) @@ -95,6 +95,6 @@ KNOWN_ROOM_VERSIONS = { RoomVersions.V2, RoomVersions.V3, RoomVersions.STATE_V2_TEST, - RoomVersions.EVENTID_NOSLASH_TEST, + RoomVersions.V4, ) } # type: dict[str, RoomVersion] From 44b8ba484e67c0f13ca566ef6776b95504bced12 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 21 May 2019 16:51:45 +0100 Subject: [PATCH 186/429] Fix words --- synapse/handlers/message.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 0c892c8dba..792edc7579 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -601,9 +601,9 @@ class EventCreationHandler(object): self.validator.validate_new(event) - # We now check that if this event is an annotation that the can't - # annotate the same way twice (e.g. stops users from liking an event - # multiple times). + # If this event is an annotation then we check that that the sender + # can't annotate the same way twice (e.g. stops users from liking an + # event multiple times). relation = event.content.get("m.relates_to", {}) if relation.get("rel_type") == RelationTypes.ANNOTATION: relates_to = relation["event_id"] From 959550b645a0752f6a8592d90fff44683b189523 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 21 May 2019 16:51:49 +0100 Subject: [PATCH 187/429] 0.99.5rc1 --- CHANGES.md | 39 +++++++++++++++++++++++++++++++++++++++ changelog.d/3484.misc | 1 - changelog.d/5039.bugfix | 1 - changelog.d/5043.feature | 1 - changelog.d/5146.bugfix | 1 - changelog.d/5171.misc | 1 - changelog.d/5174.bugfix | 1 - changelog.d/5177.bugfix | 1 - changelog.d/5181.feature | 1 - changelog.d/5183.misc | 1 - changelog.d/5184.misc | 1 - changelog.d/5185.misc | 1 - changelog.d/5187.bugfix | 1 - changelog.d/5190.feature | 1 - changelog.d/5191.misc | 1 - changelog.d/5196.feature | 1 - changelog.d/5197.misc | 1 - changelog.d/5198.bugfix | 1 - changelog.d/5204.feature | 1 - changelog.d/5209.feature | 1 - changelog.d/5210.feature | 1 - changelog.d/5211.feature | 1 - changelog.d/5217.feature | 1 - changelog.d/5218.bugfix | 1 - changelog.d/5219.bugfix | 1 - synapse/__init__.py | 2 +- 26 files changed, 40 insertions(+), 25 deletions(-) delete mode 100644 changelog.d/3484.misc delete mode 100644 changelog.d/5039.bugfix delete mode 100644 changelog.d/5043.feature delete mode 100644 changelog.d/5146.bugfix delete mode 100644 changelog.d/5171.misc delete mode 100644 changelog.d/5174.bugfix delete mode 100644 changelog.d/5177.bugfix delete mode 100644 changelog.d/5181.feature delete mode 100644 changelog.d/5183.misc delete mode 100644 changelog.d/5184.misc delete mode 100644 changelog.d/5185.misc delete mode 100644 changelog.d/5187.bugfix delete mode 100644 changelog.d/5190.feature delete mode 100644 changelog.d/5191.misc delete mode 100644 changelog.d/5196.feature delete mode 100644 changelog.d/5197.misc delete mode 100644 changelog.d/5198.bugfix delete mode 100644 changelog.d/5204.feature delete mode 100644 changelog.d/5209.feature delete mode 100644 changelog.d/5210.feature delete mode 
100644 changelog.d/5211.feature delete mode 100644 changelog.d/5217.feature delete mode 100644 changelog.d/5218.bugfix delete mode 100644 changelog.d/5219.bugfix diff --git a/CHANGES.md b/CHANGES.md index 1e9c3cf953..d23166caa8 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,42 @@ +Synapse 0.99.5rc1 (2019-05-21) +============================== + +Features +-------- + +- Add ability to blacklist IP ranges for the federation client. ([\#5043](https://github.com/matrix-org/synapse/issues/5043)) +- Ratelimiting configuration for clients sending messages and the federation server has been altered to match login ratelimiting. The old configuration names will continue working. Check the sample config for details of the new names. ([\#5181](https://github.com/matrix-org/synapse/issues/5181)) +- Drop support for the undocumented /_matrix/client/v2_alpha API prefix. ([\#5190](https://github.com/matrix-org/synapse/issues/5190)) +- Add an option to disable per-room profiles. ([\#5196](https://github.com/matrix-org/synapse/issues/5196)) +- Stick an expiration date to any registered user missing one at startup if account validity is enabled. ([\#5204](https://github.com/matrix-org/synapse/issues/5204)) +- Add experimental support for relations (aka reactions and edits). ([\#5209](https://github.com/matrix-org/synapse/issues/5209), [\#5211](https://github.com/matrix-org/synapse/issues/5211)) +- Add a room version 4 which uses a new event ID format, as per [MSC2002](https://github.com/matrix-org/matrix-doc/pull/2002). ([\#5210](https://github.com/matrix-org/synapse/issues/5210), [\#5217](https://github.com/matrix-org/synapse/issues/5217)) + + +Bugfixes +-------- + +- Fix image orientation when generating thumbnails (needs pillow>=4.3.0). Contributed by Pau Rodriguez-Estivill. ([\#5039](https://github.com/matrix-org/synapse/issues/5039)) +- Exclude soft-failed events from forward-extremity candidates: fixes "No forward extremities left!" error. ([\#5146](https://github.com/matrix-org/synapse/issues/5146)) +- Re-order stages in registration flows such that msisdn and email verification are done last. ([\#5174](https://github.com/matrix-org/synapse/issues/5174)) +- Fix 3pid guest invites. ([\#5177](https://github.com/matrix-org/synapse/issues/5177)) +- Fix a bug where the register endpoint would fail with M_THREEPID_IN_USE instead of returning an account previously registered in the same session. ([\#5187](https://github.com/matrix-org/synapse/issues/5187)) +- Prevent registration for user ids that are to long to fit into a state key. Contributed by Reid Anderson. ([\#5198](https://github.com/matrix-org/synapse/issues/5198)) +- Fix incompatibility between ACME support and Python 3.5.2. ([\#5218](https://github.com/matrix-org/synapse/issues/5218)) +- Fix error handling for rooms whose versions are unknown. ([\#5219](https://github.com/matrix-org/synapse/issues/5219)) + + +Internal Changes +---------------- + +- Make /sync attempt to return device updates for both joined and invited users. Note that this doesn't currently work correctly due to other bugs. ([\#3484](https://github.com/matrix-org/synapse/issues/3484)) +- Update tests to consistently be configured via the same code that is used when loading from configuration files. ([\#5171](https://github.com/matrix-org/synapse/issues/5171), [\#5185](https://github.com/matrix-org/synapse/issues/5185)) +- Allow client event serialization to be async. 
([\#5183](https://github.com/matrix-org/synapse/issues/5183)) +- Expose DataStore._get_events as get_events_as_list. ([\#5184](https://github.com/matrix-org/synapse/issues/5184)) +- Make generating SQL bounds for pagination generic. ([\#5191](https://github.com/matrix-org/synapse/issues/5191)) +- Stop telling people to install the optional dependencies by default. ([\#5197](https://github.com/matrix-org/synapse/issues/5197)) + + Synapse 0.99.4 (2019-05-15) =========================== diff --git a/changelog.d/3484.misc b/changelog.d/3484.misc deleted file mode 100644 index 3645849844..0000000000 --- a/changelog.d/3484.misc +++ /dev/null @@ -1 +0,0 @@ -Make /sync attempt to return device updates for both joined and invited users. Note that this doesn't currently work correctly due to other bugs. diff --git a/changelog.d/5039.bugfix b/changelog.d/5039.bugfix deleted file mode 100644 index 212cff7ae8..0000000000 --- a/changelog.d/5039.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix image orientation when generating thumbnails (needs pillow>=4.3.0). Contributed by Pau Rodriguez-Estivill. diff --git a/changelog.d/5043.feature b/changelog.d/5043.feature deleted file mode 100644 index 0f1e0ee30e..0000000000 --- a/changelog.d/5043.feature +++ /dev/null @@ -1 +0,0 @@ -Add ability to blacklist IP ranges for the federation client. diff --git a/changelog.d/5146.bugfix b/changelog.d/5146.bugfix deleted file mode 100644 index a54abed92b..0000000000 --- a/changelog.d/5146.bugfix +++ /dev/null @@ -1 +0,0 @@ -Exclude soft-failed events from forward-extremity candidates: fixes "No forward extremities left!" error. diff --git a/changelog.d/5171.misc b/changelog.d/5171.misc deleted file mode 100644 index d148b03b51..0000000000 --- a/changelog.d/5171.misc +++ /dev/null @@ -1 +0,0 @@ -Update tests to consistently be configured via the same code that is used when loading from configuration files. diff --git a/changelog.d/5174.bugfix b/changelog.d/5174.bugfix deleted file mode 100644 index 0f26d46b2c..0000000000 --- a/changelog.d/5174.bugfix +++ /dev/null @@ -1 +0,0 @@ -Re-order stages in registration flows such that msisdn and email verification are done last. diff --git a/changelog.d/5177.bugfix b/changelog.d/5177.bugfix deleted file mode 100644 index c2f1644ae5..0000000000 --- a/changelog.d/5177.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix 3pid guest invites. diff --git a/changelog.d/5181.feature b/changelog.d/5181.feature deleted file mode 100644 index 5ce13aa2ea..0000000000 --- a/changelog.d/5181.feature +++ /dev/null @@ -1 +0,0 @@ -Ratelimiting configuration for clients sending messages and the federation server has been altered to match login ratelimiting. The old configuration names will continue working. Check the sample config for details of the new names. diff --git a/changelog.d/5183.misc b/changelog.d/5183.misc deleted file mode 100644 index a8970f29eb..0000000000 --- a/changelog.d/5183.misc +++ /dev/null @@ -1 +0,0 @@ -Allow client event serialization to be async. diff --git a/changelog.d/5184.misc b/changelog.d/5184.misc deleted file mode 100644 index 1588bdef6c..0000000000 --- a/changelog.d/5184.misc +++ /dev/null @@ -1 +0,0 @@ -Expose DataStore._get_events as get_events_as_list. diff --git a/changelog.d/5185.misc b/changelog.d/5185.misc deleted file mode 100644 index d148b03b51..0000000000 --- a/changelog.d/5185.misc +++ /dev/null @@ -1 +0,0 @@ -Update tests to consistently be configured via the same code that is used when loading from configuration files. 
diff --git a/changelog.d/5187.bugfix b/changelog.d/5187.bugfix deleted file mode 100644 index df176cf5b2..0000000000 --- a/changelog.d/5187.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug where the register endpoint would fail with M_THREEPID_IN_USE instead of returning an account previously registered in the same session. diff --git a/changelog.d/5190.feature b/changelog.d/5190.feature deleted file mode 100644 index 34904aa7a8..0000000000 --- a/changelog.d/5190.feature +++ /dev/null @@ -1 +0,0 @@ -Drop support for the undocumented /_matrix/client/v2_alpha API prefix. diff --git a/changelog.d/5191.misc b/changelog.d/5191.misc deleted file mode 100644 index e0615fec9c..0000000000 --- a/changelog.d/5191.misc +++ /dev/null @@ -1 +0,0 @@ -Make generating SQL bounds for pagination generic. diff --git a/changelog.d/5196.feature b/changelog.d/5196.feature deleted file mode 100644 index 1ffb928f62..0000000000 --- a/changelog.d/5196.feature +++ /dev/null @@ -1 +0,0 @@ -Add an option to disable per-room profiles. diff --git a/changelog.d/5197.misc b/changelog.d/5197.misc deleted file mode 100644 index fca1d86b2e..0000000000 --- a/changelog.d/5197.misc +++ /dev/null @@ -1 +0,0 @@ -Stop telling people to install the optional dependencies by default. diff --git a/changelog.d/5198.bugfix b/changelog.d/5198.bugfix deleted file mode 100644 index c6b156f17d..0000000000 --- a/changelog.d/5198.bugfix +++ /dev/null @@ -1 +0,0 @@ -Prevent registration for user ids that are to long to fit into a state key. Contributed by Reid Anderson. \ No newline at end of file diff --git a/changelog.d/5204.feature b/changelog.d/5204.feature deleted file mode 100644 index 2a7212ca18..0000000000 --- a/changelog.d/5204.feature +++ /dev/null @@ -1 +0,0 @@ -Stick an expiration date to any registered user missing one at startup if account validity is enabled. diff --git a/changelog.d/5209.feature b/changelog.d/5209.feature deleted file mode 100644 index 747098c166..0000000000 --- a/changelog.d/5209.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental support for relations (aka reactions and edits). diff --git a/changelog.d/5210.feature b/changelog.d/5210.feature deleted file mode 100644 index c78325a6ac..0000000000 --- a/changelog.d/5210.feature +++ /dev/null @@ -1 +0,0 @@ -Add a room version 4 which uses a new event ID format, as per [MSC2002](https://github.com/matrix-org/matrix-doc/pull/2002). diff --git a/changelog.d/5211.feature b/changelog.d/5211.feature deleted file mode 100644 index 747098c166..0000000000 --- a/changelog.d/5211.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental support for relations (aka reactions and edits). diff --git a/changelog.d/5217.feature b/changelog.d/5217.feature deleted file mode 100644 index c78325a6ac..0000000000 --- a/changelog.d/5217.feature +++ /dev/null @@ -1 +0,0 @@ -Add a room version 4 which uses a new event ID format, as per [MSC2002](https://github.com/matrix-org/matrix-doc/pull/2002). diff --git a/changelog.d/5218.bugfix b/changelog.d/5218.bugfix deleted file mode 100644 index cd624ecfd0..0000000000 --- a/changelog.d/5218.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix incompatibility between ACME support and Python 3.5.2. diff --git a/changelog.d/5219.bugfix b/changelog.d/5219.bugfix deleted file mode 100644 index c1e17adc5d..0000000000 --- a/changelog.d/5219.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix error handling for rooms whose versions are unknown. 
diff --git a/synapse/__init__.py b/synapse/__init__.py index bf9e810da6..42af03b53d 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "0.99.4" +__version__ = "0.99.5rc1" From 8aed6d87ffe9a16b2c2809177fa819ddba966bd9 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 21 May 2019 16:57:56 +0100 Subject: [PATCH 188/429] Fix spelling in changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index d23166caa8..7eab35be30 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -21,7 +21,7 @@ Bugfixes - Re-order stages in registration flows such that msisdn and email verification are done last. ([\#5174](https://github.com/matrix-org/synapse/issues/5174)) - Fix 3pid guest invites. ([\#5177](https://github.com/matrix-org/synapse/issues/5177)) - Fix a bug where the register endpoint would fail with M_THREEPID_IN_USE instead of returning an account previously registered in the same session. ([\#5187](https://github.com/matrix-org/synapse/issues/5187)) -- Prevent registration for user ids that are to long to fit into a state key. Contributed by Reid Anderson. ([\#5198](https://github.com/matrix-org/synapse/issues/5198)) +- Prevent registration for user ids that are too long to fit into a state key. Contributed by Reid Anderson. ([\#5198](https://github.com/matrix-org/synapse/issues/5198)) - Fix incompatibility between ACME support and Python 3.5.2. ([\#5218](https://github.com/matrix-org/synapse/issues/5218)) - Fix error handling for rooms whose versions are unknown. ([\#5219](https://github.com/matrix-org/synapse/issues/5219)) From 9259cd4bee2b40f6de9f99361d33473e78f43bef Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 21 May 2019 17:06:21 +0100 Subject: [PATCH 189/429] Newsfile --- changelog.d/5203.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5203.feature diff --git a/changelog.d/5203.feature b/changelog.d/5203.feature new file mode 100644 index 0000000000..747098c166 --- /dev/null +++ b/changelog.d/5203.feature @@ -0,0 +1 @@ +Add experimental support for relations (aka reactions and edits). From 4a30e4acb4ef14431914bd42ad09a51bd81d6c3e Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 21 May 2019 11:36:50 -0500 Subject: [PATCH 190/429] Room Statistics (#4338) --- changelog.d/4338.feature | 1 + docs/sample_config.yaml | 16 + synapse/api/constants.py | 1 + synapse/config/homeserver.py | 42 +- synapse/config/stats.py | 60 +++ synapse/handlers/stats.py | 325 ++++++++++++++++ synapse/server.py | 6 + synapse/storage/__init__.py | 2 + synapse/storage/events_worker.py | 24 ++ synapse/storage/roommember.py | 32 ++ synapse/storage/schema/delta/54/stats.sql | 80 ++++ synapse/storage/state_deltas.py | 12 +- synapse/storage/stats.py | 450 ++++++++++++++++++++++ tests/handlers/test_stats.py | 251 ++++++++++++ tests/rest/client/v1/utils.py | 17 + 15 files changed, 1306 insertions(+), 13 deletions(-) create mode 100644 changelog.d/4338.feature create mode 100644 synapse/config/stats.py create mode 100644 synapse/handlers/stats.py create mode 100644 synapse/storage/schema/delta/54/stats.sql create mode 100644 synapse/storage/stats.py create mode 100644 tests/handlers/test_stats.py diff --git a/changelog.d/4338.feature b/changelog.d/4338.feature new file mode 100644 index 0000000000..01285e965c --- /dev/null +++ b/changelog.d/4338.feature @@ -0,0 +1 @@ +Synapse now more efficiently collates room statistics. 
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index f658ec8ecd..559fbcdd01 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1153,6 +1153,22 @@ password_config: # + +# Local statistics collection. Used in populating the room directory. +# +# 'bucket_size' controls how large each statistics timeslice is. It can +# be defined in a human readable short form -- e.g. "1d", "1y". +# +# 'retention' controls how long historical statistics will be kept for. +# It can be defined in a human readable short form -- e.g. "1d", "1y". +# +# +#stats: +# enabled: true +# bucket_size: 1d +# retention: 1y + + # Server Notices room configuration # # Uncomment this section to enable a room which can be used to send notices diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 6b347b1749..ee129c8689 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -79,6 +79,7 @@ class EventTypes(object): RoomHistoryVisibility = "m.room.history_visibility" CanonicalAlias = "m.room.canonical_alias" + Encryption = "m.room.encryption" RoomAvatar = "m.room.avatar" RoomEncryption = "m.room.encryption" GuestAccess = "m.room.guest_access" diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py index 727fdc54d8..5c4fc8ff21 100644 --- a/synapse/config/homeserver.py +++ b/synapse/config/homeserver.py @@ -13,6 +13,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + from .api import ApiConfig from .appservice import AppServiceConfig from .captcha import CaptchaConfig @@ -36,20 +37,41 @@ from .saml2_config import SAML2Config from .server import ServerConfig from .server_notices_config import ServerNoticesConfig from .spam_checker import SpamCheckerConfig +from .stats import StatsConfig from .tls import TlsConfig from .user_directory import UserDirectoryConfig from .voip import VoipConfig from .workers import WorkerConfig -class HomeServerConfig(ServerConfig, TlsConfig, DatabaseConfig, LoggingConfig, - RatelimitConfig, ContentRepositoryConfig, CaptchaConfig, - VoipConfig, RegistrationConfig, MetricsConfig, ApiConfig, - AppServiceConfig, KeyConfig, SAML2Config, CasConfig, - JWTConfig, PasswordConfig, EmailConfig, - WorkerConfig, PasswordAuthProviderConfig, PushConfig, - SpamCheckerConfig, GroupsConfig, UserDirectoryConfig, - ConsentConfig, - ServerNoticesConfig, RoomDirectoryConfig, - ): +class HomeServerConfig( + ServerConfig, + TlsConfig, + DatabaseConfig, + LoggingConfig, + RatelimitConfig, + ContentRepositoryConfig, + CaptchaConfig, + VoipConfig, + RegistrationConfig, + MetricsConfig, + ApiConfig, + AppServiceConfig, + KeyConfig, + SAML2Config, + CasConfig, + JWTConfig, + PasswordConfig, + EmailConfig, + WorkerConfig, + PasswordAuthProviderConfig, + PushConfig, + SpamCheckerConfig, + GroupsConfig, + UserDirectoryConfig, + ConsentConfig, + StatsConfig, + ServerNoticesConfig, + RoomDirectoryConfig, +): pass diff --git a/synapse/config/stats.py b/synapse/config/stats.py new file mode 100644 index 0000000000..80fc1b9dd0 --- /dev/null +++ b/synapse/config/stats.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import division + +import sys + +from ._base import Config + + +class StatsConfig(Config): + """Stats Configuration + Configuration for the behaviour of synapse's stats engine + """ + + def read_config(self, config): + self.stats_enabled = True + self.stats_bucket_size = 86400 + self.stats_retention = sys.maxsize + stats_config = config.get("stats", None) + if stats_config: + self.stats_enabled = stats_config.get("enabled", self.stats_enabled) + self.stats_bucket_size = ( + self.parse_duration(stats_config.get("bucket_size", "1d")) / 1000 + ) + self.stats_retention = ( + self.parse_duration( + stats_config.get("retention", "%ds" % (sys.maxsize,)) + ) + / 1000 + ) + + def default_config(self, config_dir_path, server_name, **kwargs): + return """ + # Local statistics collection. Used in populating the room directory. + # + # 'bucket_size' controls how large each statistics timeslice is. It can + # be defined in a human readable short form -- e.g. "1d", "1y". + # + # 'retention' controls how long historical statistics will be kept for. + # It can be defined in a human readable short form -- e.g. "1d", "1y". + # + # + #stats: + # enabled: true + # bucket_size: 1d + # retention: 1y + """ diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py new file mode 100644 index 0000000000..0e92b405ba --- /dev/null +++ b/synapse/handlers/stats.py @@ -0,0 +1,325 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from twisted.internet import defer + +from synapse.api.constants import EventTypes, JoinRules, Membership +from synapse.handlers.state_deltas import StateDeltasHandler +from synapse.metrics import event_processing_positions +from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.types import UserID +from synapse.util.metrics import Measure + +logger = logging.getLogger(__name__) + + +class StatsHandler(StateDeltasHandler): + """Handles keeping the *_stats tables updated with a simple time-series of + information about the users, rooms and media on the server, such that admins + have some idea of who is consuming their resources. 
+ + Heavily derived from UserDirectoryHandler + """ + + def __init__(self, hs): + super(StatsHandler, self).__init__(hs) + self.hs = hs + self.store = hs.get_datastore() + self.state = hs.get_state_handler() + self.server_name = hs.hostname + self.clock = hs.get_clock() + self.notifier = hs.get_notifier() + self.is_mine_id = hs.is_mine_id + self.stats_bucket_size = hs.config.stats_bucket_size + + # The current position in the current_state_delta stream + self.pos = None + + # Guard to ensure we only process deltas one at a time + self._is_processing = False + + if hs.config.stats_enabled: + self.notifier.add_replication_callback(self.notify_new_event) + + # We kick this off so that we don't have to wait for a change before + # we start populating stats + self.clock.call_later(0, self.notify_new_event) + + def notify_new_event(self): + """Called when there may be more deltas to process + """ + if not self.hs.config.stats_enabled: + return + + if self._is_processing: + return + + @defer.inlineCallbacks + def process(): + try: + yield self._unsafe_process() + finally: + self._is_processing = False + + self._is_processing = True + run_as_background_process("stats.notify_new_event", process) + + @defer.inlineCallbacks + def _unsafe_process(self): + # If self.pos is None then means we haven't fetched it from DB + if self.pos is None: + self.pos = yield self.store.get_stats_stream_pos() + + # If still None then the initial background update hasn't happened yet + if self.pos is None: + defer.returnValue(None) + + # Loop round handling deltas until we're up to date + while True: + with Measure(self.clock, "stats_delta"): + deltas = yield self.store.get_current_state_deltas(self.pos) + if not deltas: + return + + logger.info("Handling %d state deltas", len(deltas)) + yield self._handle_deltas(deltas) + + self.pos = deltas[-1]["stream_id"] + yield self.store.update_stats_stream_pos(self.pos) + + event_processing_positions.labels("stats").set(self.pos) + + @defer.inlineCallbacks + def _handle_deltas(self, deltas): + """ + Called with the state deltas to process + """ + for delta in deltas: + typ = delta["type"] + state_key = delta["state_key"] + room_id = delta["room_id"] + event_id = delta["event_id"] + stream_id = delta["stream_id"] + prev_event_id = delta["prev_event_id"] + + logger.debug("Handling: %r %r, %s", typ, state_key, event_id) + + token = yield self.store.get_earliest_token_for_room_stats(room_id) + + # If the earliest token to begin from is larger than our current + # stream ID, skip processing this delta. + if token is not None and token >= stream_id: + logger.debug( + "Ignoring: %s as earlier than this room's initial ingestion event", + event_id, + ) + continue + + if event_id is None and prev_event_id is None: + # Errr... + continue + + event_content = {} + + if event_id is not None: + event_content = (yield self.store.get_event(event_id)).content or {} + + # quantise time to the nearest bucket + now = yield self.store.get_received_ts(event_id) + now = (now // 1000 // self.stats_bucket_size) * self.stats_bucket_size + + if typ == EventTypes.Member: + # we could use _get_key_change here but it's a bit inefficient + # given we're not testing for a specific result; might as well + # just grab the prev_membership and membership strings and + # compare them. 
+ prev_event_content = {} + if prev_event_id is not None: + prev_event_content = ( + yield self.store.get_event(prev_event_id) + ).content + + membership = event_content.get("membership", Membership.LEAVE) + prev_membership = prev_event_content.get("membership", Membership.LEAVE) + + if prev_membership == membership: + continue + + if prev_membership == Membership.JOIN: + yield self.store.update_stats_delta( + now, "room", room_id, "joined_members", -1 + ) + elif prev_membership == Membership.INVITE: + yield self.store.update_stats_delta( + now, "room", room_id, "invited_members", -1 + ) + elif prev_membership == Membership.LEAVE: + yield self.store.update_stats_delta( + now, "room", room_id, "left_members", -1 + ) + elif prev_membership == Membership.BAN: + yield self.store.update_stats_delta( + now, "room", room_id, "banned_members", -1 + ) + else: + err = "%s is not a valid prev_membership" % (repr(prev_membership),) + logger.error(err) + raise ValueError(err) + + if membership == Membership.JOIN: + yield self.store.update_stats_delta( + now, "room", room_id, "joined_members", +1 + ) + elif membership == Membership.INVITE: + yield self.store.update_stats_delta( + now, "room", room_id, "invited_members", +1 + ) + elif membership == Membership.LEAVE: + yield self.store.update_stats_delta( + now, "room", room_id, "left_members", +1 + ) + elif membership == Membership.BAN: + yield self.store.update_stats_delta( + now, "room", room_id, "banned_members", +1 + ) + else: + err = "%s is not a valid membership" % (repr(membership),) + logger.error(err) + raise ValueError(err) + + user_id = state_key + if self.is_mine_id(user_id): + # update user_stats as it's one of our users + public = yield self._is_public_room(room_id) + + if membership == Membership.LEAVE: + yield self.store.update_stats_delta( + now, + "user", + user_id, + "public_rooms" if public else "private_rooms", + -1, + ) + elif membership == Membership.JOIN: + yield self.store.update_stats_delta( + now, + "user", + user_id, + "public_rooms" if public else "private_rooms", + +1, + ) + + elif typ == EventTypes.Create: + # Newly created room. Add it with all blank portions. 
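+                # (update_room_state upserts into the room_state table created by
+                # the 54/stats.sql delta below, so every cached field starts out
+                # as NULL until the corresponding state event is seen.)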
+ yield self.store.update_room_state( + room_id, + { + "join_rules": None, + "history_visibility": None, + "encryption": None, + "name": None, + "topic": None, + "avatar": None, + "canonical_alias": None, + }, + ) + + elif typ == EventTypes.JoinRules: + yield self.store.update_room_state( + room_id, {"join_rules": event_content.get("join_rule")} + ) + + is_public = yield self._get_key_change( + prev_event_id, event_id, "join_rule", JoinRules.PUBLIC + ) + if is_public is not None: + yield self.update_public_room_stats(now, room_id, is_public) + + elif typ == EventTypes.RoomHistoryVisibility: + yield self.store.update_room_state( + room_id, + {"history_visibility": event_content.get("history_visibility")}, + ) + + is_public = yield self._get_key_change( + prev_event_id, event_id, "history_visibility", "world_readable" + ) + if is_public is not None: + yield self.update_public_room_stats(now, room_id, is_public) + + elif typ == EventTypes.Encryption: + yield self.store.update_room_state( + room_id, {"encryption": event_content.get("algorithm")} + ) + elif typ == EventTypes.Name: + yield self.store.update_room_state( + room_id, {"name": event_content.get("name")} + ) + elif typ == EventTypes.Topic: + yield self.store.update_room_state( + room_id, {"topic": event_content.get("topic")} + ) + elif typ == EventTypes.RoomAvatar: + yield self.store.update_room_state( + room_id, {"avatar": event_content.get("url")} + ) + elif typ == EventTypes.CanonicalAlias: + yield self.store.update_room_state( + room_id, {"canonical_alias": event_content.get("alias")} + ) + + @defer.inlineCallbacks + def update_public_room_stats(self, ts, room_id, is_public): + """ + Increment/decrement a user's number of public rooms when a room they are + in changes to/from public visibility. 
+ + Args: + ts (int): Timestamp in seconds + room_id (str) + is_public (bool) + """ + # For now, blindly iterate over all local users in the room so that + # we can handle the whole problem of copying buckets over as needed + user_ids = yield self.store.get_users_in_room(room_id) + + for user_id in user_ids: + if self.hs.is_mine(UserID.from_string(user_id)): + yield self.store.update_stats_delta( + ts, "user", user_id, "public_rooms", +1 if is_public else -1 + ) + yield self.store.update_stats_delta( + ts, "user", user_id, "private_rooms", -1 if is_public else +1 + ) + + @defer.inlineCallbacks + def _is_public_room(self, room_id): + join_rules = yield self.state.get_current_state(room_id, EventTypes.JoinRules) + history_visibility = yield self.state.get_current_state( + room_id, EventTypes.RoomHistoryVisibility + ) + + if (join_rules and join_rules.content.get("join_rule") == JoinRules.PUBLIC) or ( + ( + history_visibility + and history_visibility.content.get("history_visibility") + == "world_readable" + ) + ): + defer.returnValue(True) + else: + defer.returnValue(False) diff --git a/synapse/server.py b/synapse/server.py index 80d40b9272..9229a68a8d 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -72,6 +72,7 @@ from synapse.handlers.room_list import RoomListHandler from synapse.handlers.room_member import RoomMemberMasterHandler from synapse.handlers.room_member_worker import RoomMemberWorkerHandler from synapse.handlers.set_password import SetPasswordHandler +from synapse.handlers.stats import StatsHandler from synapse.handlers.sync import SyncHandler from synapse.handlers.typing import TypingHandler from synapse.handlers.user_directory import UserDirectoryHandler @@ -139,6 +140,7 @@ class HomeServer(object): 'acme_handler', 'auth_handler', 'device_handler', + 'stats_handler', 'e2e_keys_handler', 'e2e_room_keys_handler', 'event_handler', @@ -191,6 +193,7 @@ class HomeServer(object): REQUIRED_ON_MASTER_STARTUP = [ "user_directory_handler", + "stats_handler" ] # This is overridden in derived application classes @@ -474,6 +477,9 @@ class HomeServer(object): def build_secrets(self): return Secrets() + def build_stats_handler(self): + return StatsHandler(self) + def build_spam_checker(self): return SpamChecker(self) diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 7522d3fd57..66675d08ae 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -55,6 +55,7 @@ from .roommember import RoomMemberStore from .search import SearchStore from .signatures import SignatureStore from .state import StateStore +from .stats import StatsStore from .stream import StreamStore from .tags import TagsStore from .transactions import TransactionStore @@ -100,6 +101,7 @@ class DataStore( GroupServerStore, UserErasureStore, MonthlyActiveUsersStore, + StatsStore, RelationsStore, ): def __init__(self, db_conn, hs): diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index adc6cf26b5..83ffae2132 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -611,3 +611,27 @@ class EventsWorkerStore(SQLBaseStore): return res return self.runInteraction("get_rejection_reasons", f) + + def _get_total_state_event_counts_txn(self, txn, room_id): + """ + See get_state_event_counts. + """ + sql = "SELECT COUNT(*) FROM state_events WHERE room_id=?" 
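+        # Counts every state event accepted into the room, not just those in
+        # the current state, so this acts as a running total.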
+ txn.execute(sql, (room_id,)) + row = txn.fetchone() + return row[0] if row else 0 + + def get_total_state_event_counts(self, room_id): + """ + Gets the total number of state events in a room. + + Args: + room_id (str) + + Returns: + Deferred[int] + """ + return self.runInteraction( + "get_total_state_event_counts", + self._get_total_state_event_counts_txn, room_id + ) diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 57df17bcc2..4bd1669458 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -142,6 +142,38 @@ class RoomMemberWorkerStore(EventsWorkerStore): return self.runInteraction("get_room_summary", _get_room_summary_txn) + def _get_user_count_in_room_txn(self, txn, room_id, membership): + """ + See get_user_count_in_room. + """ + sql = ( + "SELECT count(*) FROM room_memberships as m" + " INNER JOIN current_state_events as c" + " ON m.event_id = c.event_id " + " AND m.room_id = c.room_id " + " AND m.user_id = c.state_key" + " WHERE c.type = 'm.room.member' AND c.room_id = ? AND m.membership = ?" + ) + + txn.execute(sql, (room_id, membership)) + row = txn.fetchone() + return row[0] + + def get_user_count_in_room(self, room_id, membership): + """ + Get the user count in a room with a particular membership. + + Args: + room_id (str) + membership (Membership) + + Returns: + Deferred[int] + """ + return self.runInteraction( + "get_users_in_room", self._get_user_count_in_room_txn, room_id, membership + ) + @cached() def get_invited_rooms_for_user(self, user_id): """ Get all the rooms the user is invited to diff --git a/synapse/storage/schema/delta/54/stats.sql b/synapse/storage/schema/delta/54/stats.sql new file mode 100644 index 0000000000..652e58308e --- /dev/null +++ b/synapse/storage/schema/delta/54/stats.sql @@ -0,0 +1,80 @@ +/* Copyright 2018 New Vector Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +CREATE TABLE stats_stream_pos ( + Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, -- Makes sure this table only has one row. 
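+    -- stream_id is how far through current_state_delta_stream the stats
+    -- handler has processed; it stays NULL until the initial background
+    -- update has recorded a position.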
+ stream_id BIGINT, + CHECK (Lock='X') +); + +INSERT INTO stats_stream_pos (stream_id) VALUES (null); + +CREATE TABLE user_stats ( + user_id TEXT NOT NULL, + ts BIGINT NOT NULL, + bucket_size INT NOT NULL, + public_rooms INT NOT NULL, + private_rooms INT NOT NULL +); + +CREATE UNIQUE INDEX user_stats_user_ts ON user_stats(user_id, ts); + +CREATE TABLE room_stats ( + room_id TEXT NOT NULL, + ts BIGINT NOT NULL, + bucket_size INT NOT NULL, + current_state_events INT NOT NULL, + joined_members INT NOT NULL, + invited_members INT NOT NULL, + left_members INT NOT NULL, + banned_members INT NOT NULL, + state_events INT NOT NULL +); + +CREATE UNIQUE INDEX room_stats_room_ts ON room_stats(room_id, ts); + +-- cache of current room state; useful for the publicRooms list +CREATE TABLE room_state ( + room_id TEXT NOT NULL, + join_rules TEXT, + history_visibility TEXT, + encryption TEXT, + name TEXT, + topic TEXT, + avatar TEXT, + canonical_alias TEXT + -- get aliases straight from the right table +); + +CREATE UNIQUE INDEX room_state_room ON room_state(room_id); + +CREATE TABLE room_stats_earliest_token ( + room_id TEXT NOT NULL, + token BIGINT NOT NULL +); + +CREATE UNIQUE INDEX room_stats_earliest_token_idx ON room_stats_earliest_token(room_id); + +-- Set up staging tables +INSERT INTO background_updates (update_name, progress_json) VALUES + ('populate_stats_createtables', '{}'); + +-- Run through each room and update stats +INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES + ('populate_stats_process_rooms', '{}', 'populate_stats_createtables'); + +-- Clean up staging tables +INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES + ('populate_stats_cleanup', '{}', 'populate_stats_process_rooms'); diff --git a/synapse/storage/state_deltas.py b/synapse/storage/state_deltas.py index 31a0279b18..5fdb442104 100644 --- a/synapse/storage/state_deltas.py +++ b/synapse/storage/state_deltas.py @@ -84,10 +84,16 @@ class StateDeltasStore(SQLBaseStore): "get_current_state_deltas", get_current_state_deltas_txn ) - def get_max_stream_id_in_current_state_deltas(self): - return self._simple_select_one_onecol( + def _get_max_stream_id_in_current_state_deltas_txn(self, txn): + return self._simple_select_one_onecol_txn( + txn, table="current_state_delta_stream", keyvalues={}, retcol="COALESCE(MAX(stream_id), -1)", - desc="get_max_stream_id_in_current_state_deltas", + ) + + def get_max_stream_id_in_current_state_deltas(self): + return self.runInteraction( + "get_max_stream_id_in_current_state_deltas", + self._get_max_stream_id_in_current_state_deltas_txn, ) diff --git a/synapse/storage/stats.py b/synapse/storage/stats.py new file mode 100644 index 0000000000..71b80a891d --- /dev/null +++ b/synapse/storage/stats.py @@ -0,0 +1,450 @@ +# -*- coding: utf-8 -*- +# Copyright 2018, 2019 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
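+
+# Storage for the room and user statistics tables: the populate_stats_*
+# background updates build the initial snapshot, and the helpers below are
+# used by StatsHandler to apply per-bucket deltas as new state arrives.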
+ +import logging + +from twisted.internet import defer + +from synapse.api.constants import EventTypes, Membership +from synapse.storage.state_deltas import StateDeltasStore +from synapse.util.caches.descriptors import cached + +logger = logging.getLogger(__name__) + +# these fields track absolutes (e.g. total number of rooms on the server) +ABSOLUTE_STATS_FIELDS = { + "room": ( + "current_state_events", + "joined_members", + "invited_members", + "left_members", + "banned_members", + "state_events", + ), + "user": ("public_rooms", "private_rooms"), +} + +TYPE_TO_ROOM = {"room": ("room_stats", "room_id"), "user": ("user_stats", "user_id")} + +TEMP_TABLE = "_temp_populate_stats" + + +class StatsStore(StateDeltasStore): + def __init__(self, db_conn, hs): + super(StatsStore, self).__init__(db_conn, hs) + + self.server_name = hs.hostname + self.clock = self.hs.get_clock() + self.stats_enabled = hs.config.stats_enabled + self.stats_bucket_size = hs.config.stats_bucket_size + + self.register_background_update_handler( + "populate_stats_createtables", self._populate_stats_createtables + ) + self.register_background_update_handler( + "populate_stats_process_rooms", self._populate_stats_process_rooms + ) + self.register_background_update_handler( + "populate_stats_cleanup", self._populate_stats_cleanup + ) + + @defer.inlineCallbacks + def _populate_stats_createtables(self, progress, batch_size): + + if not self.stats_enabled: + yield self._end_background_update("populate_stats_createtables") + defer.returnValue(1) + + # Get all the rooms that we want to process. + def _make_staging_area(txn): + sql = ( + "CREATE TABLE IF NOT EXISTS " + + TEMP_TABLE + + "_rooms(room_id TEXT NOT NULL, events BIGINT NOT NULL)" + ) + txn.execute(sql) + + sql = ( + "CREATE TABLE IF NOT EXISTS " + + TEMP_TABLE + + "_position(position TEXT NOT NULL)" + ) + txn.execute(sql) + + # Get rooms we want to process from the database + sql = """ + SELECT room_id, count(*) FROM current_state_events + GROUP BY room_id + """ + txn.execute(sql) + rooms = [{"room_id": x[0], "events": x[1]} for x in txn.fetchall()] + self._simple_insert_many_txn(txn, TEMP_TABLE + "_rooms", rooms) + del rooms + + new_pos = yield self.get_max_stream_id_in_current_state_deltas() + yield self.runInteraction("populate_stats_temp_build", _make_staging_area) + yield self._simple_insert(TEMP_TABLE + "_position", {"position": new_pos}) + self.get_earliest_token_for_room_stats.invalidate_all() + + yield self._end_background_update("populate_stats_createtables") + defer.returnValue(1) + + @defer.inlineCallbacks + def _populate_stats_cleanup(self, progress, batch_size): + """ + Update the user directory stream position, then clean up the old tables. 
+ """ + if not self.stats_enabled: + yield self._end_background_update("populate_stats_cleanup") + defer.returnValue(1) + + position = yield self._simple_select_one_onecol( + TEMP_TABLE + "_position", None, "position" + ) + yield self.update_stats_stream_pos(position) + + def _delete_staging_area(txn): + txn.execute("DROP TABLE IF EXISTS " + TEMP_TABLE + "_rooms") + txn.execute("DROP TABLE IF EXISTS " + TEMP_TABLE + "_position") + + yield self.runInteraction("populate_stats_cleanup", _delete_staging_area) + + yield self._end_background_update("populate_stats_cleanup") + defer.returnValue(1) + + @defer.inlineCallbacks + def _populate_stats_process_rooms(self, progress, batch_size): + + if not self.stats_enabled: + yield self._end_background_update("populate_stats_process_rooms") + defer.returnValue(1) + + # If we don't have progress filed, delete everything. + if not progress: + yield self.delete_all_stats() + + def _get_next_batch(txn): + # Only fetch 250 rooms, so we don't fetch too many at once, even + # if those 250 rooms have less than batch_size state events. + sql = """ + SELECT room_id, events FROM %s_rooms + ORDER BY events DESC + LIMIT 250 + """ % ( + TEMP_TABLE, + ) + txn.execute(sql) + rooms_to_work_on = txn.fetchall() + + if not rooms_to_work_on: + return None + + # Get how many are left to process, so we can give status on how + # far we are in processing + txn.execute("SELECT COUNT(*) FROM " + TEMP_TABLE + "_rooms") + progress["remaining"] = txn.fetchone()[0] + + return rooms_to_work_on + + rooms_to_work_on = yield self.runInteraction( + "populate_stats_temp_read", _get_next_batch + ) + + # No more rooms -- complete the transaction. + if not rooms_to_work_on: + yield self._end_background_update("populate_stats_process_rooms") + defer.returnValue(1) + + logger.info( + "Processing the next %d rooms of %d remaining", + (len(rooms_to_work_on), progress["remaining"]), + ) + + # Number of state events we've processed by going through each room + processed_event_count = 0 + + for room_id, event_count in rooms_to_work_on: + + current_state_ids = yield self.get_current_state_ids(room_id) + + join_rules = yield self.get_event( + current_state_ids.get((EventTypes.JoinRules, "")), allow_none=True + ) + history_visibility = yield self.get_event( + current_state_ids.get((EventTypes.RoomHistoryVisibility, "")), + allow_none=True, + ) + encryption = yield self.get_event( + current_state_ids.get((EventTypes.RoomEncryption, "")), allow_none=True + ) + name = yield self.get_event( + current_state_ids.get((EventTypes.Name, "")), allow_none=True + ) + topic = yield self.get_event( + current_state_ids.get((EventTypes.Topic, "")), allow_none=True + ) + avatar = yield self.get_event( + current_state_ids.get((EventTypes.RoomAvatar, "")), allow_none=True + ) + canonical_alias = yield self.get_event( + current_state_ids.get((EventTypes.CanonicalAlias, "")), allow_none=True + ) + + def _or_none(x, arg): + if x: + return x.content.get(arg) + return None + + yield self.update_room_state( + room_id, + { + "join_rules": _or_none(join_rules, "join_rule"), + "history_visibility": _or_none( + history_visibility, "history_visibility" + ), + "encryption": _or_none(encryption, "algorithm"), + "name": _or_none(name, "name"), + "topic": _or_none(topic, "topic"), + "avatar": _or_none(avatar, "url"), + "canonical_alias": _or_none(canonical_alias, "alias"), + }, + ) + + now = self.hs.get_reactor().seconds() + + # quantise time to the nearest bucket + now = (now // self.stats_bucket_size) * self.stats_bucket_size + + 
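+            # (For example, with the default one-day bucket_size of 86400
+            # seconds, a reactor time of 100000 quantises down to 86400.)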
def _fetch_data(txn): + + # Get the current token of the room + current_token = self._get_max_stream_id_in_current_state_deltas_txn(txn) + + current_state_events = len(current_state_ids) + joined_members = self._get_user_count_in_room_txn( + txn, room_id, Membership.JOIN + ) + invited_members = self._get_user_count_in_room_txn( + txn, room_id, Membership.INVITE + ) + left_members = self._get_user_count_in_room_txn( + txn, room_id, Membership.LEAVE + ) + banned_members = self._get_user_count_in_room_txn( + txn, room_id, Membership.BAN + ) + total_state_events = self._get_total_state_event_counts_txn( + txn, room_id + ) + + self._update_stats_txn( + txn, + "room", + room_id, + now, + { + "bucket_size": self.stats_bucket_size, + "current_state_events": current_state_events, + "joined_members": joined_members, + "invited_members": invited_members, + "left_members": left_members, + "banned_members": banned_members, + "state_events": total_state_events, + }, + ) + self._simple_insert_txn( + txn, + "room_stats_earliest_token", + {"room_id": room_id, "token": current_token}, + ) + + yield self.runInteraction("update_room_stats", _fetch_data) + + # We've finished a room. Delete it from the table. + yield self._simple_delete_one(TEMP_TABLE + "_rooms", {"room_id": room_id}) + # Update the remaining counter. + progress["remaining"] -= 1 + yield self.runInteraction( + "populate_stats", + self._background_update_progress_txn, + "populate_stats_process_rooms", + progress, + ) + + processed_event_count += event_count + + if processed_event_count > batch_size: + # Don't process any more rooms, we've hit our batch size. + defer.returnValue(processed_event_count) + + defer.returnValue(processed_event_count) + + def delete_all_stats(self): + """ + Delete all statistics records. + """ + + def _delete_all_stats_txn(txn): + txn.execute("DELETE FROM room_state") + txn.execute("DELETE FROM room_stats") + txn.execute("DELETE FROM room_stats_earliest_token") + txn.execute("DELETE FROM user_stats") + + return self.runInteraction("delete_all_stats", _delete_all_stats_txn) + + def get_stats_stream_pos(self): + return self._simple_select_one_onecol( + table="stats_stream_pos", + keyvalues={}, + retcol="stream_id", + desc="stats_stream_pos", + ) + + def update_stats_stream_pos(self, stream_id): + return self._simple_update_one( + table="stats_stream_pos", + keyvalues={}, + updatevalues={"stream_id": stream_id}, + desc="update_stats_stream_pos", + ) + + def update_room_state(self, room_id, fields): + """ + Args: + room_id (str) + fields (dict[str:Any]) + """ + return self._simple_upsert( + table="room_state", + keyvalues={"room_id": room_id}, + values=fields, + desc="update_room_state", + ) + + def get_deltas_for_room(self, room_id, start, size=100): + """ + Get statistics deltas for a given room. + + Args: + room_id (str) + start (int): Pagination start. Number of entries, not timestamp. + size (int): How many entries to return. + + Returns: + Deferred[list[dict]], where the dict has the keys of + ABSOLUTE_STATS_FIELDS["room"] and "ts". + """ + return self._simple_select_list_paginate( + "room_stats", + {"room_id": room_id}, + "ts", + start, + size, + retcols=(list(ABSOLUTE_STATS_FIELDS["room"]) + ["ts"]), + order_direction="DESC", + ) + + def get_all_room_state(self): + return self._simple_select_list( + "room_state", None, retcols=("name", "topic", "canonical_alias") + ) + + @cached() + def get_earliest_token_for_room_stats(self, room_id): + """ + Fetch the "earliest token". 
This is used by the room stats delta + processor to ignore deltas that have been processed between the + start of the background task and any particular room's stats + being calculated. + + Returns: + Deferred[int] + """ + return self._simple_select_one_onecol( + "room_stats_earliest_token", + {"room_id": room_id}, + retcol="token", + allow_none=True, + ) + + def update_stats(self, stats_type, stats_id, ts, fields): + table, id_col = TYPE_TO_ROOM[stats_type] + return self._simple_upsert( + table=table, + keyvalues={id_col: stats_id, "ts": ts}, + values=fields, + desc="update_stats", + ) + + def _update_stats_txn(self, txn, stats_type, stats_id, ts, fields): + table, id_col = TYPE_TO_ROOM[stats_type] + return self._simple_upsert_txn( + txn, table=table, keyvalues={id_col: stats_id, "ts": ts}, values=fields + ) + + def update_stats_delta(self, ts, stats_type, stats_id, field, value): + def _update_stats_delta(txn): + table, id_col = TYPE_TO_ROOM[stats_type] + + sql = ( + "SELECT * FROM %s" + " WHERE %s=? and ts=(" + " SELECT MAX(ts) FROM %s" + " WHERE %s=?" + ")" + ) % (table, id_col, table, id_col) + txn.execute(sql, (stats_id, stats_id)) + rows = self.cursor_to_dict(txn) + if len(rows) == 0: + # silently skip as we don't have anything to apply a delta to yet. + # this tries to minimise any race between the initial sync and + # subsequent deltas arriving. + return + + current_ts = ts + latest_ts = rows[0]["ts"] + if current_ts < latest_ts: + # This one is in the past, but we're just encountering it now. + # Mark it as part of the current bucket. + current_ts = latest_ts + elif ts != latest_ts: + # we have to copy our absolute counters over to the new entry. + values = { + key: rows[0][key] for key in ABSOLUTE_STATS_FIELDS[stats_type] + } + values[id_col] = stats_id + values["ts"] = ts + values["bucket_size"] = self.stats_bucket_size + + self._simple_insert_txn(txn, table=table, values=values) + + # actually update the new value + if stats_type in ABSOLUTE_STATS_FIELDS[stats_type]: + self._simple_update_txn( + txn, + table=table, + keyvalues={id_col: stats_id, "ts": current_ts}, + updatevalues={field: value}, + ) + else: + sql = ("UPDATE %s SET %s=%s+? WHERE %s=? AND ts=?") % ( + table, + field, + field, + id_col, + ) + txn.execute(sql, (value, stats_id, current_ts)) + + return self.runInteraction("update_stats_delta", _update_stats_delta) diff --git a/tests/handlers/test_stats.py b/tests/handlers/test_stats.py new file mode 100644 index 0000000000..249aba3d59 --- /dev/null +++ b/tests/handlers/test_stats.py @@ -0,0 +1,251 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
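+
+# Tests for the stats handler: initial population via the populate_stats_*
+# background updates, stream-position handling, and membership-delta
+# validation in _handle_deltas.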
+ +from mock import Mock + +from twisted.internet import defer + +from synapse.api.constants import EventTypes, Membership +from synapse.rest import admin +from synapse.rest.client.v1 import login, room + +from tests import unittest + + +class StatsRoomTests(unittest.HomeserverTestCase): + + servlets = [ + admin.register_servlets_for_client_rest_resource, + room.register_servlets, + login.register_servlets, + ] + + def prepare(self, reactor, clock, hs): + + self.store = hs.get_datastore() + self.handler = self.hs.get_stats_handler() + + def _add_background_updates(self): + """ + Add the background updates we need to run. + """ + # Ugh, have to reset this flag + self.store._all_done = False + + self.get_success( + self.store._simple_insert( + "background_updates", + {"update_name": "populate_stats_createtables", "progress_json": "{}"}, + ) + ) + self.get_success( + self.store._simple_insert( + "background_updates", + { + "update_name": "populate_stats_process_rooms", + "progress_json": "{}", + "depends_on": "populate_stats_createtables", + }, + ) + ) + self.get_success( + self.store._simple_insert( + "background_updates", + { + "update_name": "populate_stats_cleanup", + "progress_json": "{}", + "depends_on": "populate_stats_process_rooms", + }, + ) + ) + + def test_initial_room(self): + """ + The background updates will build the table from scratch. + """ + r = self.get_success(self.store.get_all_room_state()) + self.assertEqual(len(r), 0) + + # Disable stats + self.hs.config.stats_enabled = False + self.handler.stats_enabled = False + + u1 = self.register_user("u1", "pass") + u1_token = self.login("u1", "pass") + + room_1 = self.helper.create_room_as(u1, tok=u1_token) + self.helper.send_state( + room_1, event_type="m.room.topic", body={"topic": "foo"}, tok=u1_token + ) + + # Stats disabled, shouldn't have done anything + r = self.get_success(self.store.get_all_room_state()) + self.assertEqual(len(r), 0) + + # Enable stats + self.hs.config.stats_enabled = True + self.handler.stats_enabled = True + + # Do the initial population of the user directory via the background update + self._add_background_updates() + + while not self.get_success(self.store.has_completed_background_updates()): + self.get_success(self.store.do_next_background_update(100), by=0.1) + + r = self.get_success(self.store.get_all_room_state()) + + self.assertEqual(len(r), 1) + self.assertEqual(r[0]["topic"], "foo") + + def test_initial_earliest_token(self): + """ + Ingestion via notify_new_event will ignore tokens that the background + update have already processed. + """ + self.reactor.advance(86401) + + self.hs.config.stats_enabled = False + self.handler.stats_enabled = False + + u1 = self.register_user("u1", "pass") + u1_token = self.login("u1", "pass") + + u2 = self.register_user("u2", "pass") + u2_token = self.login("u2", "pass") + + u3 = self.register_user("u3", "pass") + u3_token = self.login("u3", "pass") + + room_1 = self.helper.create_room_as(u1, tok=u1_token) + self.helper.send_state( + room_1, event_type="m.room.topic", body={"topic": "foo"}, tok=u1_token + ) + + # Begin the ingestion by creating the temp tables. This will also store + # the position that the deltas should begin at, once they take over. 
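+        # (That position is written to the _temp_populate_stats_position staging
+        # table by the populate_stats_createtables background update.)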
+ self.hs.config.stats_enabled = True + self.handler.stats_enabled = True + self.store._all_done = False + self.get_success(self.store.update_stats_stream_pos(None)) + + self.get_success( + self.store._simple_insert( + "background_updates", + {"update_name": "populate_stats_createtables", "progress_json": "{}"}, + ) + ) + + while not self.get_success(self.store.has_completed_background_updates()): + self.get_success(self.store.do_next_background_update(100), by=0.1) + + # Now, before the table is actually ingested, add some more events. + self.helper.invite(room=room_1, src=u1, targ=u2, tok=u1_token) + self.helper.join(room=room_1, user=u2, tok=u2_token) + + # Now do the initial ingestion. + self.get_success( + self.store._simple_insert( + "background_updates", + {"update_name": "populate_stats_process_rooms", "progress_json": "{}"}, + ) + ) + self.get_success( + self.store._simple_insert( + "background_updates", + { + "update_name": "populate_stats_cleanup", + "progress_json": "{}", + "depends_on": "populate_stats_process_rooms", + }, + ) + ) + + self.store._all_done = False + while not self.get_success(self.store.has_completed_background_updates()): + self.get_success(self.store.do_next_background_update(100), by=0.1) + + self.reactor.advance(86401) + + # Now add some more events, triggering ingestion. Because of the stream + # position being set to before the events sent in the middle, a simpler + # implementation would reprocess those events, and say there were four + # users, not three. + self.helper.invite(room=room_1, src=u1, targ=u3, tok=u1_token) + self.helper.join(room=room_1, user=u3, tok=u3_token) + + # Get the deltas! There should be two -- day 1, and day 2. + r = self.get_success(self.store.get_deltas_for_room(room_1, 0)) + + # The oldest has 2 joined members + self.assertEqual(r[-1]["joined_members"], 2) + + # The newest has 3 + self.assertEqual(r[0]["joined_members"], 3) + + def test_incorrect_state_transition(self): + """ + If the state transition is not one of (JOIN, INVITE, LEAVE, BAN) to + (JOIN, INVITE, LEAVE, BAN), an error is raised. + """ + events = { + "a1": {"membership": Membership.LEAVE}, + "a2": {"membership": "not a real thing"}, + } + + def get_event(event_id): + m = Mock() + m.content = events[event_id] + d = defer.Deferred() + self.reactor.callLater(0.0, d.callback, m) + return d + + def get_received_ts(event_id): + return defer.succeed(1) + + self.store.get_received_ts = get_received_ts + self.store.get_event = get_event + + deltas = [ + { + "type": EventTypes.Member, + "state_key": "some_user", + "room_id": "room", + "event_id": "a1", + "prev_event_id": "a2", + "stream_id": "bleb", + } + ] + + f = self.get_failure(self.handler._handle_deltas(deltas), ValueError) + self.assertEqual( + f.value.args[0], "'not a real thing' is not a valid prev_membership" + ) + + # And the other way... 
+ deltas = [ + { + "type": EventTypes.Member, + "state_key": "some_user", + "room_id": "room", + "event_id": "a2", + "prev_event_id": "a1", + "stream_id": "bleb", + } + ] + + f = self.get_failure(self.handler._handle_deltas(deltas), ValueError) + self.assertEqual( + f.value.args[0], "'not a real thing' is not a valid membership" + ) diff --git a/tests/rest/client/v1/utils.py b/tests/rest/client/v1/utils.py index 05b0143c42..f7133fc12e 100644 --- a/tests/rest/client/v1/utils.py +++ b/tests/rest/client/v1/utils.py @@ -127,3 +127,20 @@ class RestHelper(object): ) return channel.json_body + + def send_state(self, room_id, event_type, body, tok, expect_code=200): + path = "/_matrix/client/r0/rooms/%s/state/%s" % (room_id, event_type) + if tok: + path = path + "?access_token=%s" % tok + + request, channel = make_request( + self.hs.get_reactor(), "PUT", path, json.dumps(body).encode('utf8') + ) + render(request, self.resource, self.hs.get_reactor()) + + assert int(channel.result["code"]) == expect_code, ( + "Expected: %d, got: %d, resp: %r" + % (expect_code, int(channel.result["code"]), channel.result["body"]) + ) + + return channel.json_body From e26e6b3230f0b55376f0f3bf823dd789ac7064d0 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 21 May 2019 17:37:19 +0100 Subject: [PATCH 191/429] update changelog --- CHANGES.md | 2 +- changelog.d/5203.feature | 1 - changelog.d/5212.feature | 1 - 3 files changed, 1 insertion(+), 3 deletions(-) delete mode 100644 changelog.d/5203.feature delete mode 100644 changelog.d/5212.feature diff --git a/CHANGES.md b/CHANGES.md index 7eab35be30..25ceec8b48 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -9,7 +9,7 @@ Features - Drop support for the undocumented /_matrix/client/v2_alpha API prefix. ([\#5190](https://github.com/matrix-org/synapse/issues/5190)) - Add an option to disable per-room profiles. ([\#5196](https://github.com/matrix-org/synapse/issues/5196)) - Stick an expiration date to any registered user missing one at startup if account validity is enabled. ([\#5204](https://github.com/matrix-org/synapse/issues/5204)) -- Add experimental support for relations (aka reactions and edits). ([\#5209](https://github.com/matrix-org/synapse/issues/5209), [\#5211](https://github.com/matrix-org/synapse/issues/5211)) +- Add experimental support for relations (aka reactions and edits). ([\#5209](https://github.com/matrix-org/synapse/issues/5209), [\#5211](https://github.com/matrix-org/synapse/issues/5211), [\#5203](https://github.com/matrix-org/synapse/issues/5203), [\#5212](https://github.com/matrix-org/synapse/issues/5212)) - Add a room version 4 which uses a new event ID format, as per [MSC2002](https://github.com/matrix-org/matrix-doc/pull/2002). ([\#5210](https://github.com/matrix-org/synapse/issues/5210), [\#5217](https://github.com/matrix-org/synapse/issues/5217)) diff --git a/changelog.d/5203.feature b/changelog.d/5203.feature deleted file mode 100644 index 747098c166..0000000000 --- a/changelog.d/5203.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental support for relations (aka reactions and edits). diff --git a/changelog.d/5212.feature b/changelog.d/5212.feature deleted file mode 100644 index 747098c166..0000000000 --- a/changelog.d/5212.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental support for relations (aka reactions and edits). 
From 2dfbeea66f65f23582e045821ffd0dfda1dbab94 Mon Sep 17 00:00:00 2001 From: Steffen <33749463+YellowGarbageBag@users.noreply.github.com> Date: Wed, 22 May 2019 13:53:16 +0200 Subject: [PATCH 192/429] Update README.md (#5222) Add missing backslash --- docker/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/README.md b/docker/README.md index b27a692d5b..df5d0151e2 100644 --- a/docker/README.md +++ b/docker/README.md @@ -161,7 +161,7 @@ specify values for `SYNAPSE_CONFIG_PATH`, `SYNAPSE_SERVER_NAME` and example: ``` -docker run -it --rm +docker run -it --rm \ --mount type=volume,src=synapse-data,dst=/data \ -e SYNAPSE_CONFIG_PATH=/data/homeserver.yaml \ -e SYNAPSE_SERVER_NAME=my.matrix.host \ From 66b75e2d81a2bacf33d51ea5339fa9066264955d Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 22 May 2019 13:55:32 +0100 Subject: [PATCH 193/429] Neilj/ensure get profileinfo available in client reader slaved store (#5213) * expose SlavedProfileStore to ClientReaderSlavedStore --- changelog.d/5200.bugfix | 1 + synapse/app/client_reader.py | 2 ++ 2 files changed, 3 insertions(+) create mode 100644 changelog.d/5200.bugfix diff --git a/changelog.d/5200.bugfix b/changelog.d/5200.bugfix new file mode 100644 index 0000000000..f346c7b0cc --- /dev/null +++ b/changelog.d/5200.bugfix @@ -0,0 +1 @@ +Fix worker registration bug caused by ClientReaderSlavedStore being unable to see get_profileinfo. diff --git a/synapse/app/client_reader.py b/synapse/app/client_reader.py index 864f1eac48..a16e037f32 100644 --- a/synapse/app/client_reader.py +++ b/synapse/app/client_reader.py @@ -38,6 +38,7 @@ from synapse.replication.slave.storage.devices import SlavedDeviceStore from synapse.replication.slave.storage.directory import DirectoryStore from synapse.replication.slave.storage.events import SlavedEventStore from synapse.replication.slave.storage.keys import SlavedKeyStore +from synapse.replication.slave.storage.profile import SlavedProfileStore from synapse.replication.slave.storage.push_rule import SlavedPushRuleStore from synapse.replication.slave.storage.receipts import SlavedReceiptsStore from synapse.replication.slave.storage.registration import SlavedRegistrationStore @@ -81,6 +82,7 @@ class ClientReaderSlavedStore( SlavedApplicationServiceStore, SlavedRegistrationStore, SlavedTransactionStore, + SlavedProfileStore, SlavedClientIpStore, BaseSlavedStore, ): From 8031a6f3d50f0d0bc80dbe38e354db19f75fdf09 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 22 May 2019 15:40:28 +0100 Subject: [PATCH 194/429] 0.99.5 --- CHANGES.md | 6 ++++++ debian/changelog | 4 ++++ synapse/__init__.py | 2 +- 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 25ceec8b48..d1c9ea0e1a 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,9 @@ +Synapse 0.99.5 (2019-05-22) +=========================== + +No significant changes. + + Synapse 0.99.5rc1 (2019-05-21) ============================== diff --git a/debian/changelog b/debian/changelog index 35cf8ffb20..e148c32728 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,7 @@ +matrix-synapse-py3 (0.99.5) stable; urgency=medium + + * New synapse release 0.99.5. 
+ matrix-synapse-py3 (0.99.4) stable; urgency=medium [ Christoph Müller ] diff --git a/synapse/__init__.py b/synapse/__init__.py index 42af03b53d..d66d2411a1 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "0.99.5rc1" +__version__ = "0.99.5" From 62388a1e44278f9aa22244750433ac46142b266a Mon Sep 17 00:00:00 2001 From: Marcus Hoffmann Date: Wed, 22 May 2019 17:48:12 +0200 Subject: [PATCH 195/429] remove urllib3 pin (#5230) requests 2.22.0 as been released supporting urllib3 1.25.2 Signed-off-by: Marcus Hoffmann --- changelog.d/5230.misc | 1 + synapse/python_dependencies.py | 8 -------- 2 files changed, 1 insertion(+), 8 deletions(-) create mode 100644 changelog.d/5230.misc diff --git a/changelog.d/5230.misc b/changelog.d/5230.misc new file mode 100644 index 0000000000..c681bc9748 --- /dev/null +++ b/changelog.d/5230.misc @@ -0,0 +1 @@ +Remove urllib3 pin as requests 2.22.0 has been released supporting urllib3 1.25.2. diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index e3f828c4bb..f64baa4d58 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -74,14 +74,6 @@ REQUIREMENTS = [ "attrs>=17.4.0", "netaddr>=0.7.18", - - # requests is a transitive dep of treq, and urlib3 is a transitive dep - # of requests, as well as of sentry-sdk. - # - # As of requests 2.21, requests does not yet support urllib3 1.25. - # (If we do not pin it here, pip will give us the latest urllib3 - # due to the dep via sentry-sdk.) - "urllib3<1.25", ] CONDITIONAL_REQUIREMENTS = { From c31e375ade1b59a7fe38628337e9e1aa3de91feb Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 22 May 2019 17:45:44 +0100 Subject: [PATCH 196/429] 0.99.5 --- CHANGES.md | 6 ++++++ debian/changelog | 6 ++++-- synapse/__init__.py | 2 +- 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index d1c9ea0e1a..6bdfdd6d70 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,9 @@ +Synapse 0.99.5.1 (2019-05-22) +============================= + +No significant changes. + + Synapse 0.99.5 (2019-05-22) =========================== diff --git a/debian/changelog b/debian/changelog index e148c32728..90c6b86c5b 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,6 +1,8 @@ -matrix-synapse-py3 (0.99.5) stable; urgency=medium +matrix-synapse-py3 (0.99.5.1) stable; urgency=medium - * New synapse release 0.99.5. + * New synapse release 0.99.5.1. + + -- Synapse Packaging team Wed, 22 May 2019 16:22:24 +0000 matrix-synapse-py3 (0.99.4) stable; urgency=medium diff --git a/synapse/__init__.py b/synapse/__init__.py index d66d2411a1..4f95778eea 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "0.99.5" +__version__ = "0.99.5.1" From 006bd8f4f6e231ac1fafef1a83a68c3efb0b1bbf Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 22 May 2019 17:49:53 +0100 Subject: [PATCH 197/429] Revert "0.99.5" This reverts commit c31e375ade1b59a7fe38628337e9e1aa3de91feb. --- CHANGES.md | 6 ------ debian/changelog | 6 ++---- synapse/__init__.py | 2 +- 3 files changed, 3 insertions(+), 11 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 6bdfdd6d70..d1c9ea0e1a 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,9 +1,3 @@ -Synapse 0.99.5.1 (2019-05-22) -============================= - -No significant changes. 
- - Synapse 0.99.5 (2019-05-22) =========================== diff --git a/debian/changelog b/debian/changelog index 90c6b86c5b..e148c32728 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,8 +1,6 @@ -matrix-synapse-py3 (0.99.5.1) stable; urgency=medium +matrix-synapse-py3 (0.99.5) stable; urgency=medium - * New synapse release 0.99.5.1. - - -- Synapse Packaging team Wed, 22 May 2019 16:22:24 +0000 + * New synapse release 0.99.5. matrix-synapse-py3 (0.99.4) stable; urgency=medium diff --git a/synapse/__init__.py b/synapse/__init__.py index 4f95778eea..d66d2411a1 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "0.99.5.1" +__version__ = "0.99.5" From 3d5bba581ba0fc442ce64e3e0d68ad755869db82 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 22 May 2019 17:52:44 +0100 Subject: [PATCH 198/429] 0.99.5.1 --- CHANGES.md | 6 ++++++ debian/changelog | 6 ++++-- synapse/__init__.py | 2 +- 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index d1c9ea0e1a..6bdfdd6d70 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,9 @@ +Synapse 0.99.5.1 (2019-05-22) +============================= + +No significant changes. + + Synapse 0.99.5 (2019-05-22) =========================== diff --git a/debian/changelog b/debian/changelog index e148c32728..90c6b86c5b 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,6 +1,8 @@ -matrix-synapse-py3 (0.99.5) stable; urgency=medium +matrix-synapse-py3 (0.99.5.1) stable; urgency=medium - * New synapse release 0.99.5. + * New synapse release 0.99.5.1. + + -- Synapse Packaging team Wed, 22 May 2019 16:22:24 +0000 matrix-synapse-py3 (0.99.4) stable; urgency=medium diff --git a/synapse/__init__.py b/synapse/__init__.py index d66d2411a1..4f95778eea 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "0.99.5" +__version__ = "0.99.5.1" From 1a94de60e89090b8930bcdefe51f47f204f52605 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 22 May 2019 18:39:33 +0100 Subject: [PATCH 199/429] Run black on synapse.crypto.keyring (#5232) --- changelog.d/5232.misc | 1 + synapse/crypto/keyring.py | 286 ++++++++++++++++++-------------------- 2 files changed, 138 insertions(+), 149 deletions(-) create mode 100644 changelog.d/5232.misc diff --git a/changelog.d/5232.misc b/changelog.d/5232.misc new file mode 100644 index 0000000000..1cdc71f095 --- /dev/null +++ b/changelog.d/5232.misc @@ -0,0 +1 @@ +Run black on synapse.crypto.keyring. diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index d8ba870cca..5cc98542ce 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -56,9 +56,9 @@ from synapse.util.retryutils import NotRetryingDestination logger = logging.getLogger(__name__) -VerifyKeyRequest = namedtuple("VerifyRequest", ( - "server_name", "key_ids", "json_object", "deferred" -)) +VerifyKeyRequest = namedtuple( + "VerifyRequest", ("server_name", "key_ids", "json_object", "deferred") +) """ A request for a verify key to verify a JSON object. 
@@ -96,9 +96,7 @@ class Keyring(object): def verify_json_for_server(self, server_name, json_object): return logcontext.make_deferred_yieldable( - self.verify_json_objects_for_server( - [(server_name, json_object)] - )[0] + self.verify_json_objects_for_server([(server_name, json_object)])[0] ) def verify_json_objects_for_server(self, server_and_json): @@ -130,18 +128,15 @@ class Keyring(object): if not key_ids: return defer.fail( SynapseError( - 400, - "Not signed by %s" % (server_name,), - Codes.UNAUTHORIZED, + 400, "Not signed by %s" % (server_name,), Codes.UNAUTHORIZED ) ) - logger.debug("Verifying for %s with key_ids %s", - server_name, key_ids) + logger.debug("Verifying for %s with key_ids %s", server_name, key_ids) # add the key request to the queue, but don't start it off yet. verify_request = VerifyKeyRequest( - server_name, key_ids, json_object, defer.Deferred(), + server_name, key_ids, json_object, defer.Deferred() ) verify_requests.append(verify_request) @@ -179,15 +174,13 @@ class Keyring(object): # any other lookups until we have finished. # The deferreds are called with no logcontext. server_to_deferred = { - rq.server_name: defer.Deferred() - for rq in verify_requests + rq.server_name: defer.Deferred() for rq in verify_requests } # We want to wait for any previous lookups to complete before # proceeding. yield self.wait_for_previous_lookups( - [rq.server_name for rq in verify_requests], - server_to_deferred, + [rq.server_name for rq in verify_requests], server_to_deferred ) # Actually start fetching keys. @@ -216,9 +209,7 @@ class Keyring(object): return res for verify_request in verify_requests: - verify_request.deferred.addBoth( - remove_deferreds, verify_request, - ) + verify_request.deferred.addBoth(remove_deferreds, verify_request) except Exception: logger.exception("Error starting key lookups") @@ -248,7 +239,8 @@ class Keyring(object): break logger.info( "Waiting for existing lookups for %s to complete [loop %i]", - [w[0] for w in wait_on], loop_count, + [w[0] for w in wait_on], + loop_count, ) with PreserveLoggingContext(): yield defer.DeferredList((w[1] for w in wait_on)) @@ -335,13 +327,14 @@ class Keyring(object): with PreserveLoggingContext(): for verify_request in requests_missing_keys: - verify_request.deferred.errback(SynapseError( - 401, - "No key for %s with id %s" % ( - verify_request.server_name, verify_request.key_ids, - ), - Codes.UNAUTHORIZED, - )) + verify_request.deferred.errback( + SynapseError( + 401, + "No key for %s with id %s" + % (verify_request.server_name, verify_request.key_ids), + Codes.UNAUTHORIZED, + ) + ) def on_err(err): with PreserveLoggingContext(): @@ -383,25 +376,26 @@ class Keyring(object): ) defer.returnValue(result) except KeyLookupError as e: - logger.warning( - "Key lookup failed from %r: %s", perspective_name, e, - ) + logger.warning("Key lookup failed from %r: %s", perspective_name, e) except Exception as e: logger.exception( "Unable to get key from %r: %s %s", perspective_name, - type(e).__name__, str(e), + type(e).__name__, + str(e), ) defer.returnValue({}) - results = yield logcontext.make_deferred_yieldable(defer.gatherResults( - [ - run_in_background(get_key, p_name, p_keys) - for p_name, p_keys in self.perspective_servers.items() - ], - consumeErrors=True, - ).addErrback(unwrapFirstError)) + results = yield logcontext.make_deferred_yieldable( + defer.gatherResults( + [ + run_in_background(get_key, p_name, p_keys) + for p_name, p_keys in self.perspective_servers.items() + ], + consumeErrors=True, + 
).addErrback(unwrapFirstError) + ) union_of_keys = {} for result in results: @@ -412,32 +406,30 @@ class Keyring(object): @defer.inlineCallbacks def get_keys_from_server(self, server_name_and_key_ids): - results = yield logcontext.make_deferred_yieldable(defer.gatherResults( - [ - run_in_background( - self.get_server_verify_key_v2_direct, - server_name, - key_ids, - ) - for server_name, key_ids in server_name_and_key_ids - ], - consumeErrors=True, - ).addErrback(unwrapFirstError)) + results = yield logcontext.make_deferred_yieldable( + defer.gatherResults( + [ + run_in_background( + self.get_server_verify_key_v2_direct, server_name, key_ids + ) + for server_name, key_ids in server_name_and_key_ids + ], + consumeErrors=True, + ).addErrback(unwrapFirstError) + ) merged = {} for result in results: merged.update(result) - defer.returnValue({ - server_name: keys - for server_name, keys in merged.items() - if keys - }) + defer.returnValue( + {server_name: keys for server_name, keys in merged.items() if keys} + ) @defer.inlineCallbacks - def get_server_verify_key_v2_indirect(self, server_names_and_key_ids, - perspective_name, - perspective_keys): + def get_server_verify_key_v2_indirect( + self, server_names_and_key_ids, perspective_name, perspective_keys + ): # TODO(mark): Set the minimum_valid_until_ts to that needed by # the events being validated or the current time if validating # an incoming request. @@ -448,9 +440,7 @@ class Keyring(object): data={ u"server_keys": { server_name: { - key_id: { - u"minimum_valid_until_ts": 0 - } for key_id in key_ids + key_id: {u"minimum_valid_until_ts": 0} for key_id in key_ids } for server_name, key_ids in server_names_and_key_ids } @@ -458,21 +448,19 @@ class Keyring(object): long_retries=True, ) except (NotRetryingDestination, RequestSendFailed) as e: - raise_from( - KeyLookupError("Failed to connect to remote server"), e, - ) + raise_from(KeyLookupError("Failed to connect to remote server"), e) except HttpResponseException as e: - raise_from( - KeyLookupError("Remote server returned an error"), e, - ) + raise_from(KeyLookupError("Remote server returned an error"), e) keys = {} responses = query_response["server_keys"] for response in responses: - if (u"signatures" not in response - or perspective_name not in response[u"signatures"]): + if ( + u"signatures" not in response + or perspective_name not in response[u"signatures"] + ): raise KeyLookupError( "Key response not signed by perspective server" " %r" % (perspective_name,) @@ -482,9 +470,7 @@ class Keyring(object): for key_id in response[u"signatures"][perspective_name]: if key_id in perspective_keys: verify_signed_json( - response, - perspective_name, - perspective_keys[key_id] + response, perspective_name, perspective_keys[key_id] ) verified = True @@ -494,7 +480,7 @@ class Keyring(object): " known key, signed with: %r, known keys: %r", perspective_name, list(response[u"signatures"][perspective_name]), - list(perspective_keys) + list(perspective_keys), ) raise KeyLookupError( "Response not signed with a known key for perspective" @@ -508,18 +494,20 @@ class Keyring(object): keys.setdefault(server_name, {}).update(processed_response) - yield logcontext.make_deferred_yieldable(defer.gatherResults( - [ - run_in_background( - self.store_keys, - server_name=server_name, - from_server=perspective_name, - verify_keys=response_keys, - ) - for server_name, response_keys in keys.items() - ], - consumeErrors=True - ).addErrback(unwrapFirstError)) + yield logcontext.make_deferred_yieldable( + 
defer.gatherResults( + [ + run_in_background( + self.store_keys, + server_name=server_name, + from_server=perspective_name, + verify_keys=response_keys, + ) + for server_name, response_keys in keys.items() + ], + consumeErrors=True, + ).addErrback(unwrapFirstError) + ) defer.returnValue(keys) @@ -534,26 +522,26 @@ class Keyring(object): try: response = yield self.client.get_json( destination=server_name, - path="/_matrix/key/v2/server/" + urllib.parse.quote(requested_key_id), + path="/_matrix/key/v2/server/" + + urllib.parse.quote(requested_key_id), ignore_backoff=True, ) except (NotRetryingDestination, RequestSendFailed) as e: - raise_from( - KeyLookupError("Failed to connect to remote server"), e, - ) + raise_from(KeyLookupError("Failed to connect to remote server"), e) except HttpResponseException as e: - raise_from( - KeyLookupError("Remote server returned an error"), e, - ) + raise_from(KeyLookupError("Remote server returned an error"), e) - if (u"signatures" not in response - or server_name not in response[u"signatures"]): + if ( + u"signatures" not in response + or server_name not in response[u"signatures"] + ): raise KeyLookupError("Key response not signed by remote server") if response["server_name"] != server_name: - raise KeyLookupError("Expected a response for server %r not %r" % ( - server_name, response["server_name"] - )) + raise KeyLookupError( + "Expected a response for server %r not %r" + % (server_name, response["server_name"]) + ) response_keys = yield self.process_v2_response( from_server=server_name, @@ -564,16 +552,12 @@ class Keyring(object): keys.update(response_keys) yield self.store_keys( - server_name=server_name, - from_server=server_name, - verify_keys=keys, + server_name=server_name, from_server=server_name, verify_keys=keys ) defer.returnValue({server_name: keys}) @defer.inlineCallbacks - def process_v2_response( - self, from_server, response_json, requested_ids=[], - ): + def process_v2_response(self, from_server, response_json, requested_ids=[]): """Parse a 'Server Keys' structure from the result of a /key request This is used to parse either the entirety of the response from @@ -627,20 +611,13 @@ class Keyring(object): for key_id in response_json["signatures"].get(server_name, {}): if key_id not in response_json["verify_keys"]: raise KeyLookupError( - "Key response must include verification keys for all" - " signatures" + "Key response must include verification keys for all" " signatures" ) if key_id in verify_keys: - verify_signed_json( - response_json, - server_name, - verify_keys[key_id] - ) + verify_signed_json(response_json, server_name, verify_keys[key_id]) signed_key_json = sign_json( - response_json, - self.config.server_name, - self.config.signing_key[0], + response_json, self.config.server_name, self.config.signing_key[0] ) signed_key_json_bytes = encode_canonical_json(signed_key_json) @@ -653,21 +630,23 @@ class Keyring(object): response_keys.update(verify_keys) response_keys.update(old_verify_keys) - yield logcontext.make_deferred_yieldable(defer.gatherResults( - [ - run_in_background( - self.store.store_server_keys_json, - server_name=server_name, - key_id=key_id, - from_server=from_server, - ts_now_ms=time_now_ms, - ts_expires_ms=ts_valid_until_ms, - key_json_bytes=signed_key_json_bytes, - ) - for key_id in updated_key_ids - ], - consumeErrors=True, - ).addErrback(unwrapFirstError)) + yield logcontext.make_deferred_yieldable( + defer.gatherResults( + [ + run_in_background( + self.store.store_server_keys_json, + server_name=server_name, + 
key_id=key_id, + from_server=from_server, + ts_now_ms=time_now_ms, + ts_expires_ms=ts_valid_until_ms, + key_json_bytes=signed_key_json_bytes, + ) + for key_id in updated_key_ids + ], + consumeErrors=True, + ).addErrback(unwrapFirstError) + ) defer.returnValue(response_keys) @@ -681,16 +660,21 @@ class Keyring(object): A deferred that completes when the keys are stored. """ # TODO(markjh): Store whether the keys have expired. - return logcontext.make_deferred_yieldable(defer.gatherResults( - [ - run_in_background( - self.store.store_server_verify_key, - server_name, server_name, key.time_added, key - ) - for key_id, key in verify_keys.items() - ], - consumeErrors=True, - ).addErrback(unwrapFirstError)) + return logcontext.make_deferred_yieldable( + defer.gatherResults( + [ + run_in_background( + self.store.store_server_verify_key, + server_name, + server_name, + key.time_added, + key, + ) + for key_id, key in verify_keys.items() + ], + consumeErrors=True, + ).addErrback(unwrapFirstError) + ) @defer.inlineCallbacks @@ -713,17 +697,19 @@ def _handle_key_deferred(verify_request): except KeyLookupError as e: logger.warn( "Failed to download keys for %s: %s %s", - server_name, type(e).__name__, str(e), + server_name, + type(e).__name__, + str(e), ) raise SynapseError( - 502, - "Error downloading keys for %s" % (server_name,), - Codes.UNAUTHORIZED, + 502, "Error downloading keys for %s" % (server_name,), Codes.UNAUTHORIZED ) except Exception as e: logger.exception( "Got Exception when downloading keys for %s: %s %s", - server_name, type(e).__name__, str(e), + server_name, + type(e).__name__, + str(e), ) raise SynapseError( 401, @@ -733,22 +719,24 @@ def _handle_key_deferred(verify_request): json_object = verify_request.json_object - logger.debug("Got key %s %s:%s for server %s, verifying" % ( - key_id, verify_key.alg, verify_key.version, server_name, - )) + logger.debug( + "Got key %s %s:%s for server %s, verifying" + % (key_id, verify_key.alg, verify_key.version, server_name) + ) try: verify_signed_json(json_object, server_name, verify_key) except SignatureVerifyException as e: logger.debug( "Error verifying signature for %s:%s:%s with key %s: %s", - server_name, verify_key.alg, verify_key.version, + server_name, + verify_key.alg, + verify_key.version, encode_verify_key_base64(verify_key), str(e), ) raise SynapseError( 401, - "Invalid signature for server %s with key %s:%s: %s" % ( - server_name, verify_key.alg, verify_key.version, str(e), - ), + "Invalid signature for server %s with key %s:%s: %s" + % (server_name, verify_key.alg, verify_key.version, str(e)), Codes.UNAUTHORIZED, ) From 85d1e03b9d50c1c64d2742ef3ec4f2dcd2bf7f9f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 23 May 2019 11:17:42 +0100 Subject: [PATCH 200/429] Simplifications and comments in do_auth (#5227) I was staring at this function trying to figure out wtf it was actually doing. This is (hopefully) a non-functional refactor which makes it a bit clearer. --- changelog.d/5227.misc | 1 + synapse/handlers/federation.py | 309 ++++++++++++++++++------------- synapse/storage/events_worker.py | 2 +- 3 files changed, 187 insertions(+), 125 deletions(-) create mode 100644 changelog.d/5227.misc diff --git a/changelog.d/5227.misc b/changelog.d/5227.misc new file mode 100644 index 0000000000..32bd7b6009 --- /dev/null +++ b/changelog.d/5227.misc @@ -0,0 +1 @@ +Simplifications and comments in do_auth. 
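In outline, the refactor below turns do_auth into a thin wrapper: it first reconciles the supplied auth_events map, then runs the auth check against it. A condensed sketch of the resulting shape (names taken from the diff that follows; the try/except and logging around the final check are elided):

    @defer.inlineCallbacks
    def do_auth(self, origin, event, context, auth_events):
        room_version = yield self.store.get_room_version(event.room_id)

        # fetch any auth events we are missing (from the store or over
        # federation) and resolve differences against our calculated state
        yield self._update_auth_events_and_context_for_auth(
            origin, event, context, auth_events
        )

        # check the event against the (possibly updated) auth_events map
        self.auth.check(room_version, event, auth_events=auth_events)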
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 2202ed699a..cf4fad7de0 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2013,15 +2013,44 @@ class FederationHandler(BaseHandler): Args: origin (str): - event (synapse.events.FrozenEvent): + event (synapse.events.EventBase): context (synapse.events.snapshot.EventContext): - auth_events (dict[(str, str)->str]): + auth_events (dict[(str, str)->synapse.events.EventBase]): + Map from (event_type, state_key) to event + + What we expect the event's auth_events to be, based on the event's + position in the dag. I think? maybe?? + + Also NB that this function adds entries to it. + Returns: + defer.Deferred[None] + """ + room_version = yield self.store.get_room_version(event.room_id) + + yield self._update_auth_events_and_context_for_auth( + origin, event, context, auth_events + ) + try: + self.auth.check(room_version, event, auth_events=auth_events) + except AuthError as e: + logger.warn("Failed auth resolution for %r because %s", event, e) + raise e + + @defer.inlineCallbacks + def _update_auth_events_and_context_for_auth( + self, origin, event, context, auth_events + ): + """Helper for do_auth. See there for docs. + + Args: + origin (str): + event (synapse.events.EventBase): + context (synapse.events.snapshot.EventContext): + auth_events (dict[(str, str)->synapse.events.EventBase]): Returns: defer.Deferred[None] """ - # Check if we have all the auth events. - current_state = set(e.event_id for e in auth_events.values()) event_auth_events = set(event.auth_event_ids()) if event.is_state(): @@ -2029,11 +2058,21 @@ class FederationHandler(BaseHandler): else: event_key = None - if event_auth_events - current_state: + # if the event's auth_events refers to events which are not in our + # calculated auth_events, we need to fetch those events from somewhere. + # + # we start by fetching them from the store, and then try calling /event_auth/. + missing_auth = event_auth_events.difference( + e.event_id for e in auth_events.values() + ) + + if missing_auth: # TODO: can we use store.have_seen_events here instead? have_events = yield self.store.get_seen_events_with_rejections( - event_auth_events - current_state + missing_auth ) + logger.debug("Got events %s from store", have_events) + missing_auth.difference_update(have_events.keys()) else: have_events = {} @@ -2042,13 +2081,12 @@ class FederationHandler(BaseHandler): for e in auth_events.values() }) - seen_events = set(have_events.keys()) - - missing_auth = event_auth_events - seen_events - current_state - if missing_auth: - logger.info("Missing auth: %s", missing_auth) # If we don't have all the auth events, we need to get them. 
+ logger.info( + "auth_events contains unknown events: %s", + missing_auth, + ) try: remote_auth_chain = yield self.federation_client.get_event_auth( origin, event.room_id, event.event_id @@ -2089,145 +2127,168 @@ class FederationHandler(BaseHandler): have_events = yield self.store.get_seen_events_with_rejections( event.auth_event_ids() ) - seen_events = set(have_events.keys()) except Exception: # FIXME: logger.exception("Failed to get auth chain") + if event.internal_metadata.is_outlier(): + logger.info("Skipping auth_event fetch for outlier") + return + # FIXME: Assumes we have and stored all the state for all the # prev_events - current_state = set(e.event_id for e in auth_events.values()) - different_auth = event_auth_events - current_state + different_auth = event_auth_events.difference( + e.event_id for e in auth_events.values() + ) + + if not different_auth: + return + + logger.info( + "auth_events refers to events which are not in our calculated auth " + "chain: %s", + different_auth, + ) room_version = yield self.store.get_room_version(event.room_id) - if different_auth and not event.internal_metadata.is_outlier(): - # Do auth conflict res. - logger.info("Different auth: %s", different_auth) - - different_events = yield logcontext.make_deferred_yieldable( - defer.gatherResults([ - logcontext.run_in_background( - self.store.get_event, - d, - allow_none=True, - allow_rejected=False, - ) - for d in different_auth - if d in have_events and not have_events[d] - ], consumeErrors=True) - ).addErrback(unwrapFirstError) - - if different_events: - local_view = dict(auth_events) - remote_view = dict(auth_events) - remote_view.update({ - (d.type, d.state_key): d for d in different_events if d - }) - - new_state = yield self.state_handler.resolve_events( - room_version, - [list(local_view.values()), list(remote_view.values())], - event + different_events = yield logcontext.make_deferred_yieldable( + defer.gatherResults([ + logcontext.run_in_background( + self.store.get_event, + d, + allow_none=True, + allow_rejected=False, ) + for d in different_auth + if d in have_events and not have_events[d] + ], consumeErrors=True) + ).addErrback(unwrapFirstError) - auth_events.update(new_state) + if different_events: + local_view = dict(auth_events) + remote_view = dict(auth_events) + remote_view.update({ + (d.type, d.state_key): d for d in different_events if d + }) - current_state = set(e.event_id for e in auth_events.values()) - different_auth = event_auth_events - current_state + new_state = yield self.state_handler.resolve_events( + room_version, + [list(local_view.values()), list(remote_view.values())], + event + ) - yield self._update_context_for_auth_events( - event, context, auth_events, event_key, - ) + logger.info( + "After state res: updating auth_events with new state %s", + { + (d.type, d.state_key): d.event_id for d in new_state.values() + if auth_events.get((d.type, d.state_key)) != d + }, + ) - if different_auth and not event.internal_metadata.is_outlier(): - logger.info("Different auth after resolution: %s", different_auth) + auth_events.update(new_state) - # Only do auth resolution if we have something new to say. - # We can't rove an auth failure. 
- do_resolution = False + different_auth = event_auth_events.difference( + e.event_id for e in auth_events.values() + ) - provable = [ - RejectedReason.NOT_ANCESTOR, RejectedReason.NOT_ANCESTOR, - ] + yield self._update_context_for_auth_events( + event, context, auth_events, event_key, + ) - for e_id in different_auth: - if e_id in have_events: - if have_events[e_id] in provable: - do_resolution = True - break + if not different_auth: + # we're done + return - if do_resolution: - prev_state_ids = yield context.get_prev_state_ids(self.store) - # 1. Get what we think is the auth chain. - auth_ids = yield self.auth.compute_auth_events( - event, prev_state_ids - ) - local_auth_chain = yield self.store.get_auth_chain( - auth_ids, include_given=True - ) + logger.info( + "auth_events still refers to events which are not in the calculated auth " + "chain after state resolution: %s", + different_auth, + ) - try: - # 2. Get remote difference. - result = yield self.federation_client.query_auth( - origin, - event.room_id, - event.event_id, - local_auth_chain, - ) + # Only do auth resolution if we have something new to say. + # We can't prove an auth failure. + do_resolution = False - seen_remotes = yield self.store.have_seen_events( - [e.event_id for e in result["auth_chain"]] - ) + for e_id in different_auth: + if e_id in have_events: + if have_events[e_id] == RejectedReason.NOT_ANCESTOR: + do_resolution = True + break - # 3. Process any remote auth chain events we haven't seen. - for ev in result["auth_chain"]: - if ev.event_id in seen_remotes: - continue + if not do_resolution: + logger.info( + "Skipping auth resolution due to lack of provable rejection reasons" + ) + return - if ev.event_id == event.event_id: - continue + logger.info("Doing auth resolution") - try: - auth_ids = ev.auth_event_ids() - auth = { - (e.type, e.state_key): e - for e in result["auth_chain"] - if e.event_id in auth_ids - or event.type == EventTypes.Create - } - ev.internal_metadata.outlier = True + prev_state_ids = yield context.get_prev_state_ids(self.store) - logger.debug( - "do_auth %s different_auth: %s", - event.event_id, e.event_id - ) - - yield self._handle_new_event( - origin, ev, auth_events=auth - ) - - if ev.event_id in event_auth_events: - auth_events[(ev.type, ev.state_key)] = ev - except AuthError: - pass - - except Exception: - # FIXME: - logger.exception("Failed to query auth chain") - - # 4. Look at rejects and their proofs. - # TODO. - - yield self._update_context_for_auth_events( - event, context, auth_events, event_key, - ) + # 1. Get what we think is the auth chain. + auth_ids = yield self.auth.compute_auth_events( + event, prev_state_ids + ) + local_auth_chain = yield self.store.get_auth_chain( + auth_ids, include_given=True + ) try: - self.auth.check(room_version, event, auth_events=auth_events) - except AuthError as e: - logger.warn("Failed auth resolution for %r because %s", event, e) - raise e + # 2. Get remote difference. + result = yield self.federation_client.query_auth( + origin, + event.room_id, + event.event_id, + local_auth_chain, + ) + + seen_remotes = yield self.store.have_seen_events( + [e.event_id for e in result["auth_chain"]] + ) + + # 3. Process any remote auth chain events we haven't seen. 
+ for ev in result["auth_chain"]: + if ev.event_id in seen_remotes: + continue + + if ev.event_id == event.event_id: + continue + + try: + auth_ids = ev.auth_event_ids() + auth = { + (e.type, e.state_key): e + for e in result["auth_chain"] + if e.event_id in auth_ids + or event.type == EventTypes.Create + } + ev.internal_metadata.outlier = True + + logger.debug( + "do_auth %s different_auth: %s", + event.event_id, e.event_id + ) + + yield self._handle_new_event( + origin, ev, auth_events=auth + ) + + if ev.event_id in event_auth_events: + auth_events[(ev.type, ev.state_key)] = ev + except AuthError: + pass + + except Exception: + # FIXME: + logger.exception("Failed to query auth chain") + + # 4. Look at rejects and their proofs. + # TODO. + + yield self._update_context_for_auth_events( + event, context, auth_events, event_key, + ) @defer.inlineCallbacks def _update_context_for_auth_events(self, event, context, auth_events, diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index 83ffae2132..21b353cad3 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -610,7 +610,7 @@ class EventsWorkerStore(SQLBaseStore): return res - return self.runInteraction("get_rejection_reasons", f) + return self.runInteraction("get_seen_events_with_rejections", f) def _get_total_state_event_counts_txn(self, txn, room_id): """ From 2e052110ee0bca17b8e27b6b48ee8b7c64bc94ae Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 23 May 2019 11:45:39 +0100 Subject: [PATCH 201/429] Rewrite store_server_verify_key to store several keys at once (#5234) Storing server keys hammered the database a bit. This replaces the implementation which stored a single key, with one which can do many updates at once. --- changelog.d/5234.misc | 1 + synapse/crypto/keyring.py | 59 ++++++++------------------------- synapse/storage/keys.py | 63 ++++++++++++++++++++++-------------- tests/crypto/test_keyring.py | 14 ++++++-- tests/storage/test_keys.py | 44 +++++++++++++++++-------- 5 files changed, 95 insertions(+), 86 deletions(-) create mode 100644 changelog.d/5234.misc diff --git a/changelog.d/5234.misc b/changelog.d/5234.misc new file mode 100644 index 0000000000..43fbd6f67c --- /dev/null +++ b/changelog.d/5234.misc @@ -0,0 +1 @@ +Rewrite store_server_verify_key to store several keys at once. 
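The shape of the new batched call, as the keyring uses it in the diff below (a minimal sketch; the _store_fetched_keys helper name and the stand-in variables are illustrative only, and a later patch in this series swaps the bare VerifyKey values for FetchKeyResult objects):

    from twisted.internet import defer

    @defer.inlineCallbacks
    def _store_fetched_keys(store, from_server, ts_added_ms, fetched):
        # fetched: dict[server_name -> dict[key_id -> VerifyKey]]
        # Build (server_name, key_id, key) triplets and store them in a single
        # store_server_verify_keys() call, instead of one
        # store_server_verify_key() round trip per key.
        triplets = [
            (server_name, key_id, key)
            for server_name, keys in fetched.items()
            for key_id, key in keys.items()
        ]
        yield store.store_server_verify_keys(from_server, ts_added_ms, triplets)
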
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 5cc98542ce..badb5254ea 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -453,10 +453,11 @@ class Keyring(object): raise_from(KeyLookupError("Remote server returned an error"), e) keys = {} + added_keys = [] - responses = query_response["server_keys"] + time_now_ms = self.clock.time_msec() - for response in responses: + for response in query_response["server_keys"]: if ( u"signatures" not in response or perspective_name not in response[u"signatures"] @@ -492,21 +493,13 @@ class Keyring(object): ) server_name = response["server_name"] + added_keys.extend( + (server_name, key_id, key) for key_id, key in processed_response.items() + ) keys.setdefault(server_name, {}).update(processed_response) - yield logcontext.make_deferred_yieldable( - defer.gatherResults( - [ - run_in_background( - self.store_keys, - server_name=server_name, - from_server=perspective_name, - verify_keys=response_keys, - ) - for server_name, response_keys in keys.items() - ], - consumeErrors=True, - ).addErrback(unwrapFirstError) + yield self.store.store_server_verify_keys( + perspective_name, time_now_ms, added_keys ) defer.returnValue(keys) @@ -519,6 +512,7 @@ class Keyring(object): if requested_key_id in keys: continue + time_now_ms = self.clock.time_msec() try: response = yield self.client.get_json( destination=server_name, @@ -548,12 +542,13 @@ class Keyring(object): requested_ids=[requested_key_id], response_json=response, ) - + yield self.store.store_server_verify_keys( + server_name, + time_now_ms, + ((server_name, key_id, key) for key_id, key in response_keys.items()), + ) keys.update(response_keys) - yield self.store_keys( - server_name=server_name, from_server=server_name, verify_keys=keys - ) defer.returnValue({server_name: keys}) @defer.inlineCallbacks @@ -650,32 +645,6 @@ class Keyring(object): defer.returnValue(response_keys) - def store_keys(self, server_name, from_server, verify_keys): - """Store a collection of verify keys for a given server - Args: - server_name(str): The name of the server the keys are for. - from_server(str): The server the keys were downloaded from. - verify_keys(dict): A mapping of key_id to VerifyKey. - Returns: - A deferred that completes when the keys are stored. - """ - # TODO(markjh): Store whether the keys have expired. - return logcontext.make_deferred_yieldable( - defer.gatherResults( - [ - run_in_background( - self.store.store_server_verify_key, - server_name, - server_name, - key.time_added, - key, - ) - for key_id, key in verify_keys.items() - ], - consumeErrors=True, - ).addErrback(unwrapFirstError) - ) - @defer.inlineCallbacks def _handle_key_deferred(verify_request): diff --git a/synapse/storage/keys.py b/synapse/storage/keys.py index 7036541792..3c5f52009b 100644 --- a/synapse/storage/keys.py +++ b/synapse/storage/keys.py @@ -84,38 +84,51 @@ class KeyStore(SQLBaseStore): return self.runInteraction("get_server_verify_keys", _txn) - def store_server_verify_key( - self, server_name, from_server, time_now_ms, verify_key - ): - """Stores a NACL verification key for the given server. + def store_server_verify_keys(self, from_server, ts_added_ms, verify_keys): + """Stores NACL verification keys for remote servers. Args: - server_name (str): The name of the server. - from_server (str): Where the verification key was looked up - time_now_ms (int): The time now in milliseconds - verify_key (nacl.signing.VerifyKey): The NACL verify key. 
+ from_server (str): Where the verification keys were looked up + ts_added_ms (int): The time to record that the key was added + verify_keys (iterable[tuple[str, str, nacl.signing.VerifyKey]]): + keys to be stored. Each entry is a triplet of + (server_name, key_id, key). """ - key_id = "%s:%s" % (verify_key.alg, verify_key.version) - - # XXX fix this to not need a lock (#3819) - def _txn(txn): - self._simple_upsert_txn( - txn, - table="server_signature_keys", - keyvalues={"server_name": server_name, "key_id": key_id}, - values={ - "from_server": from_server, - "ts_added_ms": time_now_ms, - "verify_key": db_binary_type(verify_key.encode()), - }, + key_values = [] + value_values = [] + invalidations = [] + for server_name, key_id, verify_key in verify_keys: + key_values.append((server_name, key_id)) + value_values.append( + ( + from_server, + ts_added_ms, + db_binary_type(verify_key.encode()), + ) ) # invalidate takes a tuple corresponding to the params of # _get_server_verify_key. _get_server_verify_key only takes one # param, which is itself the 2-tuple (server_name, key_id). - txn.call_after( - self._get_server_verify_key.invalidate, ((server_name, key_id),) - ) + invalidations.append((server_name, key_id)) - return self.runInteraction("store_server_verify_key", _txn) + def _invalidate(res): + f = self._get_server_verify_key.invalidate + for i in invalidations: + f((i, )) + return res + + return self.runInteraction( + "store_server_verify_keys", + self._simple_upsert_many_txn, + table="server_signature_keys", + key_names=("server_name", "key_id"), + key_values=key_values, + value_names=( + "from_server", + "ts_added_ms", + "verify_key", + ), + value_values=value_values, + ).addCallback(_invalidate) def store_server_keys_json( self, server_name, key_id, from_server, ts_now_ms, ts_expires_ms, key_json_bytes diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 3c79d4afe7..bcffe53a91 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -192,8 +192,18 @@ class KeyringTestCase(unittest.HomeserverTestCase): kr = keyring.Keyring(self.hs) key1 = signedjson.key.generate_signing_key(1) - r = self.hs.datastore.store_server_verify_key( - "server9", "", time.time() * 1000, signedjson.key.get_verify_key(key1) + key1_id = "%s:%s" % (key1.alg, key1.version) + + r = self.hs.datastore.store_server_verify_keys( + "server9", + time.time() * 1000, + [ + ( + "server9", + key1_id, + signedjson.key.get_verify_key(key1), + ), + ], ) self.get_success(r) json1 = {} diff --git a/tests/storage/test_keys.py b/tests/storage/test_keys.py index 6bfaa00fe9..71ad7aee32 100644 --- a/tests/storage/test_keys.py +++ b/tests/storage/test_keys.py @@ -31,23 +31,32 @@ class KeyStoreTestCase(tests.unittest.HomeserverTestCase): def test_get_server_verify_keys(self): store = self.hs.get_datastore() - d = store.store_server_verify_key("server1", "from_server", 0, KEY_1) - self.get_success(d) - d = store.store_server_verify_key("server1", "from_server", 0, KEY_2) + key_id_1 = "ed25519:key1" + key_id_2 = "ed25519:KEY_ID_2" + d = store.store_server_verify_keys( + "from_server", + 10, + [ + ("server1", key_id_1, KEY_1), + ("server1", key_id_2, KEY_2), + ], + ) self.get_success(d) d = store.get_server_verify_keys( - [ - ("server1", "ed25519:key1"), - ("server1", "ed25519:key2"), - ("server1", "ed25519:key3"), - ] + [("server1", key_id_1), ("server1", key_id_2), ("server1", "ed25519:key3")] ) res = self.get_success(d) self.assertEqual(len(res.keys()), 3) - self.assertEqual(res[("server1", 
"ed25519:key1")].version, "key1") - self.assertEqual(res[("server1", "ed25519:key2")].version, "key2") + res1 = res[("server1", key_id_1)] + self.assertEqual(res1, KEY_1) + self.assertEqual(res1.version, "key1") + + res2 = res[("server1", key_id_2)] + self.assertEqual(res2, KEY_2) + # version comes from the ID it was stored with + self.assertEqual(res2.version, "KEY_ID_2") # non-existent result gives None self.assertIsNone(res[("server1", "ed25519:key3")]) @@ -60,9 +69,14 @@ class KeyStoreTestCase(tests.unittest.HomeserverTestCase): key_id_1 = "ed25519:key1" key_id_2 = "ed25519:key2" - d = store.store_server_verify_key("srv1", "from_server", 0, KEY_1) - self.get_success(d) - d = store.store_server_verify_key("srv1", "from_server", 0, KEY_2) + d = store.store_server_verify_keys( + "from_server", + 0, + [ + ("srv1", key_id_1, KEY_1), + ("srv1", key_id_2, KEY_2), + ], + ) self.get_success(d) d = store.get_server_verify_keys([("srv1", key_id_1), ("srv1", key_id_2)]) @@ -81,7 +95,9 @@ class KeyStoreTestCase(tests.unittest.HomeserverTestCase): new_key_2 = signedjson.key.get_verify_key( signedjson.key.generate_signing_key("key2") ) - d = store.store_server_verify_key("srv1", "from_server", 10, new_key_2) + d = store.store_server_verify_keys( + "from_server", 10, [("srv1", key_id_2, new_key_2)] + ) self.get_success(d) d = store.get_server_verify_keys([("srv1", key_id_1), ("srv1", key_id_2)]) From cc187f933796a8151f0d11c03ef6faa8d1949256 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 23 May 2019 11:46:05 +0100 Subject: [PATCH 202/429] Remove unused VerifyKey.expired and .time_added fields (#5235) These were never used, and poking arbitary data into objects from other packages seems confusing at best. --- changelog.d/5235.misc | 1 + synapse/crypto/keyring.py | 3 --- 2 files changed, 1 insertion(+), 3 deletions(-) create mode 100644 changelog.d/5235.misc diff --git a/changelog.d/5235.misc b/changelog.d/5235.misc new file mode 100644 index 0000000000..2296ad2a4f --- /dev/null +++ b/changelog.d/5235.misc @@ -0,0 +1 @@ +Remove unused VerifyKey.expired and .time_added fields. diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index badb5254ea..ea910a2f21 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -589,7 +589,6 @@ class Keyring(object): key_base64 = key_data["key"] key_bytes = decode_base64(key_base64) verify_key = decode_verify_key_bytes(key_id, key_bytes) - verify_key.time_added = time_now_ms verify_keys[key_id] = verify_key old_verify_keys = {} @@ -598,8 +597,6 @@ class Keyring(object): key_base64 = key_data["key"] key_bytes = decode_base64(key_base64) verify_key = decode_verify_key_bytes(key_id, key_bytes) - verify_key.expired = key_data["expired_ts"] - verify_key.time_added = time_now_ms old_verify_keys[key_id] = verify_key server_name = response_json["server_name"] From 84660d91b21eb3357ee0287319bc7f50e2222b21 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 23 May 2019 11:51:39 +0100 Subject: [PATCH 203/429] Simplify process_v2_response (#5236) * Pass time_added_ms into process_v2_response * Simplify process_v2_response We can merge old_verify_keys into verify_keys, and reduce the number of dicts flying around. 
--- changelog.d/5236.misc | 1 + synapse/crypto/keyring.py | 50 +++++++++++++++++++++++---------------- 2 files changed, 30 insertions(+), 21 deletions(-) create mode 100644 changelog.d/5236.misc diff --git a/changelog.d/5236.misc b/changelog.d/5236.misc new file mode 100644 index 0000000000..cb4417a9f4 --- /dev/null +++ b/changelog.d/5236.misc @@ -0,0 +1 @@ +Simplify Keyring.process_v2_response. \ No newline at end of file diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index ea910a2f21..9d629b2238 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -489,7 +489,7 @@ class Keyring(object): ) processed_response = yield self.process_v2_response( - perspective_name, response + perspective_name, response, time_added_ms=time_now_ms ) server_name = response["server_name"] @@ -541,6 +541,7 @@ class Keyring(object): from_server=server_name, requested_ids=[requested_key_id], response_json=response, + time_added_ms=time_now_ms, ) yield self.store.store_server_verify_keys( server_name, @@ -552,7 +553,9 @@ class Keyring(object): defer.returnValue({server_name: keys}) @defer.inlineCallbacks - def process_v2_response(self, from_server, response_json, requested_ids=[]): + def process_v2_response( + self, from_server, response_json, time_added_ms, requested_ids=[] + ): """Parse a 'Server Keys' structure from the result of a /key request This is used to parse either the entirety of the response from @@ -573,6 +576,8 @@ class Keyring(object): response_json (dict): the json-decoded Server Keys response object + time_added_ms (int): the timestamp to record in server_keys_json + requested_ids (iterable[str]): a list of the key IDs that were requested. We will store the json for these key ids as well as any that are actually in the response @@ -581,8 +586,9 @@ class Keyring(object): Deferred[dict[str, nacl.signing.VerifyKey]]: map from key_id to key object """ - time_now_ms = self.clock.time_msec() - response_keys = {} + + # start by extracting the keys from the response, since they may be required + # to validate the signature on the response. 
verify_keys = {} for key_id, key_data in response_json["verify_keys"].items(): if is_signing_algorithm_supported(key_id): @@ -591,23 +597,27 @@ class Keyring(object): verify_key = decode_verify_key_bytes(key_id, key_bytes) verify_keys[key_id] = verify_key - old_verify_keys = {} + # TODO: improve this signature checking + server_name = response_json["server_name"] + for key_id in response_json["signatures"].get(server_name, {}): + if key_id not in verify_keys: + raise KeyLookupError( + "Key response must include verification keys for all signatures" + ) + + verify_signed_json( + response_json, server_name, verify_keys[key_id] + ) + for key_id, key_data in response_json["old_verify_keys"].items(): if is_signing_algorithm_supported(key_id): key_base64 = key_data["key"] key_bytes = decode_base64(key_base64) verify_key = decode_verify_key_bytes(key_id, key_bytes) - old_verify_keys[key_id] = verify_key - - server_name = response_json["server_name"] - for key_id in response_json["signatures"].get(server_name, {}): - if key_id not in response_json["verify_keys"]: - raise KeyLookupError( - "Key response must include verification keys for all" " signatures" - ) - if key_id in verify_keys: - verify_signed_json(response_json, server_name, verify_keys[key_id]) + verify_keys[key_id] = verify_key + # re-sign the json with our own key, so that it is ready if we are asked to + # give it out as a notary server signed_key_json = sign_json( response_json, self.config.server_name, self.config.signing_key[0] ) @@ -615,12 +625,10 @@ class Keyring(object): signed_key_json_bytes = encode_canonical_json(signed_key_json) ts_valid_until_ms = signed_key_json[u"valid_until_ts"] + # for reasons I don't quite understand, we store this json for the key ids we + # requested, as well as those we got. updated_key_ids = set(requested_ids) updated_key_ids.update(verify_keys) - updated_key_ids.update(old_verify_keys) - - response_keys.update(verify_keys) - response_keys.update(old_verify_keys) yield logcontext.make_deferred_yieldable( defer.gatherResults( @@ -630,7 +638,7 @@ class Keyring(object): server_name=server_name, key_id=key_id, from_server=from_server, - ts_now_ms=time_now_ms, + ts_now_ms=time_added_ms, ts_expires_ms=ts_valid_until_ms, key_json_bytes=signed_key_json_bytes, ) @@ -640,7 +648,7 @@ class Keyring(object): ).addErrback(unwrapFirstError) ) - defer.returnValue(response_keys) + defer.returnValue(verify_keys) @defer.inlineCallbacks From b75537beaf841089f9f07c9dbed04a7a420a8b1f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 3 Apr 2019 18:10:24 +0100 Subject: [PATCH 204/429] Store key validity time in the storage layer This is a first step to checking that the key is valid at the required moment. The idea here is that, rather than passing VerifyKey objects in and out of the storage layer, we instead pass FetchKeyResult objects, which simply wrap the VerifyKey and add a valid_until_ts field. 
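For reference, the wrapper is just a small frozen attrs class, defined in synapse/storage/keys.py in the diff below; callers now unpack it rather than receiving a bare key:

    import attr

    @attr.s(slots=True, frozen=True)
    class FetchKeyResult(object):
        verify_key = attr.ib()      # VerifyKey: the key itself
        valid_until_ts = attr.ib()  # int: how long we can use this key for

    # e.g. in the updated tests:
    #     res = keys[("server1", key_id_1)]
    #     res.verify_key, res.valid_until_ts
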
--- changelog.d/5237.misc | 1 + synapse/crypto/keyring.py | 47 +++++++++++++------ synapse/storage/keys.py | 31 ++++++++---- .../delta/54/add_validity_to_server_keys.sql | 23 +++++++++ tests/crypto/test_keyring.py | 22 +++++---- tests/storage/test_keys.py | 44 +++++++++++------ 6 files changed, 122 insertions(+), 46 deletions(-) create mode 100644 changelog.d/5237.misc create mode 100644 synapse/storage/schema/delta/54/add_validity_to_server_keys.sql diff --git a/changelog.d/5237.misc b/changelog.d/5237.misc new file mode 100644 index 0000000000..f4fe3b821b --- /dev/null +++ b/changelog.d/5237.misc @@ -0,0 +1 @@ +Store key validity time in the storage layer. diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 9d629b2238..14a27288fd 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -20,7 +20,6 @@ from collections import namedtuple from six import raise_from from six.moves import urllib -import nacl.signing from signedjson.key import ( decode_verify_key_bytes, encode_verify_key_base64, @@ -43,6 +42,7 @@ from synapse.api.errors import ( RequestSendFailed, SynapseError, ) +from synapse.storage.keys import FetchKeyResult from synapse.util import logcontext, unwrapFirstError from synapse.util.logcontext import ( LoggingContext, @@ -307,11 +307,15 @@ class Keyring(object): # complete this VerifyKeyRequest. result_keys = results.get(server_name, {}) for key_id in verify_request.key_ids: - key = result_keys.get(key_id) - if key: + fetch_key_result = result_keys.get(key_id) + if fetch_key_result: with PreserveLoggingContext(): verify_request.deferred.callback( - (server_name, key_id, key) + ( + server_name, + key_id, + fetch_key_result.verify_key, + ) ) break else: @@ -348,12 +352,12 @@ class Keyring(object): def get_keys_from_store(self, server_name_and_key_ids): """ Args: - server_name_and_key_ids (iterable(Tuple[str, iterable[str]]): + server_name_and_key_ids (iterable[Tuple[str, iterable[str]]]): list of (server_name, iterable[key_id]) tuples to fetch keys for Returns: - Deferred: resolves to dict[str, dict[str, VerifyKey|None]]: map from - server_name -> key_id -> VerifyKey + Deferred[dict[str, dict[str, synapse.storage.keys.FetchKeyResult|None]]]: + map from server_name -> key_id -> FetchKeyResult """ keys_to_fetch = ( (server_name, key_id) @@ -430,6 +434,18 @@ class Keyring(object): def get_server_verify_key_v2_indirect( self, server_names_and_key_ids, perspective_name, perspective_keys ): + """ + Args: + server_names_and_key_ids (iterable[Tuple[str, iterable[str]]]): + list of (server_name, iterable[key_id]) tuples to fetch keys for + perspective_name (str): name of the notary server to query for the keys + perspective_keys (dict[str, VerifyKey]): map of key_id->key for the + notary server + + Returns: + Deferred[dict[str, dict[str, synapse.storage.keys.FetchKeyResult]]]: map + from server_name -> key_id -> FetchKeyResult + """ # TODO(mark): Set the minimum_valid_until_ts to that needed by # the events being validated or the current time if validating # an incoming request. 
@@ -506,7 +522,7 @@ class Keyring(object): @defer.inlineCallbacks def get_server_verify_key_v2_direct(self, server_name, key_ids): - keys = {} # type: dict[str, nacl.signing.VerifyKey] + keys = {} # type: dict[str, FetchKeyResult] for requested_key_id in key_ids: if requested_key_id in keys: @@ -583,9 +599,9 @@ class Keyring(object): actually in the response Returns: - Deferred[dict[str, nacl.signing.VerifyKey]]: - map from key_id to key object + Deferred[dict[str, FetchKeyResult]]: map from key_id to result object """ + ts_valid_until_ms = response_json[u"valid_until_ts"] # start by extracting the keys from the response, since they may be required # to validate the signature on the response. @@ -595,7 +611,9 @@ class Keyring(object): key_base64 = key_data["key"] key_bytes = decode_base64(key_base64) verify_key = decode_verify_key_bytes(key_id, key_bytes) - verify_keys[key_id] = verify_key + verify_keys[key_id] = FetchKeyResult( + verify_key=verify_key, valid_until_ts=ts_valid_until_ms + ) # TODO: improve this signature checking server_name = response_json["server_name"] @@ -606,7 +624,7 @@ class Keyring(object): ) verify_signed_json( - response_json, server_name, verify_keys[key_id] + response_json, server_name, verify_keys[key_id].verify_key ) for key_id, key_data in response_json["old_verify_keys"].items(): @@ -614,7 +632,9 @@ class Keyring(object): key_base64 = key_data["key"] key_bytes = decode_base64(key_base64) verify_key = decode_verify_key_bytes(key_id, key_bytes) - verify_keys[key_id] = verify_key + verify_keys[key_id] = FetchKeyResult( + verify_key=verify_key, valid_until_ts=key_data["expired_ts"] + ) # re-sign the json with our own key, so that it is ready if we are asked to # give it out as a notary server @@ -623,7 +643,6 @@ class Keyring(object): ) signed_key_json_bytes = encode_canonical_json(signed_key_json) - ts_valid_until_ms = signed_key_json[u"valid_until_ts"] # for reasons I don't quite understand, we store this json for the key ids we # requested, as well as those we got. diff --git a/synapse/storage/keys.py b/synapse/storage/keys.py index 3c5f52009b..5300720dbb 100644 --- a/synapse/storage/keys.py +++ b/synapse/storage/keys.py @@ -19,6 +19,7 @@ import logging import six +import attr from signedjson.key import decode_verify_key_bytes from synapse.util import batch_iter @@ -36,6 +37,12 @@ else: db_binary_type = memoryview +@attr.s(slots=True, frozen=True) +class FetchKeyResult(object): + verify_key = attr.ib() # VerifyKey: the key itself + valid_until_ts = attr.ib() # int: how long we can use this key for + + class KeyStore(SQLBaseStore): """Persistence for signature verification keys """ @@ -54,8 +61,8 @@ class KeyStore(SQLBaseStore): iterable of (server_name, key-id) tuples to fetch keys for Returns: - Deferred: resolves to dict[Tuple[str, str], VerifyKey|None]: - map from (server_name, key_id) -> VerifyKey, or None if the key is + Deferred: resolves to dict[Tuple[str, str], FetchKeyResult|None]: + map from (server_name, key_id) -> FetchKeyResult, or None if the key is unknown """ keys = {} @@ -65,17 +72,19 @@ class KeyStore(SQLBaseStore): # batch_iter always returns tuples so it's safe to do len(batch) sql = ( - "SELECT server_name, key_id, verify_key FROM server_signature_keys " - "WHERE 1=0" + "SELECT server_name, key_id, verify_key, ts_valid_until_ms " + "FROM server_signature_keys WHERE 1=0" ) + " OR (server_name=? 
AND key_id=?)" * len(batch) txn.execute(sql, tuple(itertools.chain.from_iterable(batch))) for row in txn: - server_name, key_id, key_bytes = row - keys[(server_name, key_id)] = decode_verify_key_bytes( - key_id, bytes(key_bytes) + server_name, key_id, key_bytes, ts_valid_until_ms = row + res = FetchKeyResult( + verify_key=decode_verify_key_bytes(key_id, bytes(key_bytes)), + valid_until_ts=ts_valid_until_ms, ) + keys[(server_name, key_id)] = res def _txn(txn): for batch in batch_iter(server_name_and_key_ids, 50): @@ -89,20 +98,21 @@ class KeyStore(SQLBaseStore): Args: from_server (str): Where the verification keys were looked up ts_added_ms (int): The time to record that the key was added - verify_keys (iterable[tuple[str, str, nacl.signing.VerifyKey]]): + verify_keys (iterable[tuple[str, str, FetchKeyResult]]): keys to be stored. Each entry is a triplet of (server_name, key_id, key). """ key_values = [] value_values = [] invalidations = [] - for server_name, key_id, verify_key in verify_keys: + for server_name, key_id, fetch_result in verify_keys: key_values.append((server_name, key_id)) value_values.append( ( from_server, ts_added_ms, - db_binary_type(verify_key.encode()), + fetch_result.valid_until_ts, + db_binary_type(fetch_result.verify_key.encode()), ) ) # invalidate takes a tuple corresponding to the params of @@ -125,6 +135,7 @@ class KeyStore(SQLBaseStore): value_names=( "from_server", "ts_added_ms", + "ts_valid_until_ms", "verify_key", ), value_values=value_values, diff --git a/synapse/storage/schema/delta/54/add_validity_to_server_keys.sql b/synapse/storage/schema/delta/54/add_validity_to_server_keys.sql new file mode 100644 index 0000000000..c01aa9d2d9 --- /dev/null +++ b/synapse/storage/schema/delta/54/add_validity_to_server_keys.sql @@ -0,0 +1,23 @@ +/* Copyright 2019 New Vector Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* When we can use this key until, before we have to refresh it. 
*/ +ALTER TABLE server_signature_keys ADD COLUMN ts_valid_until_ms BIGINT; + +UPDATE server_signature_keys SET ts_valid_until_ms = ( + SELECT MAX(ts_valid_until_ms) FROM server_keys_json skj WHERE + skj.server_name = server_signature_keys.server_name AND + skj.key_id = server_signature_keys.key_id +); diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index bcffe53a91..83de32b05d 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -25,6 +25,7 @@ from twisted.internet import defer from synapse.api.errors import SynapseError from synapse.crypto import keyring from synapse.crypto.keyring import KeyLookupError +from synapse.storage.keys import FetchKeyResult from synapse.util import logcontext from synapse.util.logcontext import LoggingContext @@ -201,7 +202,7 @@ class KeyringTestCase(unittest.HomeserverTestCase): ( "server9", key1_id, - signedjson.key.get_verify_key(key1), + FetchKeyResult(signedjson.key.get_verify_key(key1), 1000), ), ], ) @@ -251,9 +252,10 @@ class KeyringTestCase(unittest.HomeserverTestCase): server_name_and_key_ids = [(SERVER_NAME, ("key1",))] keys = self.get_success(kr.get_keys_from_server(server_name_and_key_ids)) k = keys[SERVER_NAME][testverifykey_id] - self.assertEqual(k, testverifykey) - self.assertEqual(k.alg, "ed25519") - self.assertEqual(k.version, "ver1") + self.assertEqual(k.valid_until_ts, VALID_UNTIL_TS) + self.assertEqual(k.verify_key, testverifykey) + self.assertEqual(k.verify_key.alg, "ed25519") + self.assertEqual(k.verify_key.version, "ver1") # check that the perspectives store is correctly updated lookup_triplet = (SERVER_NAME, testverifykey_id, None) @@ -321,9 +323,10 @@ class KeyringTestCase(unittest.HomeserverTestCase): keys = self.get_success(kr.get_keys_from_perspectives(server_name_and_key_ids)) self.assertIn(SERVER_NAME, keys) k = keys[SERVER_NAME][testverifykey_id] - self.assertEqual(k, testverifykey) - self.assertEqual(k.alg, "ed25519") - self.assertEqual(k.version, "ver1") + self.assertEqual(k.valid_until_ts, VALID_UNTIL_TS) + self.assertEqual(k.verify_key, testverifykey) + self.assertEqual(k.verify_key.alg, "ed25519") + self.assertEqual(k.verify_key.version, "ver1") # check that the perspectives store is correctly updated lookup_triplet = (SERVER_NAME, testverifykey_id, None) @@ -346,7 +349,10 @@ class KeyringTestCase(unittest.HomeserverTestCase): @defer.inlineCallbacks def run_in_context(f, *args, **kwargs): - with LoggingContext("testctx"): + with LoggingContext("testctx") as ctx: + # we set the "request" prop to make it easier to follow what's going on in the + # logs. 
+ ctx.request = "testctx" rv = yield f(*args, **kwargs) defer.returnValue(rv) diff --git a/tests/storage/test_keys.py b/tests/storage/test_keys.py index 71ad7aee32..e07ff01201 100644 --- a/tests/storage/test_keys.py +++ b/tests/storage/test_keys.py @@ -17,6 +17,8 @@ import signedjson.key from twisted.internet.defer import Deferred +from synapse.storage.keys import FetchKeyResult + import tests.unittest KEY_1 = signedjson.key.decode_verify_key_base64( @@ -37,8 +39,8 @@ class KeyStoreTestCase(tests.unittest.HomeserverTestCase): "from_server", 10, [ - ("server1", key_id_1, KEY_1), - ("server1", key_id_2, KEY_2), + ("server1", key_id_1, FetchKeyResult(KEY_1, 100)), + ("server1", key_id_2, FetchKeyResult(KEY_2, 200)), ], ) self.get_success(d) @@ -50,13 +52,15 @@ class KeyStoreTestCase(tests.unittest.HomeserverTestCase): self.assertEqual(len(res.keys()), 3) res1 = res[("server1", key_id_1)] - self.assertEqual(res1, KEY_1) - self.assertEqual(res1.version, "key1") + self.assertEqual(res1.verify_key, KEY_1) + self.assertEqual(res1.verify_key.version, "key1") + self.assertEqual(res1.valid_until_ts, 100) res2 = res[("server1", key_id_2)] - self.assertEqual(res2, KEY_2) + self.assertEqual(res2.verify_key, KEY_2) # version comes from the ID it was stored with - self.assertEqual(res2.version, "KEY_ID_2") + self.assertEqual(res2.verify_key.version, "KEY_ID_2") + self.assertEqual(res2.valid_until_ts, 200) # non-existent result gives None self.assertIsNone(res[("server1", "ed25519:key3")]) @@ -73,8 +77,8 @@ class KeyStoreTestCase(tests.unittest.HomeserverTestCase): "from_server", 0, [ - ("srv1", key_id_1, KEY_1), - ("srv1", key_id_2, KEY_2), + ("srv1", key_id_1, FetchKeyResult(KEY_1, 100)), + ("srv1", key_id_2, FetchKeyResult(KEY_2, 200)), ], ) self.get_success(d) @@ -82,26 +86,38 @@ class KeyStoreTestCase(tests.unittest.HomeserverTestCase): d = store.get_server_verify_keys([("srv1", key_id_1), ("srv1", key_id_2)]) res = self.get_success(d) self.assertEqual(len(res.keys()), 2) - self.assertEqual(res[("srv1", key_id_1)], KEY_1) - self.assertEqual(res[("srv1", key_id_2)], KEY_2) + + res1 = res[("srv1", key_id_1)] + self.assertEqual(res1.verify_key, KEY_1) + self.assertEqual(res1.valid_until_ts, 100) + + res2 = res[("srv1", key_id_2)] + self.assertEqual(res2.verify_key, KEY_2) + self.assertEqual(res2.valid_until_ts, 200) # we should be able to look up the same thing again without a db hit res = store.get_server_verify_keys([("srv1", key_id_1)]) if isinstance(res, Deferred): res = self.successResultOf(res) self.assertEqual(len(res.keys()), 1) - self.assertEqual(res[("srv1", key_id_1)], KEY_1) + self.assertEqual(res[("srv1", key_id_1)].verify_key, KEY_1) new_key_2 = signedjson.key.get_verify_key( signedjson.key.generate_signing_key("key2") ) d = store.store_server_verify_keys( - "from_server", 10, [("srv1", key_id_2, new_key_2)] + "from_server", 10, [("srv1", key_id_2, FetchKeyResult(new_key_2, 300))] ) self.get_success(d) d = store.get_server_verify_keys([("srv1", key_id_1), ("srv1", key_id_2)]) res = self.get_success(d) self.assertEqual(len(res.keys()), 2) - self.assertEqual(res[("srv1", key_id_1)], KEY_1) - self.assertEqual(res[("srv1", key_id_2)], new_key_2) + + res1 = res[("srv1", key_id_1)] + self.assertEqual(res1.verify_key, KEY_1) + self.assertEqual(res1.valid_until_ts, 100) + + res2 = res[("srv1", key_id_2)] + self.assertEqual(res2.verify_key, new_key_2) + self.assertEqual(res2.valid_until_ts, 300) From 895b79ac2ece74500fb8a4ea158a6aec2adc0856 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: 
Tue, 9 Apr 2019 18:28:17 +0100 Subject: [PATCH 205/429] Factor out KeyFetchers from KeyRing Rather than have three methods which have to have the same interface, factor out a separate interface which is provided by three implementations. I find it easier to grok the code this way. --- changelog.d/5244.misc | 1 + synapse/crypto/keyring.py | 459 +++++++++++++++++++---------------- tests/crypto/test_keyring.py | 34 ++- 3 files changed, 276 insertions(+), 218 deletions(-) create mode 100644 changelog.d/5244.misc diff --git a/changelog.d/5244.misc b/changelog.d/5244.misc new file mode 100644 index 0000000000..9cc1fb869d --- /dev/null +++ b/changelog.d/5244.misc @@ -0,0 +1 @@ +Refactor synapse.crypto.keyring to use a KeyFetcher interface. diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 14a27288fd..eaf41b983c 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -80,12 +80,13 @@ class KeyLookupError(ValueError): class Keyring(object): def __init__(self, hs): - self.store = hs.get_datastore() self.clock = hs.get_clock() - self.client = hs.get_http_client() - self.config = hs.get_config() - self.perspective_servers = self.config.perspectives - self.hs = hs + + self._key_fetchers = ( + StoreKeyFetcher(hs), + PerspectivesKeyFetcher(hs), + ServerKeyFetcher(hs), + ) # map from server name to Deferred. Has an entry for each server with # an ongoing key download; the Deferred completes once the download @@ -271,13 +272,6 @@ class Keyring(object): verify_requests (list[VerifyKeyRequest]): list of verify requests """ - # These are functions that produce keys given a list of key ids - key_fetch_fns = ( - self.get_keys_from_store, # First try the local store - self.get_keys_from_perspectives, # Then try via perspectives - self.get_keys_from_server, # Then try directly - ) - @defer.inlineCallbacks def do_iterations(): with Measure(self.clock, "get_server_verify_keys"): @@ -288,8 +282,8 @@ class Keyring(object): verify_request.key_ids ) - for fn in key_fetch_fns: - results = yield fn(missing_keys.items()) + for f in self._key_fetchers: + results = yield f.get_keys(missing_keys.items()) # We now need to figure out which verify requests we have keys # for and which we don't @@ -348,8 +342,9 @@ class Keyring(object): run_in_background(do_iterations).addErrback(on_err) - @defer.inlineCallbacks - def get_keys_from_store(self, server_name_and_key_ids): + +class KeyFetcher(object): + def get_keys(self, server_name_and_key_ids): """ Args: server_name_and_key_ids (iterable[Tuple[str, iterable[str]]]): @@ -359,6 +354,18 @@ class Keyring(object): Deferred[dict[str, dict[str, synapse.storage.keys.FetchKeyResult|None]]]: map from server_name -> key_id -> FetchKeyResult """ + raise NotImplementedError + + +class StoreKeyFetcher(KeyFetcher): + """KeyFetcher impl which fetches keys from our data store""" + + def __init__(self, hs): + self.store = hs.get_datastore() + + @defer.inlineCallbacks + def get_keys(self, server_name_and_key_ids): + """see KeyFetcher.get_keys""" keys_to_fetch = ( (server_name, key_id) for server_name, key_ids in server_name_and_key_ids @@ -370,203 +377,11 @@ class Keyring(object): keys.setdefault(server_name, {})[key_id] = key defer.returnValue(keys) - @defer.inlineCallbacks - def get_keys_from_perspectives(self, server_name_and_key_ids): - @defer.inlineCallbacks - def get_key(perspective_name, perspective_keys): - try: - result = yield self.get_server_verify_key_v2_indirect( - server_name_and_key_ids, perspective_name, perspective_keys - ) - 
defer.returnValue(result) - except KeyLookupError as e: - logger.warning("Key lookup failed from %r: %s", perspective_name, e) - except Exception as e: - logger.exception( - "Unable to get key from %r: %s %s", - perspective_name, - type(e).__name__, - str(e), - ) - defer.returnValue({}) - - results = yield logcontext.make_deferred_yieldable( - defer.gatherResults( - [ - run_in_background(get_key, p_name, p_keys) - for p_name, p_keys in self.perspective_servers.items() - ], - consumeErrors=True, - ).addErrback(unwrapFirstError) - ) - - union_of_keys = {} - for result in results: - for server_name, keys in result.items(): - union_of_keys.setdefault(server_name, {}).update(keys) - - defer.returnValue(union_of_keys) - - @defer.inlineCallbacks - def get_keys_from_server(self, server_name_and_key_ids): - results = yield logcontext.make_deferred_yieldable( - defer.gatherResults( - [ - run_in_background( - self.get_server_verify_key_v2_direct, server_name, key_ids - ) - for server_name, key_ids in server_name_and_key_ids - ], - consumeErrors=True, - ).addErrback(unwrapFirstError) - ) - - merged = {} - for result in results: - merged.update(result) - - defer.returnValue( - {server_name: keys for server_name, keys in merged.items() if keys} - ) - - @defer.inlineCallbacks - def get_server_verify_key_v2_indirect( - self, server_names_and_key_ids, perspective_name, perspective_keys - ): - """ - Args: - server_names_and_key_ids (iterable[Tuple[str, iterable[str]]]): - list of (server_name, iterable[key_id]) tuples to fetch keys for - perspective_name (str): name of the notary server to query for the keys - perspective_keys (dict[str, VerifyKey]): map of key_id->key for the - notary server - - Returns: - Deferred[dict[str, dict[str, synapse.storage.keys.FetchKeyResult]]]: map - from server_name -> key_id -> FetchKeyResult - """ - # TODO(mark): Set the minimum_valid_until_ts to that needed by - # the events being validated or the current time if validating - # an incoming request. 
- try: - query_response = yield self.client.post_json( - destination=perspective_name, - path="/_matrix/key/v2/query", - data={ - u"server_keys": { - server_name: { - key_id: {u"minimum_valid_until_ts": 0} for key_id in key_ids - } - for server_name, key_ids in server_names_and_key_ids - } - }, - long_retries=True, - ) - except (NotRetryingDestination, RequestSendFailed) as e: - raise_from(KeyLookupError("Failed to connect to remote server"), e) - except HttpResponseException as e: - raise_from(KeyLookupError("Remote server returned an error"), e) - - keys = {} - added_keys = [] - - time_now_ms = self.clock.time_msec() - - for response in query_response["server_keys"]: - if ( - u"signatures" not in response - or perspective_name not in response[u"signatures"] - ): - raise KeyLookupError( - "Key response not signed by perspective server" - " %r" % (perspective_name,) - ) - - verified = False - for key_id in response[u"signatures"][perspective_name]: - if key_id in perspective_keys: - verify_signed_json( - response, perspective_name, perspective_keys[key_id] - ) - verified = True - - if not verified: - logging.info( - "Response from perspective server %r not signed with a" - " known key, signed with: %r, known keys: %r", - perspective_name, - list(response[u"signatures"][perspective_name]), - list(perspective_keys), - ) - raise KeyLookupError( - "Response not signed with a known key for perspective" - " server %r" % (perspective_name,) - ) - - processed_response = yield self.process_v2_response( - perspective_name, response, time_added_ms=time_now_ms - ) - server_name = response["server_name"] - - added_keys.extend( - (server_name, key_id, key) for key_id, key in processed_response.items() - ) - keys.setdefault(server_name, {}).update(processed_response) - - yield self.store.store_server_verify_keys( - perspective_name, time_now_ms, added_keys - ) - - defer.returnValue(keys) - - @defer.inlineCallbacks - def get_server_verify_key_v2_direct(self, server_name, key_ids): - keys = {} # type: dict[str, FetchKeyResult] - - for requested_key_id in key_ids: - if requested_key_id in keys: - continue - - time_now_ms = self.clock.time_msec() - try: - response = yield self.client.get_json( - destination=server_name, - path="/_matrix/key/v2/server/" - + urllib.parse.quote(requested_key_id), - ignore_backoff=True, - ) - except (NotRetryingDestination, RequestSendFailed) as e: - raise_from(KeyLookupError("Failed to connect to remote server"), e) - except HttpResponseException as e: - raise_from(KeyLookupError("Remote server returned an error"), e) - - if ( - u"signatures" not in response - or server_name not in response[u"signatures"] - ): - raise KeyLookupError("Key response not signed by remote server") - - if response["server_name"] != server_name: - raise KeyLookupError( - "Expected a response for server %r not %r" - % (server_name, response["server_name"]) - ) - - response_keys = yield self.process_v2_response( - from_server=server_name, - requested_ids=[requested_key_id], - response_json=response, - time_added_ms=time_now_ms, - ) - yield self.store.store_server_verify_keys( - server_name, - time_now_ms, - ((server_name, key_id, key) for key_id, key in response_keys.items()), - ) - keys.update(response_keys) - - defer.returnValue({server_name: keys}) +class BaseV2KeyFetcher(object): + def __init__(self, hs): + self.store = hs.get_datastore() + self.config = hs.get_config() @defer.inlineCallbacks def process_v2_response( @@ -670,6 +485,226 @@ class Keyring(object): defer.returnValue(verify_keys) +class 
PerspectivesKeyFetcher(BaseV2KeyFetcher): + """KeyFetcher impl which fetches keys from the "perspectives" servers""" + + def __init__(self, hs): + super(PerspectivesKeyFetcher, self).__init__(hs) + self.clock = hs.get_clock() + self.client = hs.get_http_client() + self.perspective_servers = self.config.perspectives + + @defer.inlineCallbacks + def get_keys(self, server_name_and_key_ids): + """see KeyFetcher.get_keys""" + + @defer.inlineCallbacks + def get_key(perspective_name, perspective_keys): + try: + result = yield self.get_server_verify_key_v2_indirect( + server_name_and_key_ids, perspective_name, perspective_keys + ) + defer.returnValue(result) + except KeyLookupError as e: + logger.warning("Key lookup failed from %r: %s", perspective_name, e) + except Exception as e: + logger.exception( + "Unable to get key from %r: %s %s", + perspective_name, + type(e).__name__, + str(e), + ) + + defer.returnValue({}) + + results = yield logcontext.make_deferred_yieldable( + defer.gatherResults( + [ + run_in_background(get_key, p_name, p_keys) + for p_name, p_keys in self.perspective_servers.items() + ], + consumeErrors=True, + ).addErrback(unwrapFirstError) + ) + + union_of_keys = {} + for result in results: + for server_name, keys in result.items(): + union_of_keys.setdefault(server_name, {}).update(keys) + + defer.returnValue(union_of_keys) + + @defer.inlineCallbacks + def get_server_verify_key_v2_indirect( + self, server_names_and_key_ids, perspective_name, perspective_keys + ): + """ + Args: + server_names_and_key_ids (iterable[Tuple[str, iterable[str]]]): + list of (server_name, iterable[key_id]) tuples to fetch keys for + perspective_name (str): name of the notary server to query for the keys + perspective_keys (dict[str, VerifyKey]): map of key_id->key for the + notary server + + Returns: + Deferred[dict[str, dict[str, synapse.storage.keys.FetchKeyResult]]]: map + from server_name -> key_id -> FetchKeyResult + """ + # TODO(mark): Set the minimum_valid_until_ts to that needed by + # the events being validated or the current time if validating + # an incoming request. 
+ try: + query_response = yield self.client.post_json( + destination=perspective_name, + path="/_matrix/key/v2/query", + data={ + u"server_keys": { + server_name: { + key_id: {u"minimum_valid_until_ts": 0} for key_id in key_ids + } + for server_name, key_ids in server_names_and_key_ids + } + }, + long_retries=True, + ) + except (NotRetryingDestination, RequestSendFailed) as e: + raise_from(KeyLookupError("Failed to connect to remote server"), e) + except HttpResponseException as e: + raise_from(KeyLookupError("Remote server returned an error"), e) + + keys = {} + added_keys = [] + + time_now_ms = self.clock.time_msec() + + for response in query_response["server_keys"]: + if ( + u"signatures" not in response + or perspective_name not in response[u"signatures"] + ): + raise KeyLookupError( + "Key response not signed by perspective server" + " %r" % (perspective_name,) + ) + + verified = False + for key_id in response[u"signatures"][perspective_name]: + if key_id in perspective_keys: + verify_signed_json( + response, perspective_name, perspective_keys[key_id] + ) + verified = True + + if not verified: + logging.info( + "Response from perspective server %r not signed with a" + " known key, signed with: %r, known keys: %r", + perspective_name, + list(response[u"signatures"][perspective_name]), + list(perspective_keys), + ) + raise KeyLookupError( + "Response not signed with a known key for perspective" + " server %r" % (perspective_name,) + ) + + processed_response = yield self.process_v2_response( + perspective_name, response, time_added_ms=time_now_ms + ) + server_name = response["server_name"] + + added_keys.extend( + (server_name, key_id, key) for key_id, key in processed_response.items() + ) + keys.setdefault(server_name, {}).update(processed_response) + + yield self.store.store_server_verify_keys( + perspective_name, time_now_ms, added_keys + ) + + defer.returnValue(keys) + + +class ServerKeyFetcher(BaseV2KeyFetcher): + """KeyFetcher impl which fetches keys from the origin servers""" + + def __init__(self, hs): + super(ServerKeyFetcher, self).__init__(hs) + self.clock = hs.get_clock() + self.client = hs.get_http_client() + + @defer.inlineCallbacks + def get_keys(self, server_name_and_key_ids): + """see KeyFetcher.get_keys""" + results = yield logcontext.make_deferred_yieldable( + defer.gatherResults( + [ + run_in_background( + self.get_server_verify_key_v2_direct, server_name, key_ids + ) + for server_name, key_ids in server_name_and_key_ids + ], + consumeErrors=True, + ).addErrback(unwrapFirstError) + ) + + merged = {} + for result in results: + merged.update(result) + + defer.returnValue( + {server_name: keys for server_name, keys in merged.items() if keys} + ) + + @defer.inlineCallbacks + def get_server_verify_key_v2_direct(self, server_name, key_ids): + keys = {} # type: dict[str, FetchKeyResult] + + for requested_key_id in key_ids: + if requested_key_id in keys: + continue + + time_now_ms = self.clock.time_msec() + try: + response = yield self.client.get_json( + destination=server_name, + path="/_matrix/key/v2/server/" + + urllib.parse.quote(requested_key_id), + ignore_backoff=True, + ) + except (NotRetryingDestination, RequestSendFailed) as e: + raise_from(KeyLookupError("Failed to connect to remote server"), e) + except HttpResponseException as e: + raise_from(KeyLookupError("Remote server returned an error"), e) + + if ( + u"signatures" not in response + or server_name not in response[u"signatures"] + ): + raise KeyLookupError("Key response not signed by remote server") + + if 
response["server_name"] != server_name: + raise KeyLookupError( + "Expected a response for server %r not %r" + % (server_name, response["server_name"]) + ) + + response_keys = yield self.process_v2_response( + from_server=server_name, + requested_ids=[requested_key_id], + response_json=response, + time_added_ms=time_now_ms, + ) + yield self.store.store_server_verify_keys( + server_name, + time_now_ms, + ((server_name, key_id, key) for key_id, key in response_keys.items()), + ) + keys.update(response_keys) + + defer.returnValue({server_name: keys}) + + @defer.inlineCallbacks def _handle_key_deferred(verify_request): """Waits for the key to become available, and then performs a verification diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 83de32b05d..de61bad15d 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -24,7 +24,11 @@ from twisted.internet import defer from synapse.api.errors import SynapseError from synapse.crypto import keyring -from synapse.crypto.keyring import KeyLookupError +from synapse.crypto.keyring import ( + KeyLookupError, + PerspectivesKeyFetcher, + ServerKeyFetcher, +) from synapse.storage.keys import FetchKeyResult from synapse.util import logcontext from synapse.util.logcontext import LoggingContext @@ -218,12 +222,19 @@ class KeyringTestCase(unittest.HomeserverTestCase): self.assertFalse(d.called) self.get_success(d) + +class ServerKeyFetcherTestCase(unittest.HomeserverTestCase): + def make_homeserver(self, reactor, clock): + self.http_client = Mock() + hs = self.setup_test_homeserver(handlers=None, http_client=self.http_client) + return hs + def test_get_keys_from_server(self): # arbitrarily advance the clock a bit self.reactor.advance(100) SERVER_NAME = "server2" - kr = keyring.Keyring(self.hs) + fetcher = ServerKeyFetcher(self.hs) testkey = signedjson.key.generate_signing_key("ver1") testverifykey = signedjson.key.get_verify_key(testkey) testverifykey_id = "ed25519:ver1" @@ -250,7 +261,7 @@ class KeyringTestCase(unittest.HomeserverTestCase): self.http_client.get_json.side_effect = get_json server_name_and_key_ids = [(SERVER_NAME, ("key1",))] - keys = self.get_success(kr.get_keys_from_server(server_name_and_key_ids)) + keys = self.get_success(fetcher.get_keys(server_name_and_key_ids)) k = keys[SERVER_NAME][testverifykey_id] self.assertEqual(k.valid_until_ts, VALID_UNTIL_TS) self.assertEqual(k.verify_key, testverifykey) @@ -278,15 +289,26 @@ class KeyringTestCase(unittest.HomeserverTestCase): # change the server name: it should cause a rejection response["server_name"] = "OTHER_SERVER" self.get_failure( - kr.get_keys_from_server(server_name_and_key_ids), KeyLookupError + fetcher.get_keys(server_name_and_key_ids), KeyLookupError ) + +class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase): + def make_homeserver(self, reactor, clock): + self.mock_perspective_server = MockPerspectiveServer() + self.http_client = Mock() + hs = self.setup_test_homeserver(handlers=None, http_client=self.http_client) + keys = self.mock_perspective_server.get_verify_keys() + hs.config.perspectives = {self.mock_perspective_server.server_name: keys} + return hs + def test_get_keys_from_perspectives(self): # arbitrarily advance the clock a bit self.reactor.advance(100) + fetcher = PerspectivesKeyFetcher(self.hs) + SERVER_NAME = "server2" - kr = keyring.Keyring(self.hs) testkey = signedjson.key.generate_signing_key("ver1") testverifykey = signedjson.key.get_verify_key(testkey) testverifykey_id = "ed25519:ver1" @@ -320,7 
+342,7 @@ class KeyringTestCase(unittest.HomeserverTestCase): self.http_client.post_json.side_effect = post_json server_name_and_key_ids = [(SERVER_NAME, ("key1",))] - keys = self.get_success(kr.get_keys_from_perspectives(server_name_and_key_ids)) + keys = self.get_success(fetcher.get_keys(server_name_and_key_ids)) self.assertIn(SERVER_NAME, keys) k = keys[SERVER_NAME][testverifykey_id] self.assertEqual(k.valid_until_ts, VALID_UNTIL_TS) From ec24108cc2e937f49908df4c78f5cee9f81e0834 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 23 May 2019 14:52:13 +0100 Subject: [PATCH 206/429] Fix remote_key_resource --- synapse/rest/key/v2/remote_key_resource.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index eb8782aa6e..21c3c807b9 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -20,7 +20,7 @@ from twisted.web.resource import Resource from twisted.web.server import NOT_DONE_YET from synapse.api.errors import Codes, SynapseError -from synapse.crypto.keyring import KeyLookupError +from synapse.crypto.keyring import KeyLookupError, ServerKeyFetcher from synapse.http.server import respond_with_json_bytes, wrap_json_request_handler from synapse.http.servlet import parse_integer, parse_json_object_from_request @@ -89,7 +89,7 @@ class RemoteKey(Resource): isLeaf = True def __init__(self, hs): - self.keyring = hs.get_keyring() + self.fetcher = ServerKeyFetcher(hs) self.store = hs.get_datastore() self.clock = hs.get_clock() self.federation_domain_whitelist = hs.config.federation_domain_whitelist @@ -217,7 +217,7 @@ class RemoteKey(Resource): if cache_misses and query_remote_on_cache_miss: for server_name, key_ids in cache_misses.items(): try: - yield self.keyring.get_server_verify_key_v2_direct( + yield self.fetcher.get_server_verify_key_v2_direct( server_name, key_ids ) except KeyLookupError as e: From 6368150a748e9303f34948873af360d8a62347b6 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 23 May 2019 15:00:20 +0100 Subject: [PATCH 207/429] Add config option for setting homeserver's default room version (#5223) Replaces DEFAULT_ROOM_VERSION constant with a method that first checks the config, then returns a hardcoded value if the option is not present. That hardcoded value is now located in the server.py config file. --- changelog.d/5223.feature | 1 + docs/sample_config.yaml | 9 ++++++ synapse/api/room_versions.py | 4 --- synapse/config/server.py | 32 +++++++++++++++++++ synapse/handlers/room.py | 9 ++++-- synapse/rest/client/v2_alpha/capabilities.py | 5 +-- .../rest/client/v2_alpha/test_capabilities.py | 7 ++-- 7 files changed, 57 insertions(+), 10 deletions(-) create mode 100644 changelog.d/5223.feature diff --git a/changelog.d/5223.feature b/changelog.d/5223.feature new file mode 100644 index 0000000000..cfdf1ad41b --- /dev/null +++ b/changelog.d/5223.feature @@ -0,0 +1 @@ +Ability to configure default room version. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 559fbcdd01..2a5a514d61 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -83,6 +83,15 @@ pid_file: DATADIR/homeserver.pid # #restrict_public_rooms_to_local_users: true +# The default room version for newly created rooms. 
+# +# Known room versions are listed here: +# https://matrix.org/docs/spec/#complete-list-of-room-versions +# +# For example, for room version 1, default_room_version should be set +# to "1". +#default_room_version: "1" + # The GC threshold parameters to pass to `gc.set_threshold`, if defined # #gc_thresholds: [700, 10, 10] diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index b2895355a8..4085bd10b9 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -85,10 +85,6 @@ class RoomVersions(object): ) -# the version we will give rooms which are created on this server -DEFAULT_ROOM_VERSION = RoomVersions.V1 - - KNOWN_ROOM_VERSIONS = { v.identifier: v for v in ( RoomVersions.V1, diff --git a/synapse/config/server.py b/synapse/config/server.py index f34aa42afa..e9120d4d75 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -20,6 +20,7 @@ import os.path from netaddr import IPSet +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.http.endpoint import parse_and_validate_server_name from synapse.python_dependencies import DependencyException, check_requirements @@ -35,6 +36,8 @@ logger = logging.Logger(__name__) # in the list. DEFAULT_BIND_ADDRESSES = ['::', '0.0.0.0'] +DEFAULT_ROOM_VERSION = "1" + class ServerConfig(Config): @@ -88,6 +91,22 @@ class ServerConfig(Config): "restrict_public_rooms_to_local_users", False, ) + default_room_version = config.get( + "default_room_version", DEFAULT_ROOM_VERSION, + ) + + # Ensure room version is a str + default_room_version = str(default_room_version) + + if default_room_version not in KNOWN_ROOM_VERSIONS: + raise ConfigError( + "Unknown default_room_version: %s, known room versions: %s" % + (default_room_version, list(KNOWN_ROOM_VERSIONS.keys())) + ) + + # Get the actual room version object rather than just the identifier + self.default_room_version = KNOWN_ROOM_VERSIONS[default_room_version] + # whether to enable search. If disabled, new entries will not be inserted # into the search tables and they will not be indexed. Users will receive # errors when attempting to search for messages. @@ -310,6 +329,10 @@ class ServerConfig(Config): unsecure_port = 8008 pid_file = os.path.join(data_dir_path, "homeserver.pid") + + # Bring DEFAULT_ROOM_VERSION into the local-scope for use in the + # default config string + default_room_version = DEFAULT_ROOM_VERSION return """\ ## Server ## @@ -384,6 +407,15 @@ class ServerConfig(Config): # #restrict_public_rooms_to_local_users: true + # The default room version for newly created rooms. + # + # Known room versions are listed here: + # https://matrix.org/docs/spec/#complete-list-of-room-versions + # + # For example, for room version 1, default_room_version should be set + # to "1". 
+ #default_room_version: "%(default_room_version)s" + # The GC threshold parameters to pass to `gc.set_threshold`, if defined # #gc_thresholds: [700, 10, 10] diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index e37ae96899..4a17911a87 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -27,7 +27,7 @@ from twisted.internet import defer from synapse.api.constants import EventTypes, JoinRules, RoomCreationPreset from synapse.api.errors import AuthError, Codes, NotFoundError, StoreError, SynapseError -from synapse.api.room_versions import DEFAULT_ROOM_VERSION, KNOWN_ROOM_VERSIONS +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.storage.state import StateFilter from synapse.types import RoomAlias, RoomID, RoomStreamToken, StreamToken, UserID from synapse.util import stringutils @@ -70,6 +70,7 @@ class RoomCreationHandler(BaseHandler): self.spam_checker = hs.get_spam_checker() self.event_creation_handler = hs.get_event_creation_handler() self.room_member_handler = hs.get_room_member_handler() + self.config = hs.config # linearizer to stop two upgrades happening at once self._upgrade_linearizer = Linearizer("room_upgrade_linearizer") @@ -475,7 +476,11 @@ class RoomCreationHandler(BaseHandler): if ratelimit: yield self.ratelimit(requester) - room_version = config.get("room_version", DEFAULT_ROOM_VERSION.identifier) + room_version = config.get( + "room_version", + self.config.default_room_version.identifier, + ) + if not isinstance(room_version, string_types): raise SynapseError( 400, diff --git a/synapse/rest/client/v2_alpha/capabilities.py b/synapse/rest/client/v2_alpha/capabilities.py index a868d06098..2b4892330c 100644 --- a/synapse/rest/client/v2_alpha/capabilities.py +++ b/synapse/rest/client/v2_alpha/capabilities.py @@ -16,7 +16,7 @@ import logging from twisted.internet import defer -from synapse.api.room_versions import DEFAULT_ROOM_VERSION, KNOWN_ROOM_VERSIONS +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.http.servlet import RestServlet from ._base import client_v2_patterns @@ -36,6 +36,7 @@ class CapabilitiesRestServlet(RestServlet): """ super(CapabilitiesRestServlet, self).__init__() self.hs = hs + self.config = hs.config self.auth = hs.get_auth() self.store = hs.get_datastore() @@ -48,7 +49,7 @@ class CapabilitiesRestServlet(RestServlet): response = { "capabilities": { "m.room_versions": { - "default": DEFAULT_ROOM_VERSION.identifier, + "default": self.config.default_room_version.identifier, "available": { v.identifier: v.disposition for v in KNOWN_ROOM_VERSIONS.values() diff --git a/tests/rest/client/v2_alpha/test_capabilities.py b/tests/rest/client/v2_alpha/test_capabilities.py index f3ef977404..bce5b0cf4c 100644 --- a/tests/rest/client/v2_alpha/test_capabilities.py +++ b/tests/rest/client/v2_alpha/test_capabilities.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import synapse.rest.admin -from synapse.api.room_versions import DEFAULT_ROOM_VERSION, KNOWN_ROOM_VERSIONS +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.rest.client.v1 import login from synapse.rest.client.v2_alpha import capabilities @@ -32,6 +32,7 @@ class CapabilitiesTestCase(unittest.HomeserverTestCase): self.url = b"/_matrix/client/r0/capabilities" hs = self.setup_test_homeserver() self.store = hs.get_datastore() + self.config = hs.config return hs def test_check_auth_required(self): @@ -51,8 +52,10 @@ class CapabilitiesTestCase(unittest.HomeserverTestCase): self.assertEqual(channel.code, 200) for room_version in capabilities['m.room_versions']['available'].keys(): self.assertTrue(room_version in KNOWN_ROOM_VERSIONS, "" + room_version) + self.assertEqual( - DEFAULT_ROOM_VERSION.identifier, capabilities['m.room_versions']['default'] + self.config.default_room_version.identifier, + capabilities['m.room_versions']['default'], ) def test_get_change_password_capabilities(self): From 753b1270da1f0449bbb960b37707556abd3eaac0 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 9 Apr 2019 13:03:56 +0100 Subject: [PATCH 208/429] Require sig from origin server on perspectives responses --- synapse/crypto/keyring.py | 28 ++++++------ tests/crypto/test_keyring.py | 86 +++++++++++++++++++++++++++++++----- 2 files changed, 91 insertions(+), 23 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index eaf41b983c..a64ba0752a 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -394,8 +394,7 @@ class BaseV2KeyFetcher(object): POST /_matrix/key/v2/query. Checks that each signature in the response that claims to come from the origin - server is valid. (Does not check that there actually is such a signature, for - some reason.) + server is valid, and that there is at least one such signature. Stores the json in server_keys_json so that it can be used for future responses to /_matrix/key/v2/query. @@ -430,16 +429,25 @@ class BaseV2KeyFetcher(object): verify_key=verify_key, valid_until_ts=ts_valid_until_ms ) - # TODO: improve this signature checking server_name = response_json["server_name"] + verified = False for key_id in response_json["signatures"].get(server_name, {}): - if key_id not in verify_keys: + # each of the keys used for the signature must be present in the response + # json. 
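+            # (At this point verify_keys only holds the keys parsed from the
+            # response's "verify_keys" section; old_verify_keys are handled
+            # further below.)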
+ key = verify_keys.get(key_id) + if not key: raise KeyLookupError( - "Key response must include verification keys for all signatures" + "Key response is signed by key id %s:%s but that key is not " + "present in the response" % (server_name, key_id) ) - verify_signed_json( - response_json, server_name, verify_keys[key_id].verify_key + verify_signed_json(response_json, server_name, key.verify_key) + verified = True + + if not verified: + raise KeyLookupError( + "Key response for %s is not signed by the origin server" + % (server_name,) ) for key_id, key_data in response_json["old_verify_keys"].items(): @@ -677,12 +685,6 @@ class ServerKeyFetcher(BaseV2KeyFetcher): except HttpResponseException as e: raise_from(KeyLookupError("Remote server returned an error"), e) - if ( - u"signatures" not in response - or server_name not in response[u"signatures"] - ): - raise KeyLookupError("Key response not signed by remote server") - if response["server_name"] != server_name: raise KeyLookupError( "Expected a response for server %r not %r" diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index de61bad15d..c4c9d29499 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -55,12 +55,12 @@ class MockPerspectiveServer(object): key_id: {"key": signedjson.key.encode_verify_key_base64(verify_key)} }, } - return self.get_signed_response(res) - - def get_signed_response(self, res): - signedjson.sign.sign_json(res, self.server_name, self.key) + self.sign_response(res) return res + def sign_response(self, res): + signedjson.sign.sign_json(res, self.server_name, self.key) + class KeyringTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): @@ -238,7 +238,7 @@ class ServerKeyFetcherTestCase(unittest.HomeserverTestCase): testkey = signedjson.key.generate_signing_key("ver1") testverifykey = signedjson.key.get_verify_key(testkey) testverifykey_id = "ed25519:ver1" - VALID_UNTIL_TS = 1000 + VALID_UNTIL_TS = 200 * 1000 # valid response response = { @@ -326,9 +326,10 @@ class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase): }, } - persp_resp = { - "server_keys": [self.mock_perspective_server.get_signed_response(response)] - } + # the response must be signed by both the origin server and the perspectives + # server. 
+ signedjson.sign.sign_json(response, SERVER_NAME, testkey) + self.mock_perspective_server.sign_response(response) def post_json(destination, path, data, **kwargs): self.assertEqual(destination, self.mock_perspective_server.server_name) @@ -337,7 +338,7 @@ class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase): # check that the request is for the expected key q = data["server_keys"] self.assertEqual(list(q[SERVER_NAME].keys()), ["key1"]) - return persp_resp + return {"server_keys": [response]} self.http_client.post_json.side_effect = post_json @@ -365,9 +366,74 @@ class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase): self.assertEqual( bytes(res["key_json"]), - canonicaljson.encode_canonical_json(persp_resp["server_keys"][0]), + canonicaljson.encode_canonical_json(response), ) + def test_invalid_perspectives_responses(self): + """Check that invalid responses from the perspectives server are rejected""" + # arbitrarily advance the clock a bit + self.reactor.advance(100) + + SERVER_NAME = "server2" + testkey = signedjson.key.generate_signing_key("ver1") + testverifykey = signedjson.key.get_verify_key(testkey) + testverifykey_id = "ed25519:ver1" + VALID_UNTIL_TS = 200 * 1000 + + def build_response(): + # valid response + response = { + "server_name": SERVER_NAME, + "old_verify_keys": {}, + "valid_until_ts": VALID_UNTIL_TS, + "verify_keys": { + testverifykey_id: { + "key": signedjson.key.encode_verify_key_base64(testverifykey) + } + }, + } + + # the response must be signed by both the origin server and the perspectives + # server. + signedjson.sign.sign_json(response, SERVER_NAME, testkey) + self.mock_perspective_server.sign_response(response) + return response + + def get_key_from_perspectives(response): + fetcher = PerspectivesKeyFetcher(self.hs) + server_name_and_key_ids = [(SERVER_NAME, ("key1",))] + + def post_json(destination, path, data, **kwargs): + self.assertEqual(destination, self.mock_perspective_server.server_name) + self.assertEqual(path, "/_matrix/key/v2/query") + return {"server_keys": [response]} + + self.http_client.post_json.side_effect = post_json + + return self.get_success( + fetcher.get_keys(server_name_and_key_ids) + ) + + # start with a valid response so we can check we are testing the right thing + response = build_response() + keys = get_key_from_perspectives(response) + k = keys[SERVER_NAME][testverifykey_id] + self.assertEqual(k.verify_key, testverifykey) + + # remove the perspectives server's signature + response = build_response() + del response["signatures"][self.mock_perspective_server.server_name] + self.http_client.post_json.return_value = {"server_keys": [response]} + keys = get_key_from_perspectives(response) + self.assertEqual(keys, {}, "Expected empty dict with missing persp server sig") + + # remove the origin server's signature + response = build_response() + del response["signatures"][SERVER_NAME] + self.http_client.post_json.return_value = {"server_keys": [response]} + keys = get_key_from_perspectives(response) + self.assertEqual(keys, {}, "Expected empty dict with missing origin server sig") + @defer.inlineCallbacks def run_in_context(f, *args, **kwargs): From 4cb577c23f1399d233d72cee45efad36982f692a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 24 May 2019 09:52:33 +0100 Subject: [PATCH 209/429] Don't bundle aggs for /state and /members etc APIs --- synapse/handlers/message.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 7b2c33a922..7e40cb6502 
100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -166,6 +166,9 @@ class MessageHandler(object): now = self.clock.time_msec() events = yield self._event_serializer.serialize_events( room_state.values(), now, + # We don't bother bundling aggregations in when asked for state + # events, as clients won't use them. + bundle_aggregations=False, ) defer.returnValue(events) From dba9152d1593c9de176e969153b30b4ef19f29ae Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 24 May 2019 14:12:38 +0100 Subject: [PATCH 210/429] Add missing blank line in config (#5249) --- changelog.d/5249.feature | 1 + docs/sample_config.yaml | 1 + synapse/config/server.py | 1 + 3 files changed, 3 insertions(+) create mode 100644 changelog.d/5249.feature diff --git a/changelog.d/5249.feature b/changelog.d/5249.feature new file mode 100644 index 0000000000..cfdf1ad41b --- /dev/null +++ b/changelog.d/5249.feature @@ -0,0 +1 @@ +Ability to configure default room version. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 2a5a514d61..421ae96f04 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -90,6 +90,7 @@ pid_file: DATADIR/homeserver.pid # # For example, for room version 1, default_room_version should be set # to "1". +# #default_room_version: "1" # The GC threshold parameters to pass to `gc.set_threshold`, if defined diff --git a/synapse/config/server.py b/synapse/config/server.py index e9120d4d75..e763e19e15 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -414,6 +414,7 @@ class ServerConfig(Config): # # For example, for room version 1, default_room_version should be set # to "1". + # #default_room_version: "%(default_room_version)s" # The GC threshold parameters to pass to `gc.set_threshold`, if defined From dd64b9dbdd9bb6f9ac84d51dd4f4bdb665fcfcb1 Mon Sep 17 00:00:00 2001 From: Tulir Asokan Date: Fri, 24 May 2019 16:44:04 +0300 Subject: [PATCH 211/429] Fix appservice timestamp massaging (#5233) Signed-off-by: Tulir Asokan --- changelog.d/5233.bugfix | 1 + synapse/events/builder.py | 7 ++++++- 2 files changed, 7 insertions(+), 1 deletion(-) create mode 100644 changelog.d/5233.bugfix diff --git a/changelog.d/5233.bugfix b/changelog.d/5233.bugfix new file mode 100644 index 0000000000..d71b962160 --- /dev/null +++ b/changelog.d/5233.bugfix @@ -0,0 +1 @@ +Fix appservice timestamp massaging. diff --git a/synapse/events/builder.py b/synapse/events/builder.py index 1fe995f212..546b6f4982 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -76,6 +76,7 @@ class EventBuilder(object): # someone tries to get them when they don't exist. 
_state_key = attr.ib(default=None) _redacts = attr.ib(default=None) + _origin_server_ts = attr.ib(default=None) internal_metadata = attr.ib(default=attr.Factory(lambda: _EventInternalMetadata({}))) @@ -142,6 +143,9 @@ class EventBuilder(object): if self._redacts is not None: event_dict["redacts"] = self._redacts + if self._origin_server_ts is not None: + event_dict["origin_server_ts"] = self._origin_server_ts + defer.returnValue( create_local_event_from_event_dict( clock=self._clock, @@ -209,6 +213,7 @@ class EventBuilderFactory(object): content=key_values.get("content", {}), unsigned=key_values.get("unsigned", {}), redacts=key_values.get("redacts", None), + origin_server_ts=key_values.get("origin_server_ts", None), ) @@ -245,7 +250,7 @@ def create_local_event_from_event_dict(clock, hostname, signing_key, event_dict["event_id"] = _create_event_id(clock, hostname) event_dict["origin"] = hostname - event_dict["origin_server_ts"] = time_now + event_dict.setdefault("origin_server_ts", time_now) event_dict.setdefault("unsigned", {}) age = event_dict["unsigned"].pop("age", 0) From b825d1c80046b37e32951ef034a05002df76a287 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 23 May 2019 17:31:26 +0100 Subject: [PATCH 212/429] Improve error handling/logging for perspectives-key fetching. In particular, don't give up on the first failure. --- synapse/crypto/keyring.py | 105 ++++++++++++++++++++++++++++---------- 1 file changed, 77 insertions(+), 28 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index a64ba0752a..65af2fb671 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -17,6 +17,7 @@ import logging from collections import namedtuple +import six from six import raise_from from six.moves import urllib @@ -349,6 +350,7 @@ class KeyFetcher(object): Args: server_name_and_key_ids (iterable[Tuple[str, iterable[str]]]): list of (server_name, iterable[key_id]) tuples to fetch keys for + Note that the iterables may be iterated more than once. Returns: Deferred[dict[str, dict[str, synapse.storage.keys.FetchKeyResult|None]]]: @@ -557,7 +559,16 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): Returns: Deferred[dict[str, dict[str, synapse.storage.keys.FetchKeyResult]]]: map from server_name -> key_id -> FetchKeyResult + + Raises: + KeyLookupError if there was an error processing the entire response from + the server """ + logger.info( + "Requesting keys %s from notary server %s", + server_names_and_key_ids, + perspective_name, + ) # TODO(mark): Set the minimum_valid_until_ts to that needed by # the events being validated or the current time if validating # an incoming request. 
@@ -586,40 +597,31 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): time_now_ms = self.clock.time_msec() for response in query_response["server_keys"]: - if ( - u"signatures" not in response - or perspective_name not in response[u"signatures"] - ): + # do this first, so that we can give useful errors thereafter + server_name = response.get("server_name") + if not isinstance(server_name, six.string_types): raise KeyLookupError( - "Key response not signed by perspective server" - " %r" % (perspective_name,) + "Malformed response from key notary server %s: invalid server_name" + % (perspective_name,) ) - verified = False - for key_id in response[u"signatures"][perspective_name]: - if key_id in perspective_keys: - verify_signed_json( - response, perspective_name, perspective_keys[key_id] - ) - verified = True - - if not verified: - logging.info( - "Response from perspective server %r not signed with a" - " known key, signed with: %r, known keys: %r", + try: + processed_response = yield self._process_perspectives_response( perspective_name, - list(response[u"signatures"][perspective_name]), - list(perspective_keys), + perspective_keys, + response, + time_added_ms=time_now_ms, ) - raise KeyLookupError( - "Response not signed with a known key for perspective" - " server %r" % (perspective_name,) + except KeyLookupError as e: + logger.warning( + "Error processing response from key notary server %s for origin " + "server %s: %s", + perspective_name, + server_name, + e, ) - - processed_response = yield self.process_v2_response( - perspective_name, response, time_added_ms=time_now_ms - ) - server_name = response["server_name"] + # we continue to process the rest of the response + continue added_keys.extend( (server_name, key_id, key) for key_id, key in processed_response.items() @@ -632,6 +634,53 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): defer.returnValue(keys) + def _process_perspectives_response( + self, perspective_name, perspective_keys, response, time_added_ms + ): + """Parse a 'Server Keys' structure from the result of a /key/query request + + Checks that the entry is correctly signed by the perspectives server, and then + passes over to process_v2_response + + Args: + perspective_name (str): the name of the notary server that produced this + result + + perspective_keys (dict[str, VerifyKey]): map of key_id->key for the + notary server + + response (dict): the json-decoded Server Keys response object + + time_added_ms (int): the timestamp to record in server_keys_json + + Returns: + Deferred[dict[str, FetchKeyResult]]: map from key_id to result object + """ + if ( + u"signatures" not in response + or perspective_name not in response[u"signatures"] + ): + raise KeyLookupError("Response not signed by the notary server") + + verified = False + for key_id in response[u"signatures"][perspective_name]: + if key_id in perspective_keys: + verify_signed_json(response, perspective_name, perspective_keys[key_id]) + verified = True + + if not verified: + raise KeyLookupError( + "Response not signed with a known key: signed with: %r, known keys: %r" + % ( + list(response[u"signatures"][perspective_name].keys()), + list(perspective_keys.keys()), + ) + ) + + return self.process_v2_response( + perspective_name, response, time_added_ms=time_added_ms + ) + class ServerKeyFetcher(BaseV2KeyFetcher): """KeyFetcher impl which fetches keys from the origin servers""" From cbcfd642a0dc375ea6f006c1633f82d16b3ac002 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 24 May 2019 15:47:30 +0100 
Subject: [PATCH 213/429] changelog --- changelog.d/5251.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5251.bugfix diff --git a/changelog.d/5251.bugfix b/changelog.d/5251.bugfix new file mode 100644 index 0000000000..9a053204b6 --- /dev/null +++ b/changelog.d/5251.bugfix @@ -0,0 +1 @@ +Ensure that server_keys fetched via a notary server are correctly signed. \ No newline at end of file From fa1b293da2e0a5e47864ccb49e530d8a81d81790 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Fri, 24 May 2019 22:17:18 +0100 Subject: [PATCH 214/429] Simplification to Keyring.wait_for_previous_lookups. (#5250) The list of server names was redundant, since it was equivalent to the keys on the server_to_deferred map. This reduces the number of large lists being passed around, and has the benefit of deduplicating the entries in `wait_on`. --- changelog.d/5250.misc | 1 + synapse/crypto/keyring.py | 11 ++++------- tests/crypto/test_keyring.py | 4 ++-- 3 files changed, 7 insertions(+), 9 deletions(-) create mode 100644 changelog.d/5250.misc diff --git a/changelog.d/5250.misc b/changelog.d/5250.misc new file mode 100644 index 0000000000..575a299a82 --- /dev/null +++ b/changelog.d/5250.misc @@ -0,0 +1 @@ +Simplification to Keyring.wait_for_previous_lookups. diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index eaf41b983c..d6ad7f1772 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -180,9 +180,7 @@ class Keyring(object): # We want to wait for any previous lookups to complete before # proceeding. - yield self.wait_for_previous_lookups( - [rq.server_name for rq in verify_requests], server_to_deferred - ) + yield self.wait_for_previous_lookups(server_to_deferred) # Actually start fetching keys. self._get_server_verify_keys(verify_requests) @@ -215,12 +213,11 @@ class Keyring(object): logger.exception("Error starting key lookups") @defer.inlineCallbacks - def wait_for_previous_lookups(self, server_names, server_to_deferred): + def wait_for_previous_lookups(self, server_to_deferred): """Waits for any previous key lookups for the given servers to finish. Args: - server_names (list): list of server_names we want to lookup - server_to_deferred (dict): server_name to deferred which gets + server_to_deferred (dict[str, Deferred]): server_name to deferred which gets resolved once we've finished looking up keys for that server. The Deferreds should be regular twisted ones which call their callbacks with no logcontext. @@ -233,7 +230,7 @@ class Keyring(object): while True: wait_on = [ (server_name, self.key_downloads[server_name]) - for server_name in server_names + for server_name in server_to_deferred.keys() if server_name in self.key_downloads ] if not wait_on: diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index de61bad15d..4fba462d44 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -85,7 +85,7 @@ class KeyringTestCase(unittest.HomeserverTestCase): # we run the lookup in a logcontext so that the patched inlineCallbacks can check # it is doing the right thing with logcontexts. wait_1_deferred = run_in_context( - kr.wait_for_previous_lookups, ["server1"], {"server1": lookup_1_deferred} + kr.wait_for_previous_lookups, {"server1": lookup_1_deferred} ) # there were no previous lookups, so the deferred should be ready @@ -94,7 +94,7 @@ class KeyringTestCase(unittest.HomeserverTestCase): # set off another wait. 
It should block because the first lookup # hasn't yet completed. wait_2_deferred = run_in_context( - kr.wait_for_previous_lookups, ["server1"], {"server1": lookup_2_deferred} + kr.wait_for_previous_lookups, {"server1": lookup_2_deferred} ) self.assertFalse(wait_2_deferred.called) From 56f07d980a9d3b3b8e2cc196e5d630abd98be122 Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Fri, 24 May 2019 16:32:21 -0500 Subject: [PATCH 215/429] Show correct error when logging out and access token is missing Signed-off-by: Aaron Raimist --- synapse/rest/client/v1/logout.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/synapse/rest/client/v1/logout.py b/synapse/rest/client/v1/logout.py index 430c692336..317f52cb56 100644 --- a/synapse/rest/client/v1/logout.py +++ b/synapse/rest/client/v1/logout.py @@ -40,11 +40,11 @@ class LogoutRestServlet(ClientV1RestServlet): def on_POST(self, request): try: requester = yield self.auth.get_user_by_req(request) - except AuthError: + except AuthError as e: # this implies the access token has already been deleted. - defer.returnValue((401, { - "errcode": "M_UNKNOWN_TOKEN", - "error": "Access Token unknown or expired" + defer.returnValue((e.code, { + "errcode": e.errcode, + "error": e.msg })) else: if requester.device_id is None: From 2d4853039f37a16cc898c7dba1a4a34f67ca1062 Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Fri, 24 May 2019 17:13:10 -0500 Subject: [PATCH 216/429] Fix error code for invalid parameter Signed-off-by: Aaron Raimist --- synapse/http/servlet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index 528125e737..197c652850 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -55,7 +55,7 @@ def parse_integer_from_args(args, name, default=None, required=False): return int(args[name][0]) except Exception: message = "Query parameter %r must be an integer" % (name,) - raise SynapseError(400, message) + raise SynapseError(400, message, errcode=Codes.INVALID_PARAM) else: if required: message = "Missing integer query parameter %r" % (name,) From 6dac0e738c70ee67abb9b03cafbb07749b84309c Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Fri, 24 May 2019 17:15:24 -0500 Subject: [PATCH 217/429] Add changelog Signed-off-by: Aaron Raimist --- changelog.d/5257.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5257.bugfix diff --git a/changelog.d/5257.bugfix b/changelog.d/5257.bugfix new file mode 100644 index 0000000000..8334af9b99 --- /dev/null +++ b/changelog.d/5257.bugfix @@ -0,0 +1 @@ +Fix error code when there is an invalid parameter on /_matrix/client/r0/publicRooms From 0b4f4cb0b4bff42dd0f638d1d2891f35feaff1be Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Fri, 24 May 2019 16:35:48 -0500 Subject: [PATCH 218/429] Add changelog Signed-off-by: Aaron Raimist --- changelog.d/5256.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5256.bugfix diff --git a/changelog.d/5256.bugfix b/changelog.d/5256.bugfix new file mode 100644 index 0000000000..86316ab5dd --- /dev/null +++ b/changelog.d/5256.bugfix @@ -0,0 +1 @@ +Show the correct error when logging out and access token is missing. 
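As a rough illustration of the change (exact error strings may differ), logging out without supplying an access token should now surface the real auth failure rather than a hard-coded "unknown token" body, e.g.:

    POST /_matrix/client/r0/logout
    HTTP/1.1 401 Unauthorized
    {"errcode": "M_MISSING_TOKEN", "error": "Missing access token"}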
From bc4b2ecf70bc3965cbbf1daee52bf7577e219d7b Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Sat, 25 May 2019 12:02:48 -0600 Subject: [PATCH 219/429] Fix logging for room stats background update --- synapse/storage/stats.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/stats.py b/synapse/storage/stats.py index 71b80a891d..eb0ced5b5e 100644 --- a/synapse/storage/stats.py +++ b/synapse/storage/stats.py @@ -169,7 +169,7 @@ class StatsStore(StateDeltasStore): logger.info( "Processing the next %d rooms of %d remaining", - (len(rooms_to_work_on), progress["remaining"]), + len(rooms_to_work_on), progress["remaining"], ) # Number of state events we've processed by going through each room From 4ccdbfcdb133ed10cd53f5a1b7f77b00c1ecdf97 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Sat, 25 May 2019 12:21:21 -0600 Subject: [PATCH 220/429] Changelog --- changelog.d/5260.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5260.feature diff --git a/changelog.d/5260.feature b/changelog.d/5260.feature new file mode 100644 index 0000000000..01285e965c --- /dev/null +++ b/changelog.d/5260.feature @@ -0,0 +1 @@ +Synapse now more efficiently collates room statistics. From 119c9c10b026b65affab5d5bcb6ae1d3d7e7229c Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Mon, 27 May 2019 00:13:48 -0500 Subject: [PATCH 221/429] Get rid of try except Signed-off-by: Aaron Raimist --- synapse/rest/client/v1/logout.py | 25 +++++++++---------------- 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/synapse/rest/client/v1/logout.py b/synapse/rest/client/v1/logout.py index 317f52cb56..2cf373e83c 100644 --- a/synapse/rest/client/v1/logout.py +++ b/synapse/rest/client/v1/logout.py @@ -38,23 +38,16 @@ class LogoutRestServlet(ClientV1RestServlet): @defer.inlineCallbacks def on_POST(self, request): - try: - requester = yield self.auth.get_user_by_req(request) - except AuthError as e: - # this implies the access token has already been deleted. - defer.returnValue((e.code, { - "errcode": e.errcode, - "error": e.msg - })) + requester = yield self.auth.get_user_by_req(request) + + if requester.device_id is None: + # the acccess token wasn't associated with a device. + # Just delete the access token + access_token = self._auth.get_access_token_from_request(request) + yield self._auth_handler.delete_access_token(access_token) else: - if requester.device_id is None: - # the acccess token wasn't associated with a device. 
- # Just delete the access token - access_token = self._auth.get_access_token_from_request(request) - yield self._auth_handler.delete_access_token(access_token) - else: - yield self._device_handler.delete_device( - requester.user.to_string(), requester.device_id) + yield self._device_handler.delete_device( + requester.user.to_string(), requester.device_id) defer.returnValue((200, {})) From ba17de7fbc29700163b23363ae0e03f8a01ef274 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 28 May 2019 10:11:38 +0100 Subject: [PATCH 222/429] Fix schema update for account validity --- ...{account_validity.sql => account_validity_with_renewal.sql} | 3 +++ 1 file changed, 3 insertions(+) rename synapse/storage/schema/delta/54/{account_validity.sql => account_validity_with_renewal.sql} (83%) diff --git a/synapse/storage/schema/delta/54/account_validity.sql b/synapse/storage/schema/delta/54/account_validity_with_renewal.sql similarity index 83% rename from synapse/storage/schema/delta/54/account_validity.sql rename to synapse/storage/schema/delta/54/account_validity_with_renewal.sql index 2357626000..0adb2ad55e 100644 --- a/synapse/storage/schema/delta/54/account_validity.sql +++ b/synapse/storage/schema/delta/54/account_validity_with_renewal.sql @@ -13,6 +13,9 @@ * limitations under the License. */ +-- We previously changed the schema for this table without renaming the file, which means +-- that some databases might still be using the old schema. This ensures Synapse uses the +-- right schema for the table. DROP TABLE IF EXISTS account_validity; -- Track what users are in public rooms. From ddd30f44a09a775d290c2d41b8db4d15b967dd43 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 28 May 2019 10:14:21 +0100 Subject: [PATCH 223/429] Changelog --- changelog.d/5268.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5268.bugfix diff --git a/changelog.d/5268.bugfix b/changelog.d/5268.bugfix new file mode 100644 index 0000000000..1a5a03bf0a --- /dev/null +++ b/changelog.d/5268.bugfix @@ -0,0 +1 @@ +Fix schema update for account validity. From 52839886d664576831462e033b88e5aba4c019e3 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 28 May 2019 16:47:42 +0100 Subject: [PATCH 224/429] Allow configuring a range for the account validity startup job When enabling the account validity feature, Synapse will look at startup for registered account without an expiration date, and will set one equals to 'now + validity_period' for them. On large servers, it can mean that a large number of users will have the same expiration date, which means that they will all be sent a renewal email at the same time, which isn't ideal. In order to mitigate this, this PR allows server admins to define a 'max_delta' so that the expiration date is a random value in the [now + validity_period ; now + validity_period + max_delta] range. This allows renewal emails to be progressively sent over a configured period instead of being sent all in one big batch. 
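To sketch the behaviour described above (the helper name here is hypothetical; the actual change lives in set_expiration_date_for_user_txn in the diff below):

    import random

    rand = random.SystemRandom()

    def pick_expiration_ts(now_ms, period_ms, max_delta_ms):
        # Base expiration date: now + validity period.
        expiration_ts = now_ms + period_ms
        if max_delta_ms:
            # Spread expiration dates over [now + period, now + period + max_delta)
            # so that renewal emails are not all sent in one batch.
            expiration_ts = rand.randrange(expiration_ts, expiration_ts + max_delta_ms)
        return expiration_ts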
--- synapse/config/registration.py | 11 ++++++++++ synapse/storage/_base.py | 23 +++++++++++++++++++-- tests/rest/client/v2_alpha/test_register.py | 21 +++++++++++++++++++ 3 files changed, 53 insertions(+), 2 deletions(-) diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 693288f938..b4fd4af368 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -39,6 +39,10 @@ class AccountValidityConfig(Config): else: self.renew_email_subject = "Renew your %(app)s account" + self.startup_job_max_delta = self.parse_duration( + config.get("startup_job_max_delta", 0), + ) + if self.renew_by_email_enabled and "public_baseurl" not in synapse_config: raise ConfigError("Can't send renewal emails without 'public_baseurl'") @@ -131,11 +135,18 @@ class RegistrationConfig(Config): # after that the validity period changes and Synapse is restarted, the users' # expiration dates won't be updated unless their account is manually renewed. # + # If set, the ``startup_job_max_delta`` optional setting will make the startup job + # described above set a random expiration date between t + period and + # t + period + startup_job_max_delta, t being the date and time at which the job + # sets the expiration date for a given user. This is useful for server admins that + # want to avoid Synapse sending a lot of renewal emails at once. + # #account_validity: # enabled: True # period: 6w # renew_at: 1w # renew_email_subject: "Renew your %%(app)s account" + # startup_job_max_delta: 2d # The user must provide all of the below types of 3PID when registering. # diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index fa6839ceca..40802fd3dc 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -16,6 +16,7 @@ # limitations under the License. import itertools import logging +import random import sys import threading import time @@ -247,6 +248,8 @@ class SQLBaseStore(object): self._check_safe_to_upsert, ) + self.rand = random.SystemRandom() + if self._account_validity.enabled: self._clock.call_later( 0.0, @@ -308,21 +311,37 @@ class SQLBaseStore(object): res = self.cursor_to_dict(txn) if res: for user in res: - self.set_expiration_date_for_user_txn(txn, user["name"]) + self.set_expiration_date_for_user_txn( + txn, + user["name"], + use_delta=True, + ) yield self.runInteraction( "get_users_with_no_expiration_date", select_users_with_no_expiration_date_txn, ) - def set_expiration_date_for_user_txn(self, txn, user_id): + def set_expiration_date_for_user_txn(self, txn, user_id, use_delta=False): """Sets an expiration date to the account with the given user ID. Args: user_id (str): User ID to set an expiration date for. + use_delta (bool): If set to False, the expiration date for the user will be + now + validity period. If set to True, this expiration date will be a + random value in the [now + period; now + period + max_delta] range, + max_delta being the configured value for the size of the range, unless + delta is 0, in which case it sets it to now + period. 
""" now_ms = self._clock.time_msec() expiration_ts = now_ms + self._account_validity.period + + if use_delta and self._account_validity.startup_job_max_delta: + expiration_ts = self.rand.randrange( + expiration_ts, + expiration_ts + self._account_validity.startup_job_max_delta, + ) + self._simple_insert_txn( txn, "account_validity", diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index d4a1d4d50c..7603440fd8 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -436,6 +436,7 @@ class AccountValidityBackgroundJobTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): self.validity_period = 10 + self.max_delta = 10 config = self.default_config() @@ -459,8 +460,28 @@ class AccountValidityBackgroundJobTestCase(unittest.HomeserverTestCase): """ user_id = self.register_user("kermit", "user") + self.hs.config.account_validity.startup_job_max_delta = 0 + now_ms = self.hs.clock.time_msec() self.get_success(self.store._set_expiration_date_when_missing()) res = self.get_success(self.store.get_expiration_ts_for_user(user_id)) self.assertEqual(res, now_ms + self.validity_period) + + def test_background_job_with_max_delta(self): + """ + Tests the same thing as test_background_job, except that it sets the + startup_job_max_delta parameter and checks that the expiration date is within the + allowed range. + """ + user_id = self.register_user("kermit_delta", "user") + + self.hs.config.account_validity.startup_job_max_delta = self.max_delta + + now_ms = self.hs.clock.time_msec() + self.get_success(self.store._set_expiration_date_when_missing()) + + res = self.get_success(self.store.get_expiration_ts_for_user(user_id)) + + self.assertLessEqual(res, now_ms + self.validity_period + self.delta) + self.assertGreaterEqual(res, now_ms + self.validity_period) From 4aba561c65c842e640861035e3937e78ab950a21 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 28 May 2019 16:55:10 +0100 Subject: [PATCH 225/429] Config and changelog --- changelog.d/5276.feature | 1 + docs/sample_config.yaml | 7 +++++++ 2 files changed, 8 insertions(+) create mode 100644 changelog.d/5276.feature diff --git a/changelog.d/5276.feature b/changelog.d/5276.feature new file mode 100644 index 0000000000..403dee0862 --- /dev/null +++ b/changelog.d/5276.feature @@ -0,0 +1 @@ +Allow configuring a range for the account validity startup job. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index f658ec8ecd..8ff53d5cb4 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -755,11 +755,18 @@ uploads_path: "DATADIR/uploads" # after that the validity period changes and Synapse is restarted, the users' # expiration dates won't be updated unless their account is manually renewed. # +# If set, the ``startup_job_max_delta`` optional setting will make the startup job +# described above set a random expiration date between t + period and +# t + period + startup_job_max_delta, t being the date and time at which the job +# sets the expiration date for a given user. This is useful for server admins that +# want to avoid Synapse sending a lot of renewal emails at once. +# #account_validity: # enabled: True # period: 6w # renew_at: 1w # renew_email_subject: "Renew your %(app)s account" +# startup_job_max_delta: 2d # The user must provide all of the below types of 3PID when registering. 
# From 7e1c7cc2742f5eb9d6d37205a0c457b8a7bd015f Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 28 May 2019 17:13:26 +0100 Subject: [PATCH 226/429] Typo --- tests/rest/client/v2_alpha/test_register.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 7603440fd8..68654e25ab 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -483,5 +483,5 @@ class AccountValidityBackgroundJobTestCase(unittest.HomeserverTestCase): res = self.get_success(self.store.get_expiration_ts_for_user(user_id)) - self.assertLessEqual(res, now_ms + self.validity_period + self.delta) + self.assertLessEqual(res, now_ms + self.validity_period + self.max_delta) self.assertGreaterEqual(res, now_ms + self.validity_period) From 5726378eced1d032552318cb5fd603da8f364db2 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 28 May 2019 21:20:11 +0100 Subject: [PATCH 227/429] Fix "db txn 'update_presence' from sentinel context" log messages (#5275) Fixes #4414. --- changelog.d/5275.bugfix | 1 + synapse/handlers/presence.py | 97 +++++++++++++++++------------------- 2 files changed, 47 insertions(+), 51 deletions(-) create mode 100644 changelog.d/5275.bugfix diff --git a/changelog.d/5275.bugfix b/changelog.d/5275.bugfix new file mode 100644 index 0000000000..45a554642a --- /dev/null +++ b/changelog.d/5275.bugfix @@ -0,0 +1 @@ +Fix "db txn 'update_presence' from sentinel context" log messages. diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 59d53f1050..6209858bbb 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -182,17 +182,27 @@ class PresenceHandler(object): # Start a LoopingCall in 30s that fires every 5s. # The initial delay is to allow disconnected clients a chance to # reconnect before we treat them as offline. + def run_timeout_handler(): + return run_as_background_process( + "handle_presence_timeouts", self._handle_timeouts + ) + self.clock.call_later( 30, self.clock.looping_call, - self._handle_timeouts, + run_timeout_handler, 5000, ) + def run_persister(): + return run_as_background_process( + "persist_presence_changes", self._persist_unpersisted_changes + ) + self.clock.call_later( 60, self.clock.looping_call, - self._persist_unpersisted_changes, + run_persister, 60 * 1000, ) @@ -229,6 +239,7 @@ class PresenceHandler(object): ) if self.unpersisted_users_changes: + yield self.store.update_presence([ self.user_to_current_state[user_id] for user_id in self.unpersisted_users_changes @@ -240,30 +251,18 @@ class PresenceHandler(object): """We periodically persist the unpersisted changes, as otherwise they may stack up and slow down shutdown times. """ - logger.info( - "Performing _persist_unpersisted_changes. 
Persisting %d unpersisted changes", - len(self.unpersisted_users_changes) - ) - unpersisted = self.unpersisted_users_changes self.unpersisted_users_changes = set() if unpersisted: + logger.info( + "Persisting %d upersisted presence updates", len(unpersisted) + ) yield self.store.update_presence([ self.user_to_current_state[user_id] for user_id in unpersisted ]) - logger.info("Finished _persist_unpersisted_changes") - - @defer.inlineCallbacks - def _update_states_and_catch_exception(self, new_states): - try: - res = yield self._update_states(new_states) - defer.returnValue(res) - except Exception: - logger.exception("Error updating presence") - @defer.inlineCallbacks def _update_states(self, new_states): """Updates presence of users. Sets the appropriate timeouts. Pokes @@ -338,45 +337,41 @@ class PresenceHandler(object): logger.info("Handling presence timeouts") now = self.clock.time_msec() - try: - with Measure(self.clock, "presence_handle_timeouts"): - # Fetch the list of users that *may* have timed out. Things may have - # changed since the timeout was set, so we won't necessarily have to - # take any action. - users_to_check = set(self.wheel_timer.fetch(now)) + # Fetch the list of users that *may* have timed out. Things may have + # changed since the timeout was set, so we won't necessarily have to + # take any action. + users_to_check = set(self.wheel_timer.fetch(now)) - # Check whether the lists of syncing processes from an external - # process have expired. - expired_process_ids = [ - process_id for process_id, last_update - in self.external_process_last_updated_ms.items() - if now - last_update > EXTERNAL_PROCESS_EXPIRY - ] - for process_id in expired_process_ids: - users_to_check.update( - self.external_process_last_updated_ms.pop(process_id, ()) - ) - self.external_process_last_update.pop(process_id) + # Check whether the lists of syncing processes from an external + # process have expired. 
+ expired_process_ids = [ + process_id for process_id, last_update + in self.external_process_last_updated_ms.items() + if now - last_update > EXTERNAL_PROCESS_EXPIRY + ] + for process_id in expired_process_ids: + users_to_check.update( + self.external_process_last_updated_ms.pop(process_id, ()) + ) + self.external_process_last_update.pop(process_id) - states = [ - self.user_to_current_state.get( - user_id, UserPresenceState.default(user_id) - ) - for user_id in users_to_check - ] + states = [ + self.user_to_current_state.get( + user_id, UserPresenceState.default(user_id) + ) + for user_id in users_to_check + ] - timers_fired_counter.inc(len(states)) + timers_fired_counter.inc(len(states)) - changes = handle_timeouts( - states, - is_mine_fn=self.is_mine_id, - syncing_user_ids=self.get_currently_syncing_users(), - now=now, - ) + changes = handle_timeouts( + states, + is_mine_fn=self.is_mine_id, + syncing_user_ids=self.get_currently_syncing_users(), + now=now, + ) - run_in_background(self._update_states_and_catch_exception, changes) - except Exception: - logger.exception("Exception in _handle_timeouts loop") + return self._update_states(changes) @defer.inlineCallbacks def bump_presence_active_time(self, user): From 9b6f72663e2eb8c2caf834da511c2617d8061e58 Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Tue, 28 May 2019 20:53:56 -0500 Subject: [PATCH 228/429] Fix docs on resetting the user directory (#5036) Signed-off-by: Aaron Raimist --- docs/user_directory.md | 10 +++------- synapse/config/user_directory.py | 6 +++--- 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/docs/user_directory.md b/docs/user_directory.md index 4c8ee44f37..e64aa453cc 100644 --- a/docs/user_directory.md +++ b/docs/user_directory.md @@ -7,11 +7,7 @@ who are present in a publicly viewable room present on the server. The directory info is stored in various tables, which can (typically after DB corruption) get stale or out of sync. If this happens, for now the -quickest solution to fix it is: - -``` -UPDATE user_directory_stream_pos SET stream_id = NULL; -``` - -and restart the synapse, which should then start a background task to +solution to fix it is to execute the SQL here +https://github.com/matrix-org/synapse/blob/master/synapse/storage/schema/delta/53/user_dir_populate.sql +and then restart synapse. This should then start a background task to flush the current tables and regenerate the directory. diff --git a/synapse/config/user_directory.py b/synapse/config/user_directory.py index 142754a7dc..023997ccde 100644 --- a/synapse/config/user_directory.py +++ b/synapse/config/user_directory.py @@ -43,9 +43,9 @@ class UserDirectoryConfig(Config): # # 'search_all_users' defines whether to search all users visible to your HS # when searching the user directory, rather than limiting to users visible - # in public rooms. Defaults to false. If you set it True, you'll have to run - # UPDATE user_directory_stream_pos SET stream_id = NULL; - # on your database to tell it to rebuild the user_directory search indexes. + # in public rooms. Defaults to false. 
If you set it True, you'll have to + # rebuild the user_directory search indexes, see + # https://github.com/matrix-org/synapse/blob/master/docs/user_directory.md # #user_directory: # enabled: true From 878b00c39531d5200a3efab356766e4e2670e589 Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Tue, 28 May 2019 20:58:18 -0500 Subject: [PATCH 229/429] Add changelog Signed-off-by: Aaron Raimist --- changelog.d/5282.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5282.misc diff --git a/changelog.d/5282.misc b/changelog.d/5282.misc new file mode 100644 index 0000000000..350e15bc03 --- /dev/null +++ b/changelog.d/5282.misc @@ -0,0 +1 @@ +Fix docs on resetting the user directory. From f795595e956c4584ae280a59ca122057894e0c54 Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Tue, 28 May 2019 22:04:24 -0500 Subject: [PATCH 230/429] Specify the type of reCAPTCHA key to use (#5013) Signed-off-by: Aaron Raimist --- docs/CAPTCHA_SETUP.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/CAPTCHA_SETUP.rst b/docs/CAPTCHA_SETUP.rst index 19a204d9ce..0c22ee4ff6 100644 --- a/docs/CAPTCHA_SETUP.rst +++ b/docs/CAPTCHA_SETUP.rst @@ -7,6 +7,7 @@ Requires a public/private key pair from: https://developers.google.com/recaptcha/ +Must be a reCAPTCHA v2 key using the "I'm not a robot" Checkbox option Setting ReCaptcha Keys ---------------------- From 2ec28094606383a4e00fa6665b27a64bc00fd9fd Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Tue, 28 May 2019 22:05:51 -0500 Subject: [PATCH 231/429] Add changelog Signed-off-by: Aaron Raimist --- changelog.d/5283.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5283.misc diff --git a/changelog.d/5283.misc b/changelog.d/5283.misc new file mode 100644 index 0000000000..002721e566 --- /dev/null +++ b/changelog.d/5283.misc @@ -0,0 +1 @@ +Specify the type of reCAPTCHA key to use. From ecaa299cabe099449a1a05aef4ba3708c9d231cf Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 29 May 2019 16:32:30 +1000 Subject: [PATCH 232/429] Rename 5282.misc to 5282.doc --- changelog.d/{5282.misc => 5282.doc} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename changelog.d/{5282.misc => 5282.doc} (100%) diff --git a/changelog.d/5282.misc b/changelog.d/5282.doc similarity index 100% rename from changelog.d/5282.misc rename to changelog.d/5282.doc From 0729ef01f80b8d6f2fcf1ab40a22587347b2c777 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 29 May 2019 16:41:25 +1000 Subject: [PATCH 233/429] regenerate sample config --- docs/sample_config.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 421ae96f04..edfde05a23 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1103,9 +1103,9 @@ password_config: # # 'search_all_users' defines whether to search all users visible to your HS # when searching the user directory, rather than limiting to users visible -# in public rooms. Defaults to false. If you set it True, you'll have to run -# UPDATE user_directory_stream_pos SET stream_id = NULL; -# on your database to tell it to rebuild the user_directory search indexes. +# in public rooms. Defaults to false. 
If you set it True, you'll have to +# rebuild the user_directory search indexes, see +# https://github.com/matrix-org/synapse/blob/master/docs/user_directory.md # #user_directory: # enabled: true From f76d407ef3d2f6c18a568eff965e12e794105a7a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 29 May 2019 09:17:33 +0100 Subject: [PATCH 234/429] Fix dropped logcontexts during high outbound traffic. (#5277) Fixes #5271. --- changelog.d/5277.bugfix | 1 + synapse/app/_base.py | 20 +++++++++++++------- 2 files changed, 14 insertions(+), 7 deletions(-) create mode 100644 changelog.d/5277.bugfix diff --git a/changelog.d/5277.bugfix b/changelog.d/5277.bugfix new file mode 100644 index 0000000000..371aa2e7fb --- /dev/null +++ b/changelog.d/5277.bugfix @@ -0,0 +1 @@ +Fix dropped logcontexts during high outbound traffic. diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 08199a5e8d..8cc990399f 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -344,15 +344,21 @@ class _LimitedHostnameResolver(object): def resolveHostName(self, resolutionReceiver, hostName, portNumber=0, addressTypes=None, transportSemantics='TCP'): - # Note this is happening deep within the reactor, so we don't need to - # worry about log contexts. - # We need this function to return `resolutionReceiver` so we do all the # actual logic involving deferreds in a separate function. - self._resolve( - resolutionReceiver, hostName, portNumber, - addressTypes, transportSemantics, - ) + + # even though this is happening within the depths of twisted, we need to drop + # our logcontext before starting _resolve, otherwise: (a) _resolve will drop + # the logcontext if it returns an incomplete deferred; (b) _resolve will + # call the resolutionReceiver *with* a logcontext, which it won't be expecting. + with PreserveLoggingContext(): + self._resolve( + resolutionReceiver, + hostName, + portNumber, + addressTypes, + transportSemantics, + ) return resolutionReceiver From 58c8ed5b0dbfe0556f11985a61c0e13bbe61d93c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 29 May 2019 11:56:24 +0100 Subject: [PATCH 235/429] Correctly filter out extremities with soft failed prevs (#5274) When we receive a soft failed event we, correctly, *do not* update the forward extremity table with the event. However, if we later receive an event that references the soft failed event we then need to remove the soft failed events prev events from the forward extremities table, otherwise we just build up forward extremities. Fixes #5269 --- changelog.d/5274.bugfix | 1 + synapse/storage/events.py | 82 +++++++++++++++++++++++++++++++++++++-- 2 files changed, 80 insertions(+), 3 deletions(-) create mode 100644 changelog.d/5274.bugfix diff --git a/changelog.d/5274.bugfix b/changelog.d/5274.bugfix new file mode 100644 index 0000000000..9e14d20289 --- /dev/null +++ b/changelog.d/5274.bugfix @@ -0,0 +1 @@ +Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 2ffc27ff41..6e9f3d1dc0 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -554,10 +554,18 @@ class EventsStore( e_id for event in new_events for e_id in event.prev_event_ids() ) - # Finally, remove any events which are prev_events of any existing events. + # Remove any events which are prev_events of any existing events. 
existing_prevs = yield self._get_events_which_are_prevs(result) result.difference_update(existing_prevs) + # Finally handle the case where the new events have soft-failed prev + # events. If they do we need to remove them and their prev events, + # otherwise we end up with dangling extremities. + existing_prevs = yield self._get_prevs_before_rejected( + e_id for event in new_events for e_id in event.prev_event_ids() + ) + result.difference_update(existing_prevs) + defer.returnValue(result) @defer.inlineCallbacks @@ -573,7 +581,7 @@ class EventsStore( """ results = [] - def _get_events(txn, batch): + def _get_events_which_are_prevs_txn(txn, batch): sql = """ SELECT prev_event_id, internal_metadata FROM event_edges @@ -596,10 +604,78 @@ class EventsStore( ) for chunk in batch_iter(event_ids, 100): - yield self.runInteraction("_get_events_which_are_prevs", _get_events, chunk) + yield self.runInteraction( + "_get_events_which_are_prevs", + _get_events_which_are_prevs_txn, + chunk, + ) defer.returnValue(results) + @defer.inlineCallbacks + def _get_prevs_before_rejected(self, event_ids): + """Get soft-failed ancestors to remove from the extremities. + + Given a set of events, find all those that have been soft-failed or + rejected. Returns those soft failed/rejected events and their prev + events (whether soft-failed/rejected or not), and recurses up the + prev-event graph until it finds no more soft-failed/rejected events. + + This is used to find extremities that are ancestors of new events, but + are separated by soft failed events. + + Args: + event_ids (Iterable[str]): Events to find prev events for. Note + that these must have already been persisted. + + Returns: + Deferred[set[str]] + """ + + # The set of event_ids to return. This includes all soft-failed events + # and their prev events. + existing_prevs = set() + + def _get_prevs_before_rejected_txn(txn, batch): + to_recursively_check = batch + + while to_recursively_check: + sql = """ + SELECT + event_id, prev_event_id, internal_metadata, + rejections.event_id IS NOT NULL + FROM event_edges + INNER JOIN events USING (event_id) + LEFT JOIN rejections USING (event_id) + LEFT JOIN event_json USING (event_id) + WHERE + event_id IN (%s) + AND NOT events.outlier + """ % ( + ",".join("?" 
for _ in to_recursively_check), + ) + + txn.execute(sql, to_recursively_check) + to_recursively_check = [] + + for event_id, prev_event_id, metadata, rejected in txn: + if prev_event_id in existing_prevs: + continue + + soft_failed = json.loads(metadata).get("soft_failed") + if soft_failed or rejected: + to_recursively_check.append(prev_event_id) + existing_prevs.add(prev_event_id) + + for chunk in batch_iter(event_ids, 100): + yield self.runInteraction( + "_get_prevs_before_rejected", + _get_prevs_before_rejected_txn, + chunk, + ) + + defer.returnValue(existing_prevs) + @defer.inlineCallbacks def _get_new_state_after_events( self, room_id, events_context, old_latest_event_ids, new_latest_event_ids From 30858ff4617517916fc8973b16c6be6e13288bd0 Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Wed, 29 May 2019 08:27:41 -0500 Subject: [PATCH 236/429] Fix error when downloading thumbnail with width/height param missing (#5258) Fix error when downloading thumbnail with width/height param missing Fixes #2748 Signed-off-by: Aaron Raimist --- changelog.d/5258.bugfix | 1 + synapse/rest/media/v1/thumbnail_resource.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/5258.bugfix diff --git a/changelog.d/5258.bugfix b/changelog.d/5258.bugfix new file mode 100644 index 0000000000..fb5d44aedb --- /dev/null +++ b/changelog.d/5258.bugfix @@ -0,0 +1 @@ +Fix error when downloading thumbnail with missing width/height parameter. diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/v1/thumbnail_resource.py index 5305e9175f..35a750923b 100644 --- a/synapse/rest/media/v1/thumbnail_resource.py +++ b/synapse/rest/media/v1/thumbnail_resource.py @@ -56,8 +56,8 @@ class ThumbnailResource(Resource): def _async_render_GET(self, request): set_cors_headers(request) server_name, media_id, _ = parse_media_id(request) - width = parse_integer(request, "width") - height = parse_integer(request, "height") + width = parse_integer(request, "width", required=True) + height = parse_integer(request, "height", required=True) method = parse_string(request, "method", "scale") m_type = parse_string(request, "type", "image/png") From d79c9994f416ee5dab27a277fa729ffa5ee74ccc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 28 May 2019 18:52:41 +0100 Subject: [PATCH 237/429] Add DB bg update to cleanup extremities. Due to #5269 we may have extremities in our DB that we shouldn't have, so lets add a cleanup task such to remove those. 
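In rough outline, the new background update does the following (an illustrative
sketch only, not the actual implementation: the real logic is the
`_cleanup_extremities_bg_update` handler added below, which works in batches
against the `event_edges`, `events`, `event_json` and `rejections` tables
rather than the in-memory structures assumed here, and also deals with
outliers and cache invalidation):

    # Sketch under simplifying assumptions. `children` maps an event ID to the
    # IDs of events that list it as a prev_event; `is_soft_failed_or_rejected`
    # stands in for the internal_metadata / rejections lookups done in SQL by
    # the real background update.
    def stale_extremities(extremities, children, is_soft_failed_or_rejected):
        to_delete = set()
        for extremity in extremities:
            # Walk down through descendants, continuing only through events
            # that are themselves soft-failed or rejected.
            stack = list(children.get(extremity, ()))
            while stack:
                event_id = stack.pop()
                if is_soft_failed_or_rejected(event_id):
                    stack.extend(children.get(event_id, ()))
                else:
                    # There is a non-soft-failed, non-rejected descendant, so
                    # this extremity should have been removed when that
                    # descendant was persisted; delete it now.
                    to_delete.add(extremity)
                    break
        return to_delete
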
--- synapse/storage/events.py | 186 ++++++++++++++++++ .../delta/54/delete_forward_extremities.sql | 19 ++ 2 files changed, 205 insertions(+) create mode 100644 synapse/storage/schema/delta/54/delete_forward_extremities.sql diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 6e9f3d1dc0..a9be143bd5 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -221,6 +221,7 @@ class EventsStore( ): EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts" EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url" + EVENT_DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities" def __init__(self, db_conn, hs): super(EventsStore, self).__init__(db_conn, hs) @@ -252,6 +253,11 @@ class EventsStore( psql_only=True, ) + self.register_background_update_handler( + self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES, + self._cleanup_extremities_bg_update, + ) + self._event_persist_queue = _EventPeristenceQueue() self._state_resolution_handler = hs.get_state_resolution_handler() @@ -2341,6 +2347,186 @@ class EventsStore( get_all_updated_current_state_deltas_txn, ) + @defer.inlineCallbacks + def _cleanup_extremities_bg_update(self, progress, batch_size): + """Background update to clean out extremities that should have been + deleted previously. + + Mainly used to deal with the aftermath of #5269. + """ + + # This works by first copying all existing forward extremities into the + # `_extremities_to_check` table at start up, and then checking each + # event in that table whether we have any descendants that are not + # soft-failed/rejected. If that is the case then we delete that event + # from the forward extremities table. + # + # For efficiency, we do this in batches by recursively pulling out all + # descendants of a batch until we find the non soft-failed/rejected + # events, i.e. the set of descendants whose chain of prev events back + # to the batch of extremities are all soft-failed or rejected. + # Typically, we won't find any such events as extremities will rarely + # have any descendants, but if they do then we should delete those + # extremities. + + def _cleanup_extremities_bg_update_txn(txn): + # The set of extremity event IDs that we're checking this round + original_set = set() + + # A dict[str, set[str]] of event ID to their prev events. + graph = {} + + # The set of descendants of the original set that are not rejected + # nor soft-failed. Ancestors of these events should be removed + # from the forward extremities table. + non_rejected_leaves = set() + + # Set of event IDs that have been soft failed, and for which we + # should check if they have descendants which haven't been soft + # failed. + soft_failed_events_to_lookup = set() + + # First, we get `batch_size` events from the table, pulling out + # their prev events, if any, and their prev events rejection status. + txn.execute( + """SELECT prev_event_id, event_id, internal_metadata, + rejections.event_id IS NOT NULL, events.outlier + FROM ( + SELECT event_id AS prev_event_id + FROM _extremities_to_check + LIMIT ? + ) AS f + LEFT JOIN event_edges USING (prev_event_id) + LEFT JOIN events USING (event_id) + LEFT JOIN event_json USING (event_id) + LEFT JOIN rejections USING (event_id) + """, (batch_size,) + ) + + for prev_event_id, event_id, metadata, rejected, outlier in txn: + original_set.add(prev_event_id) + + if not event_id or outlier: + # Common case where the forward extremity doesn't have any + # descendants. 
+ continue + + graph.setdefault(event_id, set()).add(prev_event_id) + + soft_failed = False + if metadata: + soft_failed = json.loads(metadata).get("soft_failed") + + if soft_failed or rejected: + soft_failed_events_to_lookup.add(event_id) + else: + non_rejected_leaves.add(event_id) + + # Now we recursively check all the soft-failed descendants we + # found above in the same way, until we have nothing left to + # check. + while soft_failed_events_to_lookup: + # We only want to do 100 at a time, so we split given list + # into two. + batch = list(soft_failed_events_to_lookup) + to_check, to_defer = batch[:100], batch[100:] + soft_failed_events_to_lookup = set(to_defer) + + sql = """SELECT prev_event_id, event_id, internal_metadata, + rejections.event_id IS NOT NULL + FROM event_edges + INNER JOIN events USING (event_id) + INNER JOIN event_json USING (event_id) + LEFT JOIN rejections USING (event_id) + WHERE + prev_event_id IN (%s) + AND NOT events.outlier + """ % ( + ",".join("?" for _ in to_check), + ) + txn.execute(sql, to_check) + + for prev_event_id, event_id, metadata, rejected in txn: + if event_id in graph: + # Already handled this event previously, but we still + # want to record the edge. + graph.setdefault(event_id, set()).add(prev_event_id) + logger.info("Already handled") + continue + + graph.setdefault(event_id, set()).add(prev_event_id) + + soft_failed = json.loads(metadata).get("soft_failed") + if soft_failed or rejected: + soft_failed_events_to_lookup.add(event_id) + else: + non_rejected_leaves.add(event_id) + + # We have a set of non-soft-failed descendants, so we recurse up + # the graph to find all ancestors and add them to the set of event + # IDs that we can delete from forward extremities table. + to_delete = set() + while non_rejected_leaves: + event_id = non_rejected_leaves.pop() + prev_event_ids = graph.get(event_id, set()) + non_rejected_leaves.update(prev_event_ids) + to_delete.update(prev_event_ids) + + to_delete.intersection_update(original_set) + + logger.info("Deleting up to %d forward extremities", len(to_delete)) + + self._simple_delete_many_txn( + txn=txn, + table="event_forward_extremities", + column="event_id", + iterable=to_delete, + keyvalues={}, + ) + + if to_delete: + # We now need to invalidate the caches of these rooms + rows = self._simple_select_many_txn( + txn, + table="events", + column="event_id", + iterable=to_delete, + keyvalues={}, + retcols=("room_id",) + ) + for row in rows: + txn.call_after( + self.get_latest_event_ids_in_room.invalidate, + (row["room_id"],) + ) + + self._simple_delete_many_txn( + txn=txn, + table="_extremities_to_check", + column="event_id", + iterable=original_set, + keyvalues={}, + ) + + return len(original_set) + + num_handled = yield self.runInteraction( + "_cleanup_extremities_bg_update", _cleanup_extremities_bg_update_txn, + ) + + if not num_handled: + yield self._end_background_update(self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES) + + def _drop_table_txn(txn): + txn.execute("DROP TABLE _extremities_to_check") + + yield self.runInteraction( + "_cleanup_extremities_bg_update_drop_table", + _drop_table_txn, + ) + + defer.returnValue(num_handled) + AllNewEventsResult = namedtuple( "AllNewEventsResult", diff --git a/synapse/storage/schema/delta/54/delete_forward_extremities.sql b/synapse/storage/schema/delta/54/delete_forward_extremities.sql new file mode 100644 index 0000000000..7056bd1d00 --- /dev/null +++ b/synapse/storage/schema/delta/54/delete_forward_extremities.sql @@ -0,0 +1,19 @@ +/* Copyright 2019 The Matrix.org 
Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +INSERT INTO background_updates (update_name, progress_json) VALUES + ('delete_soft_failed_extremities', '{}'); + +CREATE TABLE _extremities_to_check AS SELECT event_id FROM event_forward_extremities; From 7e8e683754cdc606a1440832d9b1eb47f930ddee Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 29 May 2019 11:58:32 +0100 Subject: [PATCH 238/429] Log actual number of entries deleted --- synapse/storage/_base.py | 12 +++++++++--- synapse/storage/events.py | 6 ++++-- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index fa6839ceca..3fe827cd43 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -1261,7 +1261,8 @@ class SQLBaseStore(object): " AND ".join("%s = ?" % (k,) for k in keyvalues), ) - return txn.execute(sql, list(keyvalues.values())) + txn.execute(sql, list(keyvalues.values())) + return txn.rowcount def _simple_delete_many(self, table, column, iterable, keyvalues, desc): return self.runInteraction( @@ -1280,9 +1281,12 @@ class SQLBaseStore(object): column : column name to test for inclusion against `iterable` iterable : list keyvalues : dict of column names and values to select the rows with + + Returns: + int: Number rows deleted """ if not iterable: - return + return 0 sql = "DELETE FROM %s" % table @@ -1297,7 +1301,9 @@ class SQLBaseStore(object): if clauses: sql = "%s WHERE %s" % (sql, " AND ".join(clauses)) - return txn.execute(sql, values) + txn.execute(sql, values) + + return txn.rowcount def _get_cache_dict( self, db_conn, table, entity_column, stream_column, max_value, limit=100000 diff --git a/synapse/storage/events.py b/synapse/storage/events.py index a9be143bd5..a9664928ca 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -2476,7 +2476,7 @@ class EventsStore( logger.info("Deleting up to %d forward extremities", len(to_delete)) - self._simple_delete_many_txn( + deleted = self._simple_delete_many_txn( txn=txn, table="event_forward_extremities", column="event_id", @@ -2484,7 +2484,9 @@ class EventsStore( keyvalues={}, ) - if to_delete: + logger.info("Deleted %d forward extremities", deleted) + + if deleted: # We now need to invalidate the caches of these rooms rows = self._simple_select_many_txn( txn, From 532b825ed9dff2faec1360fa3ee3734e0d782bd3 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 30 May 2019 00:55:18 +1000 Subject: [PATCH 239/429] Serve CAS login over r0 (#5286) --- changelog.d/5286.feature | 1 + synapse/rest/client/v1/login.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/5286.feature diff --git a/changelog.d/5286.feature b/changelog.d/5286.feature new file mode 100644 index 0000000000..81860279a3 --- /dev/null +++ b/changelog.d/5286.feature @@ -0,0 +1 @@ +CAS login will now hit the r0 API, not the deprecated v1 one. 
diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 5180e9eaf1..029039c162 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -386,7 +386,7 @@ class CasRedirectServlet(RestServlet): b"redirectUrl": args[b"redirectUrl"][0] }).encode('ascii') hs_redirect_url = (self.cas_service_url + - b"/_matrix/client/api/v1/login/cas/ticket") + b"/_matrix/client/r0/login/cas/ticket") service_param = urllib.parse.urlencode({ b"service": b"%s?%s" % (hs_redirect_url, client_redirect_url_param) }).encode('ascii') @@ -395,7 +395,7 @@ class CasRedirectServlet(RestServlet): class CasTicketServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/login/cas/ticket", releases=()) + PATTERNS = client_path_patterns("/login/cas/ticket") def __init__(self, hs): super(CasTicketServlet, self).__init__(hs) From d7add713a8351024aec9a51c1744f78ac39f552e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 29 May 2019 14:19:11 +0100 Subject: [PATCH 240/429] Add test --- tests/storage/test_cleanup_extrems.py | 248 ++++++++++++++++++++++++++ 1 file changed, 248 insertions(+) create mode 100644 tests/storage/test_cleanup_extrems.py diff --git a/tests/storage/test_cleanup_extrems.py b/tests/storage/test_cleanup_extrems.py new file mode 100644 index 0000000000..6dda66ecd3 --- /dev/null +++ b/tests/storage/test_cleanup_extrems.py @@ -0,0 +1,248 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os.path + +from synapse.api.constants import EventTypes +from synapse.storage import prepare_database +from synapse.types import Requester, UserID + +from tests.unittest import HomeserverTestCase + + +class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase): + """Test the background update to clean forward extremities table. + """ + + def prepare(self, reactor, clock, homeserver): + self.store = homeserver.get_datastore() + self.event_creator = homeserver.get_event_creation_handler() + self.room_creator = homeserver.get_room_creation_handler() + + # Create a test user and room + self.user = UserID("alice", "test") + self.requester = Requester(self.user, None, False, None, None) + info = self.get_success(self.room_creator.create_room(self.requester, {})) + self.room_id = info["room_id"] + + def create_and_send_event(self, soft_failed=False, prev_event_ids=None): + """Create and send an event. + + Args: + soft_failed (bool): Whether to create a soft failed event or not + prev_event_ids (list[str]|None): Explicitly set the prev events, + or if None just use the default + + Returns: + str: The new event's ID. 
+ """ + prev_events_and_hashes = None + if prev_event_ids: + prev_events_and_hashes = [[p, {}, 0] for p in prev_event_ids] + + event, context = self.get_success( + self.event_creator.create_event( + self.requester, + { + "type": EventTypes.Message, + "room_id": self.room_id, + "sender": self.user.to_string(), + "content": {"body": "", "msgtype": "m.text"}, + }, + prev_events_and_hashes=prev_events_and_hashes, + ) + ) + + if soft_failed: + event.internal_metadata.soft_failed = True + + self.get_success( + self.event_creator.send_nonmember_event(self.requester, event, context) + ) + + return event.event_id + + def add_extremity(self, event_id): + """Add the given event as an extremity to the room. + """ + self.get_success( + self.store._simple_insert( + table="event_forward_extremities", + values={"room_id": self.room_id, "event_id": event_id}, + desc="test_add_extremity", + ) + ) + + self.store.get_latest_event_ids_in_room.invalidate((self.room_id,)) + + def run_background_update(self): + """Re run the background update to clean up the extremities. + """ + # Make sure we don't clash with in progress updates. + self.assertTrue(self.store._all_done, "Background updates are still ongoing") + + schema_path = os.path.join( + prepare_database.dir_path, + "schema", + "delta", + "54", + "delete_forward_extremities.sql", + ) + + def run_delta_file(txn): + prepare_database.executescript(txn, schema_path) + + self.get_success( + self.store.runInteraction("test_delete_forward_extremities", run_delta_file) + ) + + # Ugh, have to reset this flag + self.store._all_done = False + + while not self.get_success(self.store.has_completed_background_updates()): + self.get_success(self.store.do_next_background_update(100), by=0.1) + + def test_soft_failed_extremities_handled_correctly(self): + """Test that extremities are correctly calculated in the presence of + soft failed events. + + Tests a graph like: + + A <- SF1 <- SF2 <- B + + Where SF* are soft failed. + """ + + # Create the room graph + event_id_1 = self.create_and_send_event() + event_id_2 = self.create_and_send_event(True, [event_id_1]) + event_id_3 = self.create_and_send_event(True, [event_id_2]) + event_id_4 = self.create_and_send_event(False, [event_id_3]) + + # Check the latest events are as expected + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + + self.assertEqual(latest_event_ids, [event_id_4]) + + def test_basic_cleanup(self): + """Test that extremities are correctly calculated in the presence of + soft failed events. + + Tests a graph like: + + A <- SF1 <- B + + Where SF* are soft failed, and with extremities of A and B + """ + # Create the room graph + event_id_a = self.create_and_send_event() + event_id_sf1 = self.create_and_send_event(True, [event_id_a]) + event_id_b = self.create_and_send_event(False, [event_id_sf1]) + + # Add the new extremity and check the latest events are as expected + self.add_extremity(event_id_a) + + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + self.assertEqual(set(latest_event_ids), set((event_id_a, event_id_b))) + + # Run the background update and check it did the right thing + self.run_background_update() + + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + self.assertEqual(latest_event_ids, [event_id_b]) + + def test_chain_of_fail_cleanup(self): + """Test that extremities are correctly calculated in the presence of + soft failed events. 
+ + Tests a graph like: + + A <- SF1 <- SF2 <- B + + Where SF* are soft failed, and with extremities of A and B + """ + # Create the room graph + event_id_a = self.create_and_send_event() + event_id_sf1 = self.create_and_send_event(True, [event_id_a]) + event_id_sf2 = self.create_and_send_event(True, [event_id_sf1]) + event_id_b = self.create_and_send_event(False, [event_id_sf2]) + + # Add the new extremity and check the latest events are as expected + self.add_extremity(event_id_a) + + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + self.assertEqual(set(latest_event_ids), set((event_id_a, event_id_b))) + + # Run the background update and check it did the right thing + self.run_background_update() + + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + self.assertEqual(latest_event_ids, [event_id_b]) + + def test_forked_graph_cleanup(self): + r"""Test that extremities are correctly calculated in the presence of + soft failed events. + + Tests a graph like, where time flows down the page: + + A B + / \ / + / \ / + SF1 SF2 + | | + SF3 | + / \ | + | \ | + C SF4 + + Where SF* are soft failed, and with them A, B and C marked as + extremities. This should resolve to B and C being marked as extremity. + """ + # Create the room graph + event_id_a = self.create_and_send_event() + event_id_b = self.create_and_send_event() + event_id_sf1 = self.create_and_send_event(True, [event_id_a]) + event_id_sf2 = self.create_and_send_event(True, [event_id_a, event_id_b]) + event_id_sf3 = self.create_and_send_event(True, [event_id_sf1]) + self.create_and_send_event(True, [event_id_sf2, event_id_sf3]) # SF4 + event_id_c = self.create_and_send_event(False, [event_id_sf3]) + + # Add the new extremity and check the latest events are as expected + self.add_extremity(event_id_a) + + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + self.assertEqual( + set(latest_event_ids), set((event_id_a, event_id_b, event_id_c)) + ) + + # Run the background update and check it did the right thing + self.run_background_update() + + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + self.assertEqual(set(latest_event_ids), set([event_id_b, event_id_c])) From 67e0631f8f8bfc2843d2c06ebf20fe2226810686 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 28 May 2019 18:56:02 +0100 Subject: [PATCH 241/429] Newsfile --- changelog.d/5278.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5278.bugfix diff --git a/changelog.d/5278.bugfix b/changelog.d/5278.bugfix new file mode 100644 index 0000000000..9e14d20289 --- /dev/null +++ b/changelog.d/5278.bugfix @@ -0,0 +1 @@ +Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. 
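(A practical aside, not taken from any patch in this series: once the
`delete_soft_failed_extremities` delta above has been applied, progress of the
cleanup can be checked with a query along the following lines; the row should
disappear from `background_updates` once the update has completed.)

    SELECT update_name, progress_json
    FROM background_updates
    WHERE update_name = 'delete_soft_failed_extremities';
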
From 46c8f7a5170d04dfa6ad02c69667d4aa48635231 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 30 May 2019 01:47:16 +1000 Subject: [PATCH 242/429] Implement the SHHS complexity API (#5216) --- changelog.d/5216.misc | 1 + synapse/api/urls.py | 1 + synapse/federation/transport/server.py | 31 ++++++++- synapse/rest/admin/__init__.py | 12 +++- synapse/storage/events_worker.py | 50 +++++++++++++- tests/federation/test_complexity.py | 90 ++++++++++++++++++++++++++ 6 files changed, 180 insertions(+), 5 deletions(-) create mode 100644 changelog.d/5216.misc create mode 100644 tests/federation/test_complexity.py diff --git a/changelog.d/5216.misc b/changelog.d/5216.misc new file mode 100644 index 0000000000..dbfa29475f --- /dev/null +++ b/changelog.d/5216.misc @@ -0,0 +1 @@ +Synapse will now serve the experimental "room complexity" API endpoint. diff --git a/synapse/api/urls.py b/synapse/api/urls.py index 3c6bddff7a..e16c386a14 100644 --- a/synapse/api/urls.py +++ b/synapse/api/urls.py @@ -26,6 +26,7 @@ CLIENT_API_PREFIX = "/_matrix/client" FEDERATION_PREFIX = "/_matrix/federation" FEDERATION_V1_PREFIX = FEDERATION_PREFIX + "/v1" FEDERATION_V2_PREFIX = FEDERATION_PREFIX + "/v2" +FEDERATION_UNSTABLE_PREFIX = FEDERATION_PREFIX + "/unstable" STATIC_PREFIX = "/_matrix/static" WEB_CLIENT_PREFIX = "/_matrix/client" CONTENT_REPO_PREFIX = "/_matrix/content" diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 385eda2dca..d0efc4e0d3 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -23,7 +23,11 @@ from twisted.internet import defer import synapse from synapse.api.errors import Codes, FederationDeniedError, SynapseError from synapse.api.room_versions import RoomVersions -from synapse.api.urls import FEDERATION_V1_PREFIX, FEDERATION_V2_PREFIX +from synapse.api.urls import ( + FEDERATION_UNSTABLE_PREFIX, + FEDERATION_V1_PREFIX, + FEDERATION_V2_PREFIX, +) from synapse.http.endpoint import parse_and_validate_server_name from synapse.http.server import JsonResource from synapse.http.servlet import ( @@ -1304,6 +1308,30 @@ class FederationGroupsSettingJoinPolicyServlet(BaseFederationServlet): defer.returnValue((200, new_content)) +class RoomComplexityServlet(BaseFederationServlet): + """ + Indicates to other servers how complex (and therefore likely + resource-intensive) a public room this server knows about is. 
+ """ + PATH = "/rooms/(?P[^/]*)/complexity" + PREFIX = FEDERATION_UNSTABLE_PREFIX + + @defer.inlineCallbacks + def on_GET(self, origin, content, query, room_id): + + store = self.handler.hs.get_datastore() + + is_public = yield store.is_room_world_readable_or_publicly_joinable( + room_id + ) + + if not is_public: + raise SynapseError(404, "Room not found", errcode=Codes.INVALID_PARAM) + + complexity = yield store.get_room_complexity(room_id) + defer.returnValue((200, complexity)) + + FEDERATION_SERVLET_CLASSES = ( FederationSendServlet, FederationEventServlet, @@ -1327,6 +1355,7 @@ FEDERATION_SERVLET_CLASSES = ( FederationThirdPartyInviteExchangeServlet, On3pidBindServlet, FederationVersionServlet, + RoomComplexityServlet, ) OPENID_SERVLET_CLASSES = ( diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 744d85594f..d6c4dcdb18 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -822,10 +822,16 @@ class AdminRestResource(JsonResource): def __init__(self, hs): JsonResource.__init__(self, hs, canonical_json=False) + register_servlets(hs, self) - register_servlets_for_client_rest_resource(hs, self) - SendServerNoticeServlet(hs).register(self) - VersionServlet(hs).register(self) + +def register_servlets(hs, http_server): + """ + Register all the admin servlets. + """ + register_servlets_for_client_rest_resource(hs, http_server) + SendServerNoticeServlet(hs).register(http_server) + VersionServlet(hs).register(http_server) def register_servlets_for_client_rest_resource(hs, http_server): diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index 21b353cad3..b56c83e460 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import division + import itertools import logging from collections import namedtuple @@ -614,7 +616,7 @@ class EventsWorkerStore(SQLBaseStore): def _get_total_state_event_counts_txn(self, txn, room_id): """ - See get_state_event_counts. + See get_total_state_event_counts. """ sql = "SELECT COUNT(*) FROM state_events WHERE room_id=?" txn.execute(sql, (room_id,)) @@ -635,3 +637,49 @@ class EventsWorkerStore(SQLBaseStore): "get_total_state_event_counts", self._get_total_state_event_counts_txn, room_id ) + + def _get_current_state_event_counts_txn(self, txn, room_id): + """ + See get_current_state_event_counts. + """ + sql = "SELECT COUNT(*) FROM current_state_events WHERE room_id=?" + txn.execute(sql, (room_id,)) + row = txn.fetchone() + return row[0] if row else 0 + + def get_current_state_event_counts(self, room_id): + """ + Gets the current number of state events in a room. + + Args: + room_id (str) + + Returns: + Deferred[int] + """ + return self.runInteraction( + "get_current_state_event_counts", + self._get_current_state_event_counts_txn, room_id + ) + + @defer.inlineCallbacks + def get_room_complexity(self, room_id): + """ + Get a rough approximation of the complexity of the room. This is used by + remote servers to decide whether they wish to join the room or not. + Higher complexity value indicates that being in the room will consume + more resources. + + Args: + room_id (str) + + Returns: + Deferred[dict[str:int]] of complexity version to complexity. + """ + state_events = yield self.get_current_state_event_counts(room_id) + + # Call this one "v1", so we can introduce new ones as we want to develop + # it. 
+ complexity_v1 = round(state_events / 500, 2) + + defer.returnValue({"v1": complexity_v1}) diff --git a/tests/federation/test_complexity.py b/tests/federation/test_complexity.py new file mode 100644 index 0000000000..1e3e5aec66 --- /dev/null +++ b/tests/federation/test_complexity.py @@ -0,0 +1,90 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 Matrix.org Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from twisted.internet import defer + +from synapse.config.ratelimiting import FederationRateLimitConfig +from synapse.federation.transport import server +from synapse.rest import admin +from synapse.rest.client.v1 import login, room +from synapse.util.ratelimitutils import FederationRateLimiter + +from tests import unittest + + +class RoomComplexityTests(unittest.HomeserverTestCase): + + servlets = [ + admin.register_servlets, + room.register_servlets, + login.register_servlets, + ] + + def default_config(self, name='test'): + config = super(RoomComplexityTests, self).default_config(name=name) + config["limit_large_remote_room_joins"] = True + config["limit_large_remote_room_complexity"] = 0.05 + return config + + def prepare(self, reactor, clock, homeserver): + class Authenticator(object): + def authenticate_request(self, request, content): + return defer.succeed("otherserver.nottld") + + ratelimiter = FederationRateLimiter( + clock, + FederationRateLimitConfig( + window_size=1, + sleep_limit=1, + sleep_msec=1, + reject_limit=1000, + concurrent_requests=1000, + ), + ) + server.register_servlets( + homeserver, self.resource, Authenticator(), ratelimiter + ) + + def test_complexity_simple(self): + + u1 = self.register_user("u1", "pass") + u1_token = self.login("u1", "pass") + + room_1 = self.helper.create_room_as(u1, tok=u1_token) + self.helper.send_state( + room_1, event_type="m.room.topic", body={"topic": "foo"}, tok=u1_token + ) + + # Get the room complexity + request, channel = self.make_request( + "GET", "/_matrix/federation/unstable/rooms/%s/complexity" % (room_1,) + ) + self.render(request) + self.assertEquals(200, channel.code) + complexity = channel.json_body["v1"] + self.assertTrue(complexity > 0, complexity) + + # Artificially raise the complexity + store = self.hs.get_datastore() + store.get_current_state_event_counts = lambda x: defer.succeed(500 * 1.23) + + # Get the room complexity again -- make sure it's our artificial value + request, channel = self.make_request( + "GET", "/_matrix/federation/unstable/rooms/%s/complexity" % (room_1,) + ) + self.render(request) + self.assertEquals(200, channel.code) + complexity = channel.json_body["v1"] + self.assertEqual(complexity, 1.23) From 3dcf2feba8ee38c43c63c0f321fd379f843a5929 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 29 May 2019 19:27:50 +0100 Subject: [PATCH 243/429] Improve logging for logcontext leaks. 
(#5288) --- changelog.d/5288.misc | 1 + synapse/util/logcontext.py | 22 +++++++++++++--------- 2 files changed, 14 insertions(+), 9 deletions(-) create mode 100644 changelog.d/5288.misc diff --git a/changelog.d/5288.misc b/changelog.d/5288.misc new file mode 100644 index 0000000000..fbf049ba6a --- /dev/null +++ b/changelog.d/5288.misc @@ -0,0 +1 @@ +Improve logging for logcontext leaks. diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index 311b49e18a..fe412355d8 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -226,6 +226,8 @@ class LoggingContext(object): self.request = request def __str__(self): + if self.request: + return str(self.request) return "%s@%x" % (self.name, id(self)) @classmethod @@ -274,12 +276,10 @@ class LoggingContext(object): current = self.set_current_context(self.previous_context) if current is not self: if current is self.sentinel: - logger.warn("Expected logging context %s has been lost", self) + logger.warning("Expected logging context %s was lost", self) else: - logger.warn( - "Current logging context %s is not expected context %s", - current, - self + logger.warning( + "Expected logging context %s but found %s", self, current ) self.previous_context = None self.alive = False @@ -433,10 +433,14 @@ class PreserveLoggingContext(object): context = LoggingContext.set_current_context(self.current_context) if context != self.new_context: - logger.warn( - "Unexpected logging context: %s is not %s", - context, self.new_context, - ) + if context is LoggingContext.sentinel: + logger.warning("Expected logging context %s was lost", self.new_context) + else: + logger.warning( + "Expected logging context %s but found %s", + self.new_context, + context, + ) if self.current_context is not LoggingContext.sentinel: if not self.current_context.alive: From 8d92329214f92b0e4e4f2d6fa21e1197a691ba5b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 29 May 2019 19:31:52 +0100 Subject: [PATCH 244/429] Remove spurious debug from MatrixFederationHttpClient.get_json (#5287) This is just unhelpful spam --- changelog.d/5287.misc | 1 + synapse/http/matrixfederationclient.py | 4 ---- 2 files changed, 1 insertion(+), 4 deletions(-) create mode 100644 changelog.d/5287.misc diff --git a/changelog.d/5287.misc b/changelog.d/5287.misc new file mode 100644 index 0000000000..1286f1dd08 --- /dev/null +++ b/changelog.d/5287.misc @@ -0,0 +1 @@ +Remove spurious debug from MatrixFederationHttpClient.get_json. diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 7eefc7b1fc..8197619a78 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -711,10 +711,6 @@ class MatrixFederationHttpClient(object): RequestSendFailed: If there were problems connecting to the remote, due to e.g. DNS failures, connection timeouts etc. 
""" - logger.debug("get_json args: %s", args) - - logger.debug("Query bytes: %s Retry DNS: %s", args, retry_on_dns_fail) - request = MatrixFederationRequest( method="GET", destination=destination, From 123918b73938bdba89e6e0ce66482444590f2b4e Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Wed, 29 May 2019 14:44:25 -0500 Subject: [PATCH 245/429] Lint Signed-off-by: Aaron Raimist --- synapse/rest/client/v1/logout.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/synapse/rest/client/v1/logout.py b/synapse/rest/client/v1/logout.py index 2cf373e83c..ba20e75033 100644 --- a/synapse/rest/client/v1/logout.py +++ b/synapse/rest/client/v1/logout.py @@ -17,8 +17,6 @@ import logging from twisted.internet import defer -from synapse.api.errors import AuthError - from .base import ClientV1RestServlet, client_path_patterns logger = logging.getLogger(__name__) From 640fcbb07f8dc7d89465734f009d8e0a458c2b17 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 10:55:55 +0100 Subject: [PATCH 246/429] Fixup comments and logging --- synapse/storage/events.py | 21 +++++++++++-------- .../delta/54/delete_forward_extremities.sql | 3 +++ 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index a9664928ca..418d88b8dc 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -2387,7 +2387,8 @@ class EventsStore( soft_failed_events_to_lookup = set() # First, we get `batch_size` events from the table, pulling out - # their prev events, if any, and their prev events rejection status. + # their successor events, if any, and their successor events + # rejection status. txn.execute( """SELECT prev_event_id, event_id, internal_metadata, rejections.event_id IS NOT NULL, events.outlier @@ -2450,11 +2451,10 @@ class EventsStore( if event_id in graph: # Already handled this event previously, but we still # want to record the edge. - graph.setdefault(event_id, set()).add(prev_event_id) - logger.info("Already handled") + graph[event_id].add(prev_event_id) continue - graph.setdefault(event_id, set()).add(prev_event_id) + graph[event_id] = {prev_event_id} soft_failed = json.loads(metadata).get("soft_failed") if soft_failed or rejected: @@ -2474,8 +2474,6 @@ class EventsStore( to_delete.intersection_update(original_set) - logger.info("Deleting up to %d forward extremities", len(to_delete)) - deleted = self._simple_delete_many_txn( txn=txn, table="event_forward_extremities", @@ -2484,7 +2482,11 @@ class EventsStore( keyvalues={}, ) - logger.info("Deleted %d forward extremities", deleted) + logger.info( + "Deleted %d forward extremities of %d checked, to clean up #5269", + deleted, + len(original_set), + ) if deleted: # We now need to invalidate the caches of these rooms @@ -2496,10 +2498,11 @@ class EventsStore( keyvalues={}, retcols=("room_id",) ) - for row in rows: + room_ids = set(row["room_id"] for row in rows) + for room_id in room_ids: txn.call_after( self.get_latest_event_ids_in_room.invalidate, - (row["room_id"],) + (room_id,) ) self._simple_delete_many_txn( diff --git a/synapse/storage/schema/delta/54/delete_forward_extremities.sql b/synapse/storage/schema/delta/54/delete_forward_extremities.sql index 7056bd1d00..aa40f13da7 100644 --- a/synapse/storage/schema/delta/54/delete_forward_extremities.sql +++ b/synapse/storage/schema/delta/54/delete_forward_extremities.sql @@ -13,7 +13,10 @@ * limitations under the License. */ +-- Start a background job to cleanup extremities that were incorrectly added +-- by bug #5269. 
INSERT INTO background_updates (update_name, progress_json) VALUES ('delete_soft_failed_extremities', '{}'); +DROP TABLE IF EXISTS _extremities_to_check; -- To make this delta schema file idempotent. CREATE TABLE _extremities_to_check AS SELECT event_id FROM event_forward_extremities; From 5c1ece0ffcd803eb4bf8e5748d3e2633426e00a0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 11:22:59 +0100 Subject: [PATCH 247/429] Move event background updates to a separate file --- synapse/storage/__init__.py | 2 + synapse/storage/events.py | 371 +------------------------ synapse/storage/events_bg_updates.py | 401 +++++++++++++++++++++++++++ 3 files changed, 405 insertions(+), 369 deletions(-) create mode 100644 synapse/storage/events_bg_updates.py diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 66675d08ae..71316f7d09 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -36,6 +36,7 @@ from .engines import PostgresEngine from .event_federation import EventFederationStore from .event_push_actions import EventPushActionsStore from .events import EventsStore +from .events_bg_updates import EventsBackgroundUpdatesStore from .filtering import FilteringStore from .group_server import GroupServerStore from .keys import KeyStore @@ -66,6 +67,7 @@ logger = logging.getLogger(__name__) class DataStore( + EventsBackgroundUpdatesStore, RoomMemberStore, RoomStore, RegistrationStore, diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 418d88b8dc..f9162be9b9 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd -# Copyright 2018 New Vector Ltd +# Copyright 2018-2019 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -219,47 +220,11 @@ class EventsStore( EventsWorkerStore, BackgroundUpdateStore, ): - EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts" - EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url" - EVENT_DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities" def __init__(self, db_conn, hs): super(EventsStore, self).__init__(db_conn, hs) - self.register_background_update_handler( - self.EVENT_ORIGIN_SERVER_TS_NAME, self._background_reindex_origin_server_ts - ) - self.register_background_update_handler( - self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, - self._background_reindex_fields_sender, - ) - - self.register_background_index_update( - "event_contains_url_index", - index_name="event_contains_url_index", - table="events", - columns=["room_id", "topological_ordering", "stream_ordering"], - where_clause="contains_url = true AND outlier = false", - ) - - # an event_id index on event_search is useful for the purge_history - # api. 
Plus it means we get to enforce some integrity with a UNIQUE - # clause - self.register_background_index_update( - "event_search_event_id_idx", - index_name="event_search_event_id_idx", - table="event_search", - columns=["event_id"], - unique=True, - psql_only=True, - ) - - self.register_background_update_handler( - self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES, - self._cleanup_extremities_bg_update, - ) self._event_persist_queue = _EventPeristenceQueue() - self._state_resolution_handler = hs.get_state_resolution_handler() @defer.inlineCallbacks @@ -1585,153 +1550,6 @@ class EventsStore( ret = yield self.runInteraction("count_daily_active_rooms", _count) defer.returnValue(ret) - @defer.inlineCallbacks - def _background_reindex_fields_sender(self, progress, batch_size): - target_min_stream_id = progress["target_min_stream_id_inclusive"] - max_stream_id = progress["max_stream_id_exclusive"] - rows_inserted = progress.get("rows_inserted", 0) - - INSERT_CLUMP_SIZE = 1000 - - def reindex_txn(txn): - sql = ( - "SELECT stream_ordering, event_id, json FROM events" - " INNER JOIN event_json USING (event_id)" - " WHERE ? <= stream_ordering AND stream_ordering < ?" - " ORDER BY stream_ordering DESC" - " LIMIT ?" - ) - - txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size)) - - rows = txn.fetchall() - if not rows: - return 0 - - min_stream_id = rows[-1][0] - - update_rows = [] - for row in rows: - try: - event_id = row[1] - event_json = json.loads(row[2]) - sender = event_json["sender"] - content = event_json["content"] - - contains_url = "url" in content - if contains_url: - contains_url &= isinstance(content["url"], text_type) - except (KeyError, AttributeError): - # If the event is missing a necessary field then - # skip over it. - continue - - update_rows.append((sender, contains_url, event_id)) - - sql = "UPDATE events SET sender = ?, contains_url = ? WHERE event_id = ?" - - for index in range(0, len(update_rows), INSERT_CLUMP_SIZE): - clump = update_rows[index : index + INSERT_CLUMP_SIZE] - txn.executemany(sql, clump) - - progress = { - "target_min_stream_id_inclusive": target_min_stream_id, - "max_stream_id_exclusive": min_stream_id, - "rows_inserted": rows_inserted + len(rows), - } - - self._background_update_progress_txn( - txn, self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, progress - ) - - return len(rows) - - result = yield self.runInteraction( - self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, reindex_txn - ) - - if not result: - yield self._end_background_update(self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME) - - defer.returnValue(result) - - @defer.inlineCallbacks - def _background_reindex_origin_server_ts(self, progress, batch_size): - target_min_stream_id = progress["target_min_stream_id_inclusive"] - max_stream_id = progress["max_stream_id_exclusive"] - rows_inserted = progress.get("rows_inserted", 0) - - INSERT_CLUMP_SIZE = 1000 - - def reindex_search_txn(txn): - sql = ( - "SELECT stream_ordering, event_id FROM events" - " WHERE ? <= stream_ordering AND stream_ordering < ?" - " ORDER BY stream_ordering DESC" - " LIMIT ?" 
- ) - - txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size)) - - rows = txn.fetchall() - if not rows: - return 0 - - min_stream_id = rows[-1][0] - event_ids = [row[1] for row in rows] - - rows_to_update = [] - - chunks = [event_ids[i : i + 100] for i in range(0, len(event_ids), 100)] - for chunk in chunks: - ev_rows = self._simple_select_many_txn( - txn, - table="event_json", - column="event_id", - iterable=chunk, - retcols=["event_id", "json"], - keyvalues={}, - ) - - for row in ev_rows: - event_id = row["event_id"] - event_json = json.loads(row["json"]) - try: - origin_server_ts = event_json["origin_server_ts"] - except (KeyError, AttributeError): - # If the event is missing a necessary field then - # skip over it. - continue - - rows_to_update.append((origin_server_ts, event_id)) - - sql = "UPDATE events SET origin_server_ts = ? WHERE event_id = ?" - - for index in range(0, len(rows_to_update), INSERT_CLUMP_SIZE): - clump = rows_to_update[index : index + INSERT_CLUMP_SIZE] - txn.executemany(sql, clump) - - progress = { - "target_min_stream_id_inclusive": target_min_stream_id, - "max_stream_id_exclusive": min_stream_id, - "rows_inserted": rows_inserted + len(rows_to_update), - } - - self._background_update_progress_txn( - txn, self.EVENT_ORIGIN_SERVER_TS_NAME, progress - ) - - return len(rows_to_update) - - result = yield self.runInteraction( - self.EVENT_ORIGIN_SERVER_TS_NAME, reindex_search_txn - ) - - if not result: - yield self._end_background_update(self.EVENT_ORIGIN_SERVER_TS_NAME) - - defer.returnValue(result) - def get_current_backfill_token(self): """The current minimum token that backfilled events have reached""" return -self._backfill_id_gen.get_current_token() @@ -2347,191 +2165,6 @@ class EventsStore( get_all_updated_current_state_deltas_txn, ) - @defer.inlineCallbacks - def _cleanup_extremities_bg_update(self, progress, batch_size): - """Background update to clean out extremities that should have been - deleted previously. - - Mainly used to deal with the aftermath of #5269. - """ - - # This works by first copying all existing forward extremities into the - # `_extremities_to_check` table at start up, and then checking each - # event in that table whether we have any descendants that are not - # soft-failed/rejected. If that is the case then we delete that event - # from the forward extremities table. - # - # For efficiency, we do this in batches by recursively pulling out all - # descendants of a batch until we find the non soft-failed/rejected - # events, i.e. the set of descendants whose chain of prev events back - # to the batch of extremities are all soft-failed or rejected. - # Typically, we won't find any such events as extremities will rarely - # have any descendants, but if they do then we should delete those - # extremities. - - def _cleanup_extremities_bg_update_txn(txn): - # The set of extremity event IDs that we're checking this round - original_set = set() - - # A dict[str, set[str]] of event ID to their prev events. - graph = {} - - # The set of descendants of the original set that are not rejected - # nor soft-failed. Ancestors of these events should be removed - # from the forward extremities table. - non_rejected_leaves = set() - - # Set of event IDs that have been soft failed, and for which we - # should check if they have descendants which haven't been soft - # failed. 
- soft_failed_events_to_lookup = set() - - # First, we get `batch_size` events from the table, pulling out - # their successor events, if any, and their successor events - # rejection status. - txn.execute( - """SELECT prev_event_id, event_id, internal_metadata, - rejections.event_id IS NOT NULL, events.outlier - FROM ( - SELECT event_id AS prev_event_id - FROM _extremities_to_check - LIMIT ? - ) AS f - LEFT JOIN event_edges USING (prev_event_id) - LEFT JOIN events USING (event_id) - LEFT JOIN event_json USING (event_id) - LEFT JOIN rejections USING (event_id) - """, (batch_size,) - ) - - for prev_event_id, event_id, metadata, rejected, outlier in txn: - original_set.add(prev_event_id) - - if not event_id or outlier: - # Common case where the forward extremity doesn't have any - # descendants. - continue - - graph.setdefault(event_id, set()).add(prev_event_id) - - soft_failed = False - if metadata: - soft_failed = json.loads(metadata).get("soft_failed") - - if soft_failed or rejected: - soft_failed_events_to_lookup.add(event_id) - else: - non_rejected_leaves.add(event_id) - - # Now we recursively check all the soft-failed descendants we - # found above in the same way, until we have nothing left to - # check. - while soft_failed_events_to_lookup: - # We only want to do 100 at a time, so we split given list - # into two. - batch = list(soft_failed_events_to_lookup) - to_check, to_defer = batch[:100], batch[100:] - soft_failed_events_to_lookup = set(to_defer) - - sql = """SELECT prev_event_id, event_id, internal_metadata, - rejections.event_id IS NOT NULL - FROM event_edges - INNER JOIN events USING (event_id) - INNER JOIN event_json USING (event_id) - LEFT JOIN rejections USING (event_id) - WHERE - prev_event_id IN (%s) - AND NOT events.outlier - """ % ( - ",".join("?" for _ in to_check), - ) - txn.execute(sql, to_check) - - for prev_event_id, event_id, metadata, rejected in txn: - if event_id in graph: - # Already handled this event previously, but we still - # want to record the edge. - graph[event_id].add(prev_event_id) - continue - - graph[event_id] = {prev_event_id} - - soft_failed = json.loads(metadata).get("soft_failed") - if soft_failed or rejected: - soft_failed_events_to_lookup.add(event_id) - else: - non_rejected_leaves.add(event_id) - - # We have a set of non-soft-failed descendants, so we recurse up - # the graph to find all ancestors and add them to the set of event - # IDs that we can delete from forward extremities table. 
- to_delete = set() - while non_rejected_leaves: - event_id = non_rejected_leaves.pop() - prev_event_ids = graph.get(event_id, set()) - non_rejected_leaves.update(prev_event_ids) - to_delete.update(prev_event_ids) - - to_delete.intersection_update(original_set) - - deleted = self._simple_delete_many_txn( - txn=txn, - table="event_forward_extremities", - column="event_id", - iterable=to_delete, - keyvalues={}, - ) - - logger.info( - "Deleted %d forward extremities of %d checked, to clean up #5269", - deleted, - len(original_set), - ) - - if deleted: - # We now need to invalidate the caches of these rooms - rows = self._simple_select_many_txn( - txn, - table="events", - column="event_id", - iterable=to_delete, - keyvalues={}, - retcols=("room_id",) - ) - room_ids = set(row["room_id"] for row in rows) - for room_id in room_ids: - txn.call_after( - self.get_latest_event_ids_in_room.invalidate, - (room_id,) - ) - - self._simple_delete_many_txn( - txn=txn, - table="_extremities_to_check", - column="event_id", - iterable=original_set, - keyvalues={}, - ) - - return len(original_set) - - num_handled = yield self.runInteraction( - "_cleanup_extremities_bg_update", _cleanup_extremities_bg_update_txn, - ) - - if not num_handled: - yield self._end_background_update(self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES) - - def _drop_table_txn(txn): - txn.execute("DROP TABLE _extremities_to_check") - - yield self.runInteraction( - "_cleanup_extremities_bg_update_drop_table", - _drop_table_txn, - ) - - defer.returnValue(num_handled) - AllNewEventsResult = namedtuple( "AllNewEventsResult", diff --git a/synapse/storage/events_bg_updates.py b/synapse/storage/events_bg_updates.py new file mode 100644 index 0000000000..2eba106abf --- /dev/null +++ b/synapse/storage/events_bg_updates.py @@ -0,0 +1,401 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging + +from six import text_type + +from canonicaljson import json + +from twisted.internet import defer + +from synapse.storage.background_updates import BackgroundUpdateStore + +logger = logging.getLogger(__name__) + + +class EventsBackgroundUpdatesStore(BackgroundUpdateStore): + + EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts" + EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url" + EVENT_DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities" + + def __init__(self, db_conn, hs): + super(EventsBackgroundUpdatesStore, self).__init__(db_conn, hs) + + self.register_background_update_handler( + self.EVENT_ORIGIN_SERVER_TS_NAME, self._background_reindex_origin_server_ts + ) + self.register_background_update_handler( + self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, + self._background_reindex_fields_sender, + ) + + self.register_background_index_update( + "event_contains_url_index", + index_name="event_contains_url_index", + table="events", + columns=["room_id", "topological_ordering", "stream_ordering"], + where_clause="contains_url = true AND outlier = false", + ) + + # an event_id index on event_search is useful for the purge_history + # api. Plus it means we get to enforce some integrity with a UNIQUE + # clause + self.register_background_index_update( + "event_search_event_id_idx", + index_name="event_search_event_id_idx", + table="event_search", + columns=["event_id"], + unique=True, + psql_only=True, + ) + + self.register_background_update_handler( + self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES, + self._cleanup_extremities_bg_update, + ) + + @defer.inlineCallbacks + def _background_reindex_fields_sender(self, progress, batch_size): + target_min_stream_id = progress["target_min_stream_id_inclusive"] + max_stream_id = progress["max_stream_id_exclusive"] + rows_inserted = progress.get("rows_inserted", 0) + + INSERT_CLUMP_SIZE = 1000 + + def reindex_txn(txn): + sql = ( + "SELECT stream_ordering, event_id, json FROM events" + " INNER JOIN event_json USING (event_id)" + " WHERE ? <= stream_ordering AND stream_ordering < ?" + " ORDER BY stream_ordering DESC" + " LIMIT ?" + ) + + txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size)) + + rows = txn.fetchall() + if not rows: + return 0 + + min_stream_id = rows[-1][0] + + update_rows = [] + for row in rows: + try: + event_id = row[1] + event_json = json.loads(row[2]) + sender = event_json["sender"] + content = event_json["content"] + + contains_url = "url" in content + if contains_url: + contains_url &= isinstance(content["url"], text_type) + except (KeyError, AttributeError): + # If the event is missing a necessary field then + # skip over it. + continue + + update_rows.append((sender, contains_url, event_id)) + + sql = "UPDATE events SET sender = ?, contains_url = ? WHERE event_id = ?" 
+ + for index in range(0, len(update_rows), INSERT_CLUMP_SIZE): + clump = update_rows[index : index + INSERT_CLUMP_SIZE] + txn.executemany(sql, clump) + + progress = { + "target_min_stream_id_inclusive": target_min_stream_id, + "max_stream_id_exclusive": min_stream_id, + "rows_inserted": rows_inserted + len(rows), + } + + self._background_update_progress_txn( + txn, self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, progress + ) + + return len(rows) + + result = yield self.runInteraction( + self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, reindex_txn + ) + + if not result: + yield self._end_background_update(self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME) + + defer.returnValue(result) + + @defer.inlineCallbacks + def _background_reindex_origin_server_ts(self, progress, batch_size): + target_min_stream_id = progress["target_min_stream_id_inclusive"] + max_stream_id = progress["max_stream_id_exclusive"] + rows_inserted = progress.get("rows_inserted", 0) + + INSERT_CLUMP_SIZE = 1000 + + def reindex_search_txn(txn): + sql = ( + "SELECT stream_ordering, event_id FROM events" + " WHERE ? <= stream_ordering AND stream_ordering < ?" + " ORDER BY stream_ordering DESC" + " LIMIT ?" + ) + + txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size)) + + rows = txn.fetchall() + if not rows: + return 0 + + min_stream_id = rows[-1][0] + event_ids = [row[1] for row in rows] + + rows_to_update = [] + + chunks = [event_ids[i : i + 100] for i in range(0, len(event_ids), 100)] + for chunk in chunks: + ev_rows = self._simple_select_many_txn( + txn, + table="event_json", + column="event_id", + iterable=chunk, + retcols=["event_id", "json"], + keyvalues={}, + ) + + for row in ev_rows: + event_id = row["event_id"] + event_json = json.loads(row["json"]) + try: + origin_server_ts = event_json["origin_server_ts"] + except (KeyError, AttributeError): + # If the event is missing a necessary field then + # skip over it. + continue + + rows_to_update.append((origin_server_ts, event_id)) + + sql = "UPDATE events SET origin_server_ts = ? WHERE event_id = ?" + + for index in range(0, len(rows_to_update), INSERT_CLUMP_SIZE): + clump = rows_to_update[index : index + INSERT_CLUMP_SIZE] + txn.executemany(sql, clump) + + progress = { + "target_min_stream_id_inclusive": target_min_stream_id, + "max_stream_id_exclusive": min_stream_id, + "rows_inserted": rows_inserted + len(rows_to_update), + } + + self._background_update_progress_txn( + txn, self.EVENT_ORIGIN_SERVER_TS_NAME, progress + ) + + return len(rows_to_update) + + result = yield self.runInteraction( + self.EVENT_ORIGIN_SERVER_TS_NAME, reindex_search_txn + ) + + if not result: + yield self._end_background_update(self.EVENT_ORIGIN_SERVER_TS_NAME) + + defer.returnValue(result) + + @defer.inlineCallbacks + def _cleanup_extremities_bg_update(self, progress, batch_size): + """Background update to clean out extremities that should have been + deleted previously. + + Mainly used to deal with the aftermath of #5269. + """ + + # This works by first copying all existing forward extremities into the + # `_extremities_to_check` table at start up, and then checking each + # event in that table whether we have any descendants that are not + # soft-failed/rejected. If that is the case then we delete that event + # from the forward extremities table. + # + # For efficiency, we do this in batches by recursively pulling out all + # descendants of a batch until we find the non soft-failed/rejected + # events, i.e. 
the set of descendants whose chain of prev events back + # to the batch of extremities are all soft-failed or rejected. + # Typically, we won't find any such events as extremities will rarely + # have any descendants, but if they do then we should delete those + # extremities. + + def _cleanup_extremities_bg_update_txn(txn): + # The set of extremity event IDs that we're checking this round + original_set = set() + + # A dict[str, set[str]] of event ID to their prev events. + graph = {} + + # The set of descendants of the original set that are not rejected + # nor soft-failed. Ancestors of these events should be removed + # from the forward extremities table. + non_rejected_leaves = set() + + # Set of event IDs that have been soft failed, and for which we + # should check if they have descendants which haven't been soft + # failed. + soft_failed_events_to_lookup = set() + + # First, we get `batch_size` events from the table, pulling out + # their successor events, if any, and their successor events + # rejection status. + txn.execute( + """SELECT prev_event_id, event_id, internal_metadata, + rejections.event_id IS NOT NULL, events.outlier + FROM ( + SELECT event_id AS prev_event_id + FROM _extremities_to_check + LIMIT ? + ) AS f + LEFT JOIN event_edges USING (prev_event_id) + LEFT JOIN events USING (event_id) + LEFT JOIN event_json USING (event_id) + LEFT JOIN rejections USING (event_id) + """, (batch_size,) + ) + + for prev_event_id, event_id, metadata, rejected, outlier in txn: + original_set.add(prev_event_id) + + if not event_id or outlier: + # Common case where the forward extremity doesn't have any + # descendants. + continue + + graph.setdefault(event_id, set()).add(prev_event_id) + + soft_failed = False + if metadata: + soft_failed = json.loads(metadata).get("soft_failed") + + if soft_failed or rejected: + soft_failed_events_to_lookup.add(event_id) + else: + non_rejected_leaves.add(event_id) + + # Now we recursively check all the soft-failed descendants we + # found above in the same way, until we have nothing left to + # check. + while soft_failed_events_to_lookup: + # We only want to do 100 at a time, so we split given list + # into two. + batch = list(soft_failed_events_to_lookup) + to_check, to_defer = batch[:100], batch[100:] + soft_failed_events_to_lookup = set(to_defer) + + sql = """SELECT prev_event_id, event_id, internal_metadata, + rejections.event_id IS NOT NULL + FROM event_edges + INNER JOIN events USING (event_id) + INNER JOIN event_json USING (event_id) + LEFT JOIN rejections USING (event_id) + WHERE + prev_event_id IN (%s) + AND NOT events.outlier + """ % ( + ",".join("?" for _ in to_check), + ) + txn.execute(sql, to_check) + + for prev_event_id, event_id, metadata, rejected in txn: + if event_id in graph: + # Already handled this event previously, but we still + # want to record the edge. + graph[event_id].add(prev_event_id) + continue + + graph[event_id] = {prev_event_id} + + soft_failed = json.loads(metadata).get("soft_failed") + if soft_failed or rejected: + soft_failed_events_to_lookup.add(event_id) + else: + non_rejected_leaves.add(event_id) + + # We have a set of non-soft-failed descendants, so we recurse up + # the graph to find all ancestors and add them to the set of event + # IDs that we can delete from forward extremities table. 
+ to_delete = set() + while non_rejected_leaves: + event_id = non_rejected_leaves.pop() + prev_event_ids = graph.get(event_id, set()) + non_rejected_leaves.update(prev_event_ids) + to_delete.update(prev_event_ids) + + to_delete.intersection_update(original_set) + + deleted = self._simple_delete_many_txn( + txn=txn, + table="event_forward_extremities", + column="event_id", + iterable=to_delete, + keyvalues={}, + ) + + logger.info( + "Deleted %d forward extremities of %d checked, to clean up #5269", + deleted, + len(original_set), + ) + + if deleted: + # We now need to invalidate the caches of these rooms + rows = self._simple_select_many_txn( + txn, + table="events", + column="event_id", + iterable=to_delete, + keyvalues={}, + retcols=("room_id",) + ) + room_ids = set(row["room_id"] for row in rows) + for room_id in room_ids: + txn.call_after( + self.get_latest_event_ids_in_room.invalidate, + (room_id,) + ) + + self._simple_delete_many_txn( + txn=txn, + table="_extremities_to_check", + column="event_id", + iterable=original_set, + keyvalues={}, + ) + + return len(original_set) + + num_handled = yield self.runInteraction( + "_cleanup_extremities_bg_update", _cleanup_extremities_bg_update_txn, + ) + + if not num_handled: + yield self._end_background_update(self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES) + + def _drop_table_txn(txn): + txn.execute("DROP TABLE _extremities_to_check") + + yield self.runInteraction( + "_cleanup_extremities_bg_update_drop_table", + _drop_table_txn, + ) + + defer.returnValue(num_handled) From 468bd090ff354f27597e08b54a969f22afbfad43 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 11:24:42 +0100 Subject: [PATCH 248/429] Rename constant --- synapse/storage/events_bg_updates.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/storage/events_bg_updates.py b/synapse/storage/events_bg_updates.py index 2eba106abf..22aac1393d 100644 --- a/synapse/storage/events_bg_updates.py +++ b/synapse/storage/events_bg_updates.py @@ -30,7 +30,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts" EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url" - EVENT_DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities" + DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities" def __init__(self, db_conn, hs): super(EventsBackgroundUpdatesStore, self).__init__(db_conn, hs) @@ -64,7 +64,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): ) self.register_background_update_handler( - self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES, + self.DELETE_SOFT_FAILED_EXTREMITIES, self._cleanup_extremities_bg_update, ) @@ -388,7 +388,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): ) if not num_handled: - yield self._end_background_update(self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES) + yield self._end_background_update(self.DELETE_SOFT_FAILED_EXTREMITIES) def _drop_table_txn(txn): txn.execute("DROP TABLE _extremities_to_check") From cb967e2346096d7e647c757e3b57093549746f14 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 14:06:42 +0100 Subject: [PATCH 249/429] Update synapse/storage/events_bg_updates.py Co-Authored-By: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- synapse/storage/events_bg_updates.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/events_bg_updates.py b/synapse/storage/events_bg_updates.py index 22aac1393d..75c1935bf3 100644 --- 
a/synapse/storage/events_bg_updates.py +++ b/synapse/storage/events_bg_updates.py @@ -255,7 +255,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): soft_failed_events_to_lookup = set() # First, we get `batch_size` events from the table, pulling out - # their successor events, if any, and their successor events + # their successor events, if any, and the successor events' # rejection status. txn.execute( """SELECT prev_event_id, event_id, internal_metadata, From 6cdfb0207e2de72286a7a8d3b3c417c2808e90dc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 14:54:56 +0100 Subject: [PATCH 250/429] Add index to temp table --- synapse/storage/schema/delta/54/delete_forward_extremities.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/storage/schema/delta/54/delete_forward_extremities.sql b/synapse/storage/schema/delta/54/delete_forward_extremities.sql index aa40f13da7..b062ec840c 100644 --- a/synapse/storage/schema/delta/54/delete_forward_extremities.sql +++ b/synapse/storage/schema/delta/54/delete_forward_extremities.sql @@ -20,3 +20,4 @@ INSERT INTO background_updates (update_name, progress_json) VALUES DROP TABLE IF EXISTS _extremities_to_check; -- To make this delta schema file idempotent. CREATE TABLE _extremities_to_check AS SELECT event_id FROM event_forward_extremities; +CREATE INDEX _extremities_to_check_id ON _extremities_to_check(event_id); From 06675db684f06b5a369846aac896216bf4cc74ed Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 15:05:26 +0100 Subject: [PATCH 251/429] Newsfile --- changelog.d/5291.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5291.bugfix diff --git a/changelog.d/5291.bugfix b/changelog.d/5291.bugfix new file mode 100644 index 0000000000..9e14d20289 --- /dev/null +++ b/changelog.d/5291.bugfix @@ -0,0 +1 @@ +Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. 
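The heart of the cleanup background update above is its final graph walk: starting from the descendants that are neither rejected nor soft failed, it recurses up through their prev events, marks every ancestor it reaches, and intersects that set with the extremities being checked. The following is a rough, in-memory Python sketch of that walk; the function name, arguments and the tiny demo graph are made up for illustration and are not the Synapse storage code itself.

    def extremities_to_delete(graph, non_rejected_leaves, original_set):
        """Return the subset of `original_set` that has a surviving
        (non-rejected, non-soft-failed) descendant and can therefore be
        dropped from the forward extremities table."""
        to_delete = set()
        leaves = set(non_rejected_leaves)
        while leaves:
            event_id = leaves.pop()
            prev_event_ids = graph.get(event_id, set())
            # Any ancestor of a surviving event is no longer a legitimate
            # forward extremity, and its own ancestors need checking too.
            leaves.update(prev_event_ids)
            to_delete.update(prev_event_ids)
        # Only events that were actually being checked can be deleted.
        return to_delete & original_set

    # A <- SF1 <- B, where SF1 is soft failed: B is a surviving descendant,
    # so A should be removed and B kept, as in the tests further down.
    assert extremities_to_delete(
        {"SF1": {"A"}, "B": {"SF1"}}, {"B"}, {"A", "B"}
    ) == {"A"}
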
From 54d50fbfdf8c39d92c36291a572419cee6b9b916 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 15:15:13 +0100 Subject: [PATCH 252/429] Get events all at once --- synapse/storage/stats.py | 57 ++++++++++++++++++---------------------- 1 file changed, 25 insertions(+), 32 deletions(-) diff --git a/synapse/storage/stats.py b/synapse/storage/stats.py index eb0ced5b5e..99b4af5555 100644 --- a/synapse/storage/stats.py +++ b/synapse/storage/stats.py @@ -179,46 +179,39 @@ class StatsStore(StateDeltasStore): current_state_ids = yield self.get_current_state_ids(room_id) - join_rules = yield self.get_event( - current_state_ids.get((EventTypes.JoinRules, "")), allow_none=True - ) - history_visibility = yield self.get_event( - current_state_ids.get((EventTypes.RoomHistoryVisibility, "")), - allow_none=True, - ) - encryption = yield self.get_event( - current_state_ids.get((EventTypes.RoomEncryption, "")), allow_none=True - ) - name = yield self.get_event( - current_state_ids.get((EventTypes.Name, "")), allow_none=True - ) - topic = yield self.get_event( - current_state_ids.get((EventTypes.Topic, "")), allow_none=True - ) - avatar = yield self.get_event( - current_state_ids.get((EventTypes.RoomAvatar, "")), allow_none=True - ) - canonical_alias = yield self.get_event( - current_state_ids.get((EventTypes.CanonicalAlias, "")), allow_none=True + join_rules_id = current_state_ids.get((EventTypes.JoinRules, "")) + history_visibility_id = current_state_ids.get( + (EventTypes.RoomHistoryVisibility, "") ) + encryption_id = current_state_ids.get((EventTypes.RoomEncryption, "")) + name_id = current_state_ids.get((EventTypes.Name, "")) + topic_id = current_state_ids.get((EventTypes.Topic, "")) + avatar_id = current_state_ids.get((EventTypes.RoomAvatar, "")) + canonical_alias_id = current_state_ids.get((EventTypes.CanonicalAlias, "")) - def _or_none(x, arg): - if x: - return x.content.get(arg) + state_events = yield self.get_events([ + join_rules_id, history_visibility_id, encryption_id, name_id, + topic_id, avatar_id, canonical_alias_id, + ]) + + def _get_or_none(event_id, arg): + event = state_events.get(event_id) + if event: + return event.content.get(arg) return None yield self.update_room_state( room_id, { - "join_rules": _or_none(join_rules, "join_rule"), - "history_visibility": _or_none( - history_visibility, "history_visibility" + "join_rules": _get_or_none(join_rules_id, "join_rule"), + "history_visibility": _get_or_none( + history_visibility_id, "history_visibility" ), - "encryption": _or_none(encryption, "algorithm"), - "name": _or_none(name, "name"), - "topic": _or_none(topic, "topic"), - "avatar": _or_none(avatar, "url"), - "canonical_alias": _or_none(canonical_alias, "alias"), + "encryption": _get_or_none(encryption_id, "algorithm"), + "name": _get_or_none(name_id, "name"), + "topic": _get_or_none(topic_id, "topic"), + "avatar": _get_or_none(avatar_id, "url"), + "canonical_alias": _get_or_none(canonical_alias_id, "alias"), }, ) From 04710cc2d71127b1f416e87f7a4aea3ce6d93410 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 15:22:32 +0100 Subject: [PATCH 253/429] Fetch membership counts all at once --- synapse/storage/roommember.py | 33 +++++++++++---------------------- synapse/storage/stats.py | 23 +++++++---------------- 2 files changed, 18 insertions(+), 38 deletions(-) diff --git a/synapse/storage/roommember.py b/synapse/storage/roommember.py index 4bd1669458..7617913326 100644 --- a/synapse/storage/roommember.py +++ b/synapse/storage/roommember.py @@ -142,26 
+142,9 @@ class RoomMemberWorkerStore(EventsWorkerStore): return self.runInteraction("get_room_summary", _get_room_summary_txn) - def _get_user_count_in_room_txn(self, txn, room_id, membership): + def _get_user_counts_in_room_txn(self, txn, room_id): """ - See get_user_count_in_room. - """ - sql = ( - "SELECT count(*) FROM room_memberships as m" - " INNER JOIN current_state_events as c" - " ON m.event_id = c.event_id " - " AND m.room_id = c.room_id " - " AND m.user_id = c.state_key" - " WHERE c.type = 'm.room.member' AND c.room_id = ? AND m.membership = ?" - ) - - txn.execute(sql, (room_id, membership)) - row = txn.fetchone() - return row[0] - - def get_user_count_in_room(self, room_id, membership): - """ - Get the user count in a room with a particular membership. + Get the user count in a room by membership. Args: room_id (str) @@ -170,9 +153,15 @@ class RoomMemberWorkerStore(EventsWorkerStore): Returns: Deferred[int] """ - return self.runInteraction( - "get_users_in_room", self._get_user_count_in_room_txn, room_id, membership - ) + sql = """ + SELECT m.membership, count(*) FROM room_memberships as m + INNER JOIN current_state_events as c USING(event_id) + WHERE c.type = 'm.room.member' AND c.room_id = ? + GROUP BY m.membership + """ + + txn.execute(sql, (room_id,)) + return {row[0]: row[1] for row in txn} @cached() def get_invited_rooms_for_user(self, user_id): diff --git a/synapse/storage/stats.py b/synapse/storage/stats.py index 99b4af5555..727f60b3bd 100644 --- a/synapse/storage/stats.py +++ b/synapse/storage/stats.py @@ -226,18 +226,9 @@ class StatsStore(StateDeltasStore): current_token = self._get_max_stream_id_in_current_state_deltas_txn(txn) current_state_events = len(current_state_ids) - joined_members = self._get_user_count_in_room_txn( - txn, room_id, Membership.JOIN - ) - invited_members = self._get_user_count_in_room_txn( - txn, room_id, Membership.INVITE - ) - left_members = self._get_user_count_in_room_txn( - txn, room_id, Membership.LEAVE - ) - banned_members = self._get_user_count_in_room_txn( - txn, room_id, Membership.BAN - ) + + membership_counts = self._get_user_counts_in_room_txn(txn, room_id) + total_state_events = self._get_total_state_event_counts_txn( txn, room_id ) @@ -250,10 +241,10 @@ class StatsStore(StateDeltasStore): { "bucket_size": self.stats_bucket_size, "current_state_events": current_state_events, - "joined_members": joined_members, - "invited_members": invited_members, - "left_members": left_members, - "banned_members": banned_members, + "joined_members": membership_counts.get(Membership.JOIN, 0), + "invited_members": membership_counts.get(Membership.INVITE, 0), + "left_members": membership_counts.get(Membership.LEAVE, 0), + "banned_members": membership_counts.get(Membership.BAN, 0), "state_events": total_state_events, }, ) From e2c46ed851599dc08cc8a822e07c0d4f9a050ee2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 15:26:38 +0100 Subject: [PATCH 254/429] Move deletion from table inside txn --- synapse/storage/stats.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/synapse/storage/stats.py b/synapse/storage/stats.py index 727f60b3bd..a99637d4b4 100644 --- a/synapse/storage/stats.py +++ b/synapse/storage/stats.py @@ -254,10 +254,13 @@ class StatsStore(StateDeltasStore): {"room_id": room_id, "token": current_token}, ) + # We've finished a room. Delete it from the table. 
+ self._simple_delete_one_txn( + txn, TEMP_TABLE + "_rooms", {"room_id": room_id}, + ) + yield self.runInteraction("update_room_stats", _fetch_data) - # We've finished a room. Delete it from the table. - yield self._simple_delete_one(TEMP_TABLE + "_rooms", {"room_id": room_id}) # Update the remaining counter. progress["remaining"] -= 1 yield self.runInteraction( From 5ac75fc9a2d80ddf2974d281c381f82515606403 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 15:26:55 +0100 Subject: [PATCH 255/429] Join against events to use its room_id index --- synapse/storage/events_worker.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index b56c83e460..1782428048 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -618,7 +618,12 @@ class EventsWorkerStore(SQLBaseStore): """ See get_total_state_event_counts. """ - sql = "SELECT COUNT(*) FROM state_events WHERE room_id=?" + # We join against the events table as that has an index on room_id + sql = """ + SELECT COUNT(*) FROM state_events + INNER JOIN events USING (room_id, event_id) + WHERE room_id=? + """ txn.execute(sql, (room_id,)) row = txn.fetchone() return row[0] if row else 0 From 8824325b829baa5262242a50d0ea2c9b738feb78 Mon Sep 17 00:00:00 2001 From: Eisha Chen-yen-su Date: Thu, 30 May 2019 16:58:53 +0200 Subject: [PATCH 256/429] Fix ignored filter field in `/messages` endpoint This fixes a bug which were causing the "event_format" field to be ignored in the filter of requests to the `/messages` endpoint of the CS API. Signed-off-by: Eisha Chen-yen-su --- synapse/rest/client/v1/room.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 255a85c588..b92c6a9a9c 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -475,6 +475,8 @@ class RoomMessageListRestServlet(ClientV1RestServlet): if filter_bytes: filter_json = urlparse.unquote(filter_bytes.decode("UTF-8")) event_filter = Filter(json.loads(filter_json)) + if event_filter.filter_json.get("event_format", "client") == "federation": + as_client_event = False else: event_filter = None msgs = yield self.pagination_handler.get_messages( From 0b6bc36402b747a6c1bad119aaffdcd326990346 Mon Sep 17 00:00:00 2001 From: Eisha Chen-yen-su Date: Thu, 30 May 2019 17:07:21 +0200 Subject: [PATCH 257/429] Add changelog --- changelog.d/5293.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5293.bugfix diff --git a/changelog.d/5293.bugfix b/changelog.d/5293.bugfix new file mode 100644 index 0000000000..aa519a8433 --- /dev/null +++ b/changelog.d/5293.bugfix @@ -0,0 +1 @@ +Fix a bug where it is not possible to get events in the federation format with the request `GET /_matrix/client/r0/rooms/{roomId}/messages`. From df9d9005448d837c5e1a2b75edb5730e2062b0f2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 29 May 2019 11:56:24 +0100 Subject: [PATCH 258/429] Correctly filter out extremities with soft failed prevs (#5274) When we receive a soft failed event we, correctly, *do not* update the forward extremity table with the event. However, if we later receive an event that references the soft failed event we then need to remove the soft failed events prev events from the forward extremities table, otherwise we just build up forward extremities. 
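As a toy illustration (plain event-ID sets, not the actual implementation): take A <- SF1 <- SF2 <- B, where SF1 and SF2 are soft failed.

    extremities = {"A"}

    # SF1 and SF2 arrive soft failed and are, correctly, never added.
    # B references SF2; previously only B's direct prev events were
    # removed, and since SF2 was never an extremity nothing went away,
    # leaving A dangling next to B.
    extremities = (extremities | {"B"}) - {"SF2"}
    assert extremities == {"A", "B"}

    # With this change we also follow B's prev events back through the
    # soft failed chain (SF2 -> SF1 -> A) and remove what we reach.
    prevs_reached_via_soft_failed = {"SF1", "A"}
    extremities -= prevs_reached_via_soft_failed
    assert extremities == {"B"}
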
Fixes #5269 --- changelog.d/5274.bugfix | 1 + synapse/storage/events.py | 82 +++++++++++++++++++++++++++++++++++++-- 2 files changed, 80 insertions(+), 3 deletions(-) create mode 100644 changelog.d/5274.bugfix diff --git a/changelog.d/5274.bugfix b/changelog.d/5274.bugfix new file mode 100644 index 0000000000..9e14d20289 --- /dev/null +++ b/changelog.d/5274.bugfix @@ -0,0 +1 @@ +Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 2ffc27ff41..6e9f3d1dc0 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -554,10 +554,18 @@ class EventsStore( e_id for event in new_events for e_id in event.prev_event_ids() ) - # Finally, remove any events which are prev_events of any existing events. + # Remove any events which are prev_events of any existing events. existing_prevs = yield self._get_events_which_are_prevs(result) result.difference_update(existing_prevs) + # Finally handle the case where the new events have soft-failed prev + # events. If they do we need to remove them and their prev events, + # otherwise we end up with dangling extremities. + existing_prevs = yield self._get_prevs_before_rejected( + e_id for event in new_events for e_id in event.prev_event_ids() + ) + result.difference_update(existing_prevs) + defer.returnValue(result) @defer.inlineCallbacks @@ -573,7 +581,7 @@ class EventsStore( """ results = [] - def _get_events(txn, batch): + def _get_events_which_are_prevs_txn(txn, batch): sql = """ SELECT prev_event_id, internal_metadata FROM event_edges @@ -596,10 +604,78 @@ class EventsStore( ) for chunk in batch_iter(event_ids, 100): - yield self.runInteraction("_get_events_which_are_prevs", _get_events, chunk) + yield self.runInteraction( + "_get_events_which_are_prevs", + _get_events_which_are_prevs_txn, + chunk, + ) defer.returnValue(results) + @defer.inlineCallbacks + def _get_prevs_before_rejected(self, event_ids): + """Get soft-failed ancestors to remove from the extremities. + + Given a set of events, find all those that have been soft-failed or + rejected. Returns those soft failed/rejected events and their prev + events (whether soft-failed/rejected or not), and recurses up the + prev-event graph until it finds no more soft-failed/rejected events. + + This is used to find extremities that are ancestors of new events, but + are separated by soft failed events. + + Args: + event_ids (Iterable[str]): Events to find prev events for. Note + that these must have already been persisted. + + Returns: + Deferred[set[str]] + """ + + # The set of event_ids to return. This includes all soft-failed events + # and their prev events. + existing_prevs = set() + + def _get_prevs_before_rejected_txn(txn, batch): + to_recursively_check = batch + + while to_recursively_check: + sql = """ + SELECT + event_id, prev_event_id, internal_metadata, + rejections.event_id IS NOT NULL + FROM event_edges + INNER JOIN events USING (event_id) + LEFT JOIN rejections USING (event_id) + LEFT JOIN event_json USING (event_id) + WHERE + event_id IN (%s) + AND NOT events.outlier + """ % ( + ",".join("?" 
for _ in to_recursively_check), + ) + + txn.execute(sql, to_recursively_check) + to_recursively_check = [] + + for event_id, prev_event_id, metadata, rejected in txn: + if prev_event_id in existing_prevs: + continue + + soft_failed = json.loads(metadata).get("soft_failed") + if soft_failed or rejected: + to_recursively_check.append(prev_event_id) + existing_prevs.add(prev_event_id) + + for chunk in batch_iter(event_ids, 100): + yield self.runInteraction( + "_get_prevs_before_rejected", + _get_prevs_before_rejected_txn, + chunk, + ) + + defer.returnValue(existing_prevs) + @defer.inlineCallbacks def _get_new_state_after_events( self, room_id, events_context, old_latest_event_ids, new_latest_event_ids From 6ebc08c09d4ced251750cb087aa4689f90cdd4b6 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 28 May 2019 18:52:41 +0100 Subject: [PATCH 259/429] Add DB bg update to cleanup extremities. Due to #5269 we may have extremities in our DB that we shouldn't have, so lets add a cleanup task such to remove those. --- synapse/storage/events.py | 186 ++++++++++++++++++ .../delta/54/delete_forward_extremities.sql | 19 ++ 2 files changed, 205 insertions(+) create mode 100644 synapse/storage/schema/delta/54/delete_forward_extremities.sql diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 6e9f3d1dc0..a9be143bd5 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -221,6 +221,7 @@ class EventsStore( ): EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts" EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url" + EVENT_DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities" def __init__(self, db_conn, hs): super(EventsStore, self).__init__(db_conn, hs) @@ -252,6 +253,11 @@ class EventsStore( psql_only=True, ) + self.register_background_update_handler( + self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES, + self._cleanup_extremities_bg_update, + ) + self._event_persist_queue = _EventPeristenceQueue() self._state_resolution_handler = hs.get_state_resolution_handler() @@ -2341,6 +2347,186 @@ class EventsStore( get_all_updated_current_state_deltas_txn, ) + @defer.inlineCallbacks + def _cleanup_extremities_bg_update(self, progress, batch_size): + """Background update to clean out extremities that should have been + deleted previously. + + Mainly used to deal with the aftermath of #5269. + """ + + # This works by first copying all existing forward extremities into the + # `_extremities_to_check` table at start up, and then checking each + # event in that table whether we have any descendants that are not + # soft-failed/rejected. If that is the case then we delete that event + # from the forward extremities table. + # + # For efficiency, we do this in batches by recursively pulling out all + # descendants of a batch until we find the non soft-failed/rejected + # events, i.e. the set of descendants whose chain of prev events back + # to the batch of extremities are all soft-failed or rejected. + # Typically, we won't find any such events as extremities will rarely + # have any descendants, but if they do then we should delete those + # extremities. + + def _cleanup_extremities_bg_update_txn(txn): + # The set of extremity event IDs that we're checking this round + original_set = set() + + # A dict[str, set[str]] of event ID to their prev events. + graph = {} + + # The set of descendants of the original set that are not rejected + # nor soft-failed. Ancestors of these events should be removed + # from the forward extremities table. 
+ non_rejected_leaves = set() + + # Set of event IDs that have been soft failed, and for which we + # should check if they have descendants which haven't been soft + # failed. + soft_failed_events_to_lookup = set() + + # First, we get `batch_size` events from the table, pulling out + # their prev events, if any, and their prev events rejection status. + txn.execute( + """SELECT prev_event_id, event_id, internal_metadata, + rejections.event_id IS NOT NULL, events.outlier + FROM ( + SELECT event_id AS prev_event_id + FROM _extremities_to_check + LIMIT ? + ) AS f + LEFT JOIN event_edges USING (prev_event_id) + LEFT JOIN events USING (event_id) + LEFT JOIN event_json USING (event_id) + LEFT JOIN rejections USING (event_id) + """, (batch_size,) + ) + + for prev_event_id, event_id, metadata, rejected, outlier in txn: + original_set.add(prev_event_id) + + if not event_id or outlier: + # Common case where the forward extremity doesn't have any + # descendants. + continue + + graph.setdefault(event_id, set()).add(prev_event_id) + + soft_failed = False + if metadata: + soft_failed = json.loads(metadata).get("soft_failed") + + if soft_failed or rejected: + soft_failed_events_to_lookup.add(event_id) + else: + non_rejected_leaves.add(event_id) + + # Now we recursively check all the soft-failed descendants we + # found above in the same way, until we have nothing left to + # check. + while soft_failed_events_to_lookup: + # We only want to do 100 at a time, so we split given list + # into two. + batch = list(soft_failed_events_to_lookup) + to_check, to_defer = batch[:100], batch[100:] + soft_failed_events_to_lookup = set(to_defer) + + sql = """SELECT prev_event_id, event_id, internal_metadata, + rejections.event_id IS NOT NULL + FROM event_edges + INNER JOIN events USING (event_id) + INNER JOIN event_json USING (event_id) + LEFT JOIN rejections USING (event_id) + WHERE + prev_event_id IN (%s) + AND NOT events.outlier + """ % ( + ",".join("?" for _ in to_check), + ) + txn.execute(sql, to_check) + + for prev_event_id, event_id, metadata, rejected in txn: + if event_id in graph: + # Already handled this event previously, but we still + # want to record the edge. + graph.setdefault(event_id, set()).add(prev_event_id) + logger.info("Already handled") + continue + + graph.setdefault(event_id, set()).add(prev_event_id) + + soft_failed = json.loads(metadata).get("soft_failed") + if soft_failed or rejected: + soft_failed_events_to_lookup.add(event_id) + else: + non_rejected_leaves.add(event_id) + + # We have a set of non-soft-failed descendants, so we recurse up + # the graph to find all ancestors and add them to the set of event + # IDs that we can delete from forward extremities table. 
+ to_delete = set() + while non_rejected_leaves: + event_id = non_rejected_leaves.pop() + prev_event_ids = graph.get(event_id, set()) + non_rejected_leaves.update(prev_event_ids) + to_delete.update(prev_event_ids) + + to_delete.intersection_update(original_set) + + logger.info("Deleting up to %d forward extremities", len(to_delete)) + + self._simple_delete_many_txn( + txn=txn, + table="event_forward_extremities", + column="event_id", + iterable=to_delete, + keyvalues={}, + ) + + if to_delete: + # We now need to invalidate the caches of these rooms + rows = self._simple_select_many_txn( + txn, + table="events", + column="event_id", + iterable=to_delete, + keyvalues={}, + retcols=("room_id",) + ) + for row in rows: + txn.call_after( + self.get_latest_event_ids_in_room.invalidate, + (row["room_id"],) + ) + + self._simple_delete_many_txn( + txn=txn, + table="_extremities_to_check", + column="event_id", + iterable=original_set, + keyvalues={}, + ) + + return len(original_set) + + num_handled = yield self.runInteraction( + "_cleanup_extremities_bg_update", _cleanup_extremities_bg_update_txn, + ) + + if not num_handled: + yield self._end_background_update(self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES) + + def _drop_table_txn(txn): + txn.execute("DROP TABLE _extremities_to_check") + + yield self.runInteraction( + "_cleanup_extremities_bg_update_drop_table", + _drop_table_txn, + ) + + defer.returnValue(num_handled) + AllNewEventsResult = namedtuple( "AllNewEventsResult", diff --git a/synapse/storage/schema/delta/54/delete_forward_extremities.sql b/synapse/storage/schema/delta/54/delete_forward_extremities.sql new file mode 100644 index 0000000000..7056bd1d00 --- /dev/null +++ b/synapse/storage/schema/delta/54/delete_forward_extremities.sql @@ -0,0 +1,19 @@ +/* Copyright 2019 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +INSERT INTO background_updates (update_name, progress_json) VALUES + ('delete_soft_failed_extremities', '{}'); + +CREATE TABLE _extremities_to_check AS SELECT event_id FROM event_forward_extremities; From 1d818fde14595299de9e13008c67239ca677014f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 29 May 2019 11:58:32 +0100 Subject: [PATCH 260/429] Log actual number of entries deleted --- synapse/storage/_base.py | 12 +++++++++--- synapse/storage/events.py | 6 ++++-- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index fa6839ceca..3fe827cd43 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -1261,7 +1261,8 @@ class SQLBaseStore(object): " AND ".join("%s = ?" 
% (k,) for k in keyvalues), ) - return txn.execute(sql, list(keyvalues.values())) + txn.execute(sql, list(keyvalues.values())) + return txn.rowcount def _simple_delete_many(self, table, column, iterable, keyvalues, desc): return self.runInteraction( @@ -1280,9 +1281,12 @@ class SQLBaseStore(object): column : column name to test for inclusion against `iterable` iterable : list keyvalues : dict of column names and values to select the rows with + + Returns: + int: Number rows deleted """ if not iterable: - return + return 0 sql = "DELETE FROM %s" % table @@ -1297,7 +1301,9 @@ class SQLBaseStore(object): if clauses: sql = "%s WHERE %s" % (sql, " AND ".join(clauses)) - return txn.execute(sql, values) + txn.execute(sql, values) + + return txn.rowcount def _get_cache_dict( self, db_conn, table, entity_column, stream_column, max_value, limit=100000 diff --git a/synapse/storage/events.py b/synapse/storage/events.py index a9be143bd5..a9664928ca 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -2476,7 +2476,7 @@ class EventsStore( logger.info("Deleting up to %d forward extremities", len(to_delete)) - self._simple_delete_many_txn( + deleted = self._simple_delete_many_txn( txn=txn, table="event_forward_extremities", column="event_id", @@ -2484,7 +2484,9 @@ class EventsStore( keyvalues={}, ) - if to_delete: + logger.info("Deleted %d forward extremities", deleted) + + if deleted: # We now need to invalidate the caches of these rooms rows = self._simple_select_many_txn( txn, From 6574d4ad0a95e1d5a709e5d1d61555561dc180f2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 29 May 2019 14:19:11 +0100 Subject: [PATCH 261/429] Add test --- tests/storage/test_cleanup_extrems.py | 248 ++++++++++++++++++++++++++ 1 file changed, 248 insertions(+) create mode 100644 tests/storage/test_cleanup_extrems.py diff --git a/tests/storage/test_cleanup_extrems.py b/tests/storage/test_cleanup_extrems.py new file mode 100644 index 0000000000..6dda66ecd3 --- /dev/null +++ b/tests/storage/test_cleanup_extrems.py @@ -0,0 +1,248 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os.path + +from synapse.api.constants import EventTypes +from synapse.storage import prepare_database +from synapse.types import Requester, UserID + +from tests.unittest import HomeserverTestCase + + +class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase): + """Test the background update to clean forward extremities table. 
+ """ + + def prepare(self, reactor, clock, homeserver): + self.store = homeserver.get_datastore() + self.event_creator = homeserver.get_event_creation_handler() + self.room_creator = homeserver.get_room_creation_handler() + + # Create a test user and room + self.user = UserID("alice", "test") + self.requester = Requester(self.user, None, False, None, None) + info = self.get_success(self.room_creator.create_room(self.requester, {})) + self.room_id = info["room_id"] + + def create_and_send_event(self, soft_failed=False, prev_event_ids=None): + """Create and send an event. + + Args: + soft_failed (bool): Whether to create a soft failed event or not + prev_event_ids (list[str]|None): Explicitly set the prev events, + or if None just use the default + + Returns: + str: The new event's ID. + """ + prev_events_and_hashes = None + if prev_event_ids: + prev_events_and_hashes = [[p, {}, 0] for p in prev_event_ids] + + event, context = self.get_success( + self.event_creator.create_event( + self.requester, + { + "type": EventTypes.Message, + "room_id": self.room_id, + "sender": self.user.to_string(), + "content": {"body": "", "msgtype": "m.text"}, + }, + prev_events_and_hashes=prev_events_and_hashes, + ) + ) + + if soft_failed: + event.internal_metadata.soft_failed = True + + self.get_success( + self.event_creator.send_nonmember_event(self.requester, event, context) + ) + + return event.event_id + + def add_extremity(self, event_id): + """Add the given event as an extremity to the room. + """ + self.get_success( + self.store._simple_insert( + table="event_forward_extremities", + values={"room_id": self.room_id, "event_id": event_id}, + desc="test_add_extremity", + ) + ) + + self.store.get_latest_event_ids_in_room.invalidate((self.room_id,)) + + def run_background_update(self): + """Re run the background update to clean up the extremities. + """ + # Make sure we don't clash with in progress updates. + self.assertTrue(self.store._all_done, "Background updates are still ongoing") + + schema_path = os.path.join( + prepare_database.dir_path, + "schema", + "delta", + "54", + "delete_forward_extremities.sql", + ) + + def run_delta_file(txn): + prepare_database.executescript(txn, schema_path) + + self.get_success( + self.store.runInteraction("test_delete_forward_extremities", run_delta_file) + ) + + # Ugh, have to reset this flag + self.store._all_done = False + + while not self.get_success(self.store.has_completed_background_updates()): + self.get_success(self.store.do_next_background_update(100), by=0.1) + + def test_soft_failed_extremities_handled_correctly(self): + """Test that extremities are correctly calculated in the presence of + soft failed events. + + Tests a graph like: + + A <- SF1 <- SF2 <- B + + Where SF* are soft failed. + """ + + # Create the room graph + event_id_1 = self.create_and_send_event() + event_id_2 = self.create_and_send_event(True, [event_id_1]) + event_id_3 = self.create_and_send_event(True, [event_id_2]) + event_id_4 = self.create_and_send_event(False, [event_id_3]) + + # Check the latest events are as expected + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + + self.assertEqual(latest_event_ids, [event_id_4]) + + def test_basic_cleanup(self): + """Test that extremities are correctly calculated in the presence of + soft failed events. 
+ + Tests a graph like: + + A <- SF1 <- B + + Where SF* are soft failed, and with extremities of A and B + """ + # Create the room graph + event_id_a = self.create_and_send_event() + event_id_sf1 = self.create_and_send_event(True, [event_id_a]) + event_id_b = self.create_and_send_event(False, [event_id_sf1]) + + # Add the new extremity and check the latest events are as expected + self.add_extremity(event_id_a) + + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + self.assertEqual(set(latest_event_ids), set((event_id_a, event_id_b))) + + # Run the background update and check it did the right thing + self.run_background_update() + + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + self.assertEqual(latest_event_ids, [event_id_b]) + + def test_chain_of_fail_cleanup(self): + """Test that extremities are correctly calculated in the presence of + soft failed events. + + Tests a graph like: + + A <- SF1 <- SF2 <- B + + Where SF* are soft failed, and with extremities of A and B + """ + # Create the room graph + event_id_a = self.create_and_send_event() + event_id_sf1 = self.create_and_send_event(True, [event_id_a]) + event_id_sf2 = self.create_and_send_event(True, [event_id_sf1]) + event_id_b = self.create_and_send_event(False, [event_id_sf2]) + + # Add the new extremity and check the latest events are as expected + self.add_extremity(event_id_a) + + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + self.assertEqual(set(latest_event_ids), set((event_id_a, event_id_b))) + + # Run the background update and check it did the right thing + self.run_background_update() + + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + self.assertEqual(latest_event_ids, [event_id_b]) + + def test_forked_graph_cleanup(self): + r"""Test that extremities are correctly calculated in the presence of + soft failed events. + + Tests a graph like, where time flows down the page: + + A B + / \ / + / \ / + SF1 SF2 + | | + SF3 | + / \ | + | \ | + C SF4 + + Where SF* are soft failed, and with them A, B and C marked as + extremities. This should resolve to B and C being marked as extremity. 
+ """ + # Create the room graph + event_id_a = self.create_and_send_event() + event_id_b = self.create_and_send_event() + event_id_sf1 = self.create_and_send_event(True, [event_id_a]) + event_id_sf2 = self.create_and_send_event(True, [event_id_a, event_id_b]) + event_id_sf3 = self.create_and_send_event(True, [event_id_sf1]) + self.create_and_send_event(True, [event_id_sf2, event_id_sf3]) # SF4 + event_id_c = self.create_and_send_event(False, [event_id_sf3]) + + # Add the new extremity and check the latest events are as expected + self.add_extremity(event_id_a) + + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + self.assertEqual( + set(latest_event_ids), set((event_id_a, event_id_b, event_id_c)) + ) + + # Run the background update and check it did the right thing + self.run_background_update() + + latest_event_ids = self.get_success( + self.store.get_latest_event_ids_in_room(self.room_id) + ) + self.assertEqual(set(latest_event_ids), set([event_id_b, event_id_c])) From 9f5268388abb266260c30e9855da35a0b6b11bcb Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 28 May 2019 18:56:02 +0100 Subject: [PATCH 262/429] Newsfile --- changelog.d/5278.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5278.bugfix diff --git a/changelog.d/5278.bugfix b/changelog.d/5278.bugfix new file mode 100644 index 0000000000..9e14d20289 --- /dev/null +++ b/changelog.d/5278.bugfix @@ -0,0 +1 @@ +Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. From 9b8cd66524304f76209a59e12f4eca561b1a43d3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 10:55:55 +0100 Subject: [PATCH 263/429] Fixup comments and logging --- synapse/storage/events.py | 21 +++++++++++-------- .../delta/54/delete_forward_extremities.sql | 3 +++ 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index a9664928ca..418d88b8dc 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -2387,7 +2387,8 @@ class EventsStore( soft_failed_events_to_lookup = set() # First, we get `batch_size` events from the table, pulling out - # their prev events, if any, and their prev events rejection status. + # their successor events, if any, and their successor events + # rejection status. txn.execute( """SELECT prev_event_id, event_id, internal_metadata, rejections.event_id IS NOT NULL, events.outlier @@ -2450,11 +2451,10 @@ class EventsStore( if event_id in graph: # Already handled this event previously, but we still # want to record the edge. 
- graph.setdefault(event_id, set()).add(prev_event_id) - logger.info("Already handled") + graph[event_id].add(prev_event_id) continue - graph.setdefault(event_id, set()).add(prev_event_id) + graph[event_id] = {prev_event_id} soft_failed = json.loads(metadata).get("soft_failed") if soft_failed or rejected: @@ -2474,8 +2474,6 @@ class EventsStore( to_delete.intersection_update(original_set) - logger.info("Deleting up to %d forward extremities", len(to_delete)) - deleted = self._simple_delete_many_txn( txn=txn, table="event_forward_extremities", @@ -2484,7 +2482,11 @@ class EventsStore( keyvalues={}, ) - logger.info("Deleted %d forward extremities", deleted) + logger.info( + "Deleted %d forward extremities of %d checked, to clean up #5269", + deleted, + len(original_set), + ) if deleted: # We now need to invalidate the caches of these rooms @@ -2496,10 +2498,11 @@ class EventsStore( keyvalues={}, retcols=("room_id",) ) - for row in rows: + room_ids = set(row["room_id"] for row in rows) + for room_id in room_ids: txn.call_after( self.get_latest_event_ids_in_room.invalidate, - (row["room_id"],) + (room_id,) ) self._simple_delete_many_txn( diff --git a/synapse/storage/schema/delta/54/delete_forward_extremities.sql b/synapse/storage/schema/delta/54/delete_forward_extremities.sql index 7056bd1d00..aa40f13da7 100644 --- a/synapse/storage/schema/delta/54/delete_forward_extremities.sql +++ b/synapse/storage/schema/delta/54/delete_forward_extremities.sql @@ -13,7 +13,10 @@ * limitations under the License. */ +-- Start a background job to cleanup extremities that were incorrectly added +-- by bug #5269. INSERT INTO background_updates (update_name, progress_json) VALUES ('delete_soft_failed_extremities', '{}'); +DROP TABLE IF EXISTS _extremities_to_check; -- To make this delta schema file idempotent. CREATE TABLE _extremities_to_check AS SELECT event_id FROM event_forward_extremities; From 98f438b52a93b1ce9d1f3e93fa57db0f870f9101 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 11:22:59 +0100 Subject: [PATCH 264/429] Move event background updates to a separate file --- synapse/storage/__init__.py | 2 + synapse/storage/events.py | 371 +------------------------ synapse/storage/events_bg_updates.py | 401 +++++++++++++++++++++++++++ 3 files changed, 405 insertions(+), 369 deletions(-) create mode 100644 synapse/storage/events_bg_updates.py diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 7522d3fd57..56c434d4e8 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -36,6 +36,7 @@ from .engines import PostgresEngine from .event_federation import EventFederationStore from .event_push_actions import EventPushActionsStore from .events import EventsStore +from .events_bg_updates import EventsBackgroundUpdatesStore from .filtering import FilteringStore from .group_server import GroupServerStore from .keys import KeyStore @@ -65,6 +66,7 @@ logger = logging.getLogger(__name__) class DataStore( + EventsBackgroundUpdatesStore, RoomMemberStore, RoomStore, RegistrationStore, diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 418d88b8dc..f9162be9b9 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd -# Copyright 2018 New Vector Ltd +# Copyright 2018-2019 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -219,47 +220,11 @@ class EventsStore( EventsWorkerStore, BackgroundUpdateStore, ): - EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts" - EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url" - EVENT_DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities" def __init__(self, db_conn, hs): super(EventsStore, self).__init__(db_conn, hs) - self.register_background_update_handler( - self.EVENT_ORIGIN_SERVER_TS_NAME, self._background_reindex_origin_server_ts - ) - self.register_background_update_handler( - self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, - self._background_reindex_fields_sender, - ) - - self.register_background_index_update( - "event_contains_url_index", - index_name="event_contains_url_index", - table="events", - columns=["room_id", "topological_ordering", "stream_ordering"], - where_clause="contains_url = true AND outlier = false", - ) - - # an event_id index on event_search is useful for the purge_history - # api. Plus it means we get to enforce some integrity with a UNIQUE - # clause - self.register_background_index_update( - "event_search_event_id_idx", - index_name="event_search_event_id_idx", - table="event_search", - columns=["event_id"], - unique=True, - psql_only=True, - ) - - self.register_background_update_handler( - self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES, - self._cleanup_extremities_bg_update, - ) self._event_persist_queue = _EventPeristenceQueue() - self._state_resolution_handler = hs.get_state_resolution_handler() @defer.inlineCallbacks @@ -1585,153 +1550,6 @@ class EventsStore( ret = yield self.runInteraction("count_daily_active_rooms", _count) defer.returnValue(ret) - @defer.inlineCallbacks - def _background_reindex_fields_sender(self, progress, batch_size): - target_min_stream_id = progress["target_min_stream_id_inclusive"] - max_stream_id = progress["max_stream_id_exclusive"] - rows_inserted = progress.get("rows_inserted", 0) - - INSERT_CLUMP_SIZE = 1000 - - def reindex_txn(txn): - sql = ( - "SELECT stream_ordering, event_id, json FROM events" - " INNER JOIN event_json USING (event_id)" - " WHERE ? <= stream_ordering AND stream_ordering < ?" - " ORDER BY stream_ordering DESC" - " LIMIT ?" - ) - - txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size)) - - rows = txn.fetchall() - if not rows: - return 0 - - min_stream_id = rows[-1][0] - - update_rows = [] - for row in rows: - try: - event_id = row[1] - event_json = json.loads(row[2]) - sender = event_json["sender"] - content = event_json["content"] - - contains_url = "url" in content - if contains_url: - contains_url &= isinstance(content["url"], text_type) - except (KeyError, AttributeError): - # If the event is missing a necessary field then - # skip over it. - continue - - update_rows.append((sender, contains_url, event_id)) - - sql = "UPDATE events SET sender = ?, contains_url = ? WHERE event_id = ?" 
- - for index in range(0, len(update_rows), INSERT_CLUMP_SIZE): - clump = update_rows[index : index + INSERT_CLUMP_SIZE] - txn.executemany(sql, clump) - - progress = { - "target_min_stream_id_inclusive": target_min_stream_id, - "max_stream_id_exclusive": min_stream_id, - "rows_inserted": rows_inserted + len(rows), - } - - self._background_update_progress_txn( - txn, self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, progress - ) - - return len(rows) - - result = yield self.runInteraction( - self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, reindex_txn - ) - - if not result: - yield self._end_background_update(self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME) - - defer.returnValue(result) - - @defer.inlineCallbacks - def _background_reindex_origin_server_ts(self, progress, batch_size): - target_min_stream_id = progress["target_min_stream_id_inclusive"] - max_stream_id = progress["max_stream_id_exclusive"] - rows_inserted = progress.get("rows_inserted", 0) - - INSERT_CLUMP_SIZE = 1000 - - def reindex_search_txn(txn): - sql = ( - "SELECT stream_ordering, event_id FROM events" - " WHERE ? <= stream_ordering AND stream_ordering < ?" - " ORDER BY stream_ordering DESC" - " LIMIT ?" - ) - - txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size)) - - rows = txn.fetchall() - if not rows: - return 0 - - min_stream_id = rows[-1][0] - event_ids = [row[1] for row in rows] - - rows_to_update = [] - - chunks = [event_ids[i : i + 100] for i in range(0, len(event_ids), 100)] - for chunk in chunks: - ev_rows = self._simple_select_many_txn( - txn, - table="event_json", - column="event_id", - iterable=chunk, - retcols=["event_id", "json"], - keyvalues={}, - ) - - for row in ev_rows: - event_id = row["event_id"] - event_json = json.loads(row["json"]) - try: - origin_server_ts = event_json["origin_server_ts"] - except (KeyError, AttributeError): - # If the event is missing a necessary field then - # skip over it. - continue - - rows_to_update.append((origin_server_ts, event_id)) - - sql = "UPDATE events SET origin_server_ts = ? WHERE event_id = ?" - - for index in range(0, len(rows_to_update), INSERT_CLUMP_SIZE): - clump = rows_to_update[index : index + INSERT_CLUMP_SIZE] - txn.executemany(sql, clump) - - progress = { - "target_min_stream_id_inclusive": target_min_stream_id, - "max_stream_id_exclusive": min_stream_id, - "rows_inserted": rows_inserted + len(rows_to_update), - } - - self._background_update_progress_txn( - txn, self.EVENT_ORIGIN_SERVER_TS_NAME, progress - ) - - return len(rows_to_update) - - result = yield self.runInteraction( - self.EVENT_ORIGIN_SERVER_TS_NAME, reindex_search_txn - ) - - if not result: - yield self._end_background_update(self.EVENT_ORIGIN_SERVER_TS_NAME) - - defer.returnValue(result) - def get_current_backfill_token(self): """The current minimum token that backfilled events have reached""" return -self._backfill_id_gen.get_current_token() @@ -2347,191 +2165,6 @@ class EventsStore( get_all_updated_current_state_deltas_txn, ) - @defer.inlineCallbacks - def _cleanup_extremities_bg_update(self, progress, batch_size): - """Background update to clean out extremities that should have been - deleted previously. - - Mainly used to deal with the aftermath of #5269. - """ - - # This works by first copying all existing forward extremities into the - # `_extremities_to_check` table at start up, and then checking each - # event in that table whether we have any descendants that are not - # soft-failed/rejected. If that is the case then we delete that event - # from the forward extremities table. 
- # - # For efficiency, we do this in batches by recursively pulling out all - # descendants of a batch until we find the non soft-failed/rejected - # events, i.e. the set of descendants whose chain of prev events back - # to the batch of extremities are all soft-failed or rejected. - # Typically, we won't find any such events as extremities will rarely - # have any descendants, but if they do then we should delete those - # extremities. - - def _cleanup_extremities_bg_update_txn(txn): - # The set of extremity event IDs that we're checking this round - original_set = set() - - # A dict[str, set[str]] of event ID to their prev events. - graph = {} - - # The set of descendants of the original set that are not rejected - # nor soft-failed. Ancestors of these events should be removed - # from the forward extremities table. - non_rejected_leaves = set() - - # Set of event IDs that have been soft failed, and for which we - # should check if they have descendants which haven't been soft - # failed. - soft_failed_events_to_lookup = set() - - # First, we get `batch_size` events from the table, pulling out - # their successor events, if any, and their successor events - # rejection status. - txn.execute( - """SELECT prev_event_id, event_id, internal_metadata, - rejections.event_id IS NOT NULL, events.outlier - FROM ( - SELECT event_id AS prev_event_id - FROM _extremities_to_check - LIMIT ? - ) AS f - LEFT JOIN event_edges USING (prev_event_id) - LEFT JOIN events USING (event_id) - LEFT JOIN event_json USING (event_id) - LEFT JOIN rejections USING (event_id) - """, (batch_size,) - ) - - for prev_event_id, event_id, metadata, rejected, outlier in txn: - original_set.add(prev_event_id) - - if not event_id or outlier: - # Common case where the forward extremity doesn't have any - # descendants. - continue - - graph.setdefault(event_id, set()).add(prev_event_id) - - soft_failed = False - if metadata: - soft_failed = json.loads(metadata).get("soft_failed") - - if soft_failed or rejected: - soft_failed_events_to_lookup.add(event_id) - else: - non_rejected_leaves.add(event_id) - - # Now we recursively check all the soft-failed descendants we - # found above in the same way, until we have nothing left to - # check. - while soft_failed_events_to_lookup: - # We only want to do 100 at a time, so we split given list - # into two. - batch = list(soft_failed_events_to_lookup) - to_check, to_defer = batch[:100], batch[100:] - soft_failed_events_to_lookup = set(to_defer) - - sql = """SELECT prev_event_id, event_id, internal_metadata, - rejections.event_id IS NOT NULL - FROM event_edges - INNER JOIN events USING (event_id) - INNER JOIN event_json USING (event_id) - LEFT JOIN rejections USING (event_id) - WHERE - prev_event_id IN (%s) - AND NOT events.outlier - """ % ( - ",".join("?" for _ in to_check), - ) - txn.execute(sql, to_check) - - for prev_event_id, event_id, metadata, rejected in txn: - if event_id in graph: - # Already handled this event previously, but we still - # want to record the edge. - graph[event_id].add(prev_event_id) - continue - - graph[event_id] = {prev_event_id} - - soft_failed = json.loads(metadata).get("soft_failed") - if soft_failed or rejected: - soft_failed_events_to_lookup.add(event_id) - else: - non_rejected_leaves.add(event_id) - - # We have a set of non-soft-failed descendants, so we recurse up - # the graph to find all ancestors and add them to the set of event - # IDs that we can delete from forward extremities table. 
- to_delete = set() - while non_rejected_leaves: - event_id = non_rejected_leaves.pop() - prev_event_ids = graph.get(event_id, set()) - non_rejected_leaves.update(prev_event_ids) - to_delete.update(prev_event_ids) - - to_delete.intersection_update(original_set) - - deleted = self._simple_delete_many_txn( - txn=txn, - table="event_forward_extremities", - column="event_id", - iterable=to_delete, - keyvalues={}, - ) - - logger.info( - "Deleted %d forward extremities of %d checked, to clean up #5269", - deleted, - len(original_set), - ) - - if deleted: - # We now need to invalidate the caches of these rooms - rows = self._simple_select_many_txn( - txn, - table="events", - column="event_id", - iterable=to_delete, - keyvalues={}, - retcols=("room_id",) - ) - room_ids = set(row["room_id"] for row in rows) - for room_id in room_ids: - txn.call_after( - self.get_latest_event_ids_in_room.invalidate, - (room_id,) - ) - - self._simple_delete_many_txn( - txn=txn, - table="_extremities_to_check", - column="event_id", - iterable=original_set, - keyvalues={}, - ) - - return len(original_set) - - num_handled = yield self.runInteraction( - "_cleanup_extremities_bg_update", _cleanup_extremities_bg_update_txn, - ) - - if not num_handled: - yield self._end_background_update(self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES) - - def _drop_table_txn(txn): - txn.execute("DROP TABLE _extremities_to_check") - - yield self.runInteraction( - "_cleanup_extremities_bg_update_drop_table", - _drop_table_txn, - ) - - defer.returnValue(num_handled) - AllNewEventsResult = namedtuple( "AllNewEventsResult", diff --git a/synapse/storage/events_bg_updates.py b/synapse/storage/events_bg_updates.py new file mode 100644 index 0000000000..2eba106abf --- /dev/null +++ b/synapse/storage/events_bg_updates.py @@ -0,0 +1,401 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging + +from six import text_type + +from canonicaljson import json + +from twisted.internet import defer + +from synapse.storage.background_updates import BackgroundUpdateStore + +logger = logging.getLogger(__name__) + + +class EventsBackgroundUpdatesStore(BackgroundUpdateStore): + + EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts" + EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url" + EVENT_DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities" + + def __init__(self, db_conn, hs): + super(EventsBackgroundUpdatesStore, self).__init__(db_conn, hs) + + self.register_background_update_handler( + self.EVENT_ORIGIN_SERVER_TS_NAME, self._background_reindex_origin_server_ts + ) + self.register_background_update_handler( + self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, + self._background_reindex_fields_sender, + ) + + self.register_background_index_update( + "event_contains_url_index", + index_name="event_contains_url_index", + table="events", + columns=["room_id", "topological_ordering", "stream_ordering"], + where_clause="contains_url = true AND outlier = false", + ) + + # an event_id index on event_search is useful for the purge_history + # api. Plus it means we get to enforce some integrity with a UNIQUE + # clause + self.register_background_index_update( + "event_search_event_id_idx", + index_name="event_search_event_id_idx", + table="event_search", + columns=["event_id"], + unique=True, + psql_only=True, + ) + + self.register_background_update_handler( + self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES, + self._cleanup_extremities_bg_update, + ) + + @defer.inlineCallbacks + def _background_reindex_fields_sender(self, progress, batch_size): + target_min_stream_id = progress["target_min_stream_id_inclusive"] + max_stream_id = progress["max_stream_id_exclusive"] + rows_inserted = progress.get("rows_inserted", 0) + + INSERT_CLUMP_SIZE = 1000 + + def reindex_txn(txn): + sql = ( + "SELECT stream_ordering, event_id, json FROM events" + " INNER JOIN event_json USING (event_id)" + " WHERE ? <= stream_ordering AND stream_ordering < ?" + " ORDER BY stream_ordering DESC" + " LIMIT ?" + ) + + txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size)) + + rows = txn.fetchall() + if not rows: + return 0 + + min_stream_id = rows[-1][0] + + update_rows = [] + for row in rows: + try: + event_id = row[1] + event_json = json.loads(row[2]) + sender = event_json["sender"] + content = event_json["content"] + + contains_url = "url" in content + if contains_url: + contains_url &= isinstance(content["url"], text_type) + except (KeyError, AttributeError): + # If the event is missing a necessary field then + # skip over it. + continue + + update_rows.append((sender, contains_url, event_id)) + + sql = "UPDATE events SET sender = ?, contains_url = ? WHERE event_id = ?" 
+ + for index in range(0, len(update_rows), INSERT_CLUMP_SIZE): + clump = update_rows[index : index + INSERT_CLUMP_SIZE] + txn.executemany(sql, clump) + + progress = { + "target_min_stream_id_inclusive": target_min_stream_id, + "max_stream_id_exclusive": min_stream_id, + "rows_inserted": rows_inserted + len(rows), + } + + self._background_update_progress_txn( + txn, self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, progress + ) + + return len(rows) + + result = yield self.runInteraction( + self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME, reindex_txn + ) + + if not result: + yield self._end_background_update(self.EVENT_FIELDS_SENDER_URL_UPDATE_NAME) + + defer.returnValue(result) + + @defer.inlineCallbacks + def _background_reindex_origin_server_ts(self, progress, batch_size): + target_min_stream_id = progress["target_min_stream_id_inclusive"] + max_stream_id = progress["max_stream_id_exclusive"] + rows_inserted = progress.get("rows_inserted", 0) + + INSERT_CLUMP_SIZE = 1000 + + def reindex_search_txn(txn): + sql = ( + "SELECT stream_ordering, event_id FROM events" + " WHERE ? <= stream_ordering AND stream_ordering < ?" + " ORDER BY stream_ordering DESC" + " LIMIT ?" + ) + + txn.execute(sql, (target_min_stream_id, max_stream_id, batch_size)) + + rows = txn.fetchall() + if not rows: + return 0 + + min_stream_id = rows[-1][0] + event_ids = [row[1] for row in rows] + + rows_to_update = [] + + chunks = [event_ids[i : i + 100] for i in range(0, len(event_ids), 100)] + for chunk in chunks: + ev_rows = self._simple_select_many_txn( + txn, + table="event_json", + column="event_id", + iterable=chunk, + retcols=["event_id", "json"], + keyvalues={}, + ) + + for row in ev_rows: + event_id = row["event_id"] + event_json = json.loads(row["json"]) + try: + origin_server_ts = event_json["origin_server_ts"] + except (KeyError, AttributeError): + # If the event is missing a necessary field then + # skip over it. + continue + + rows_to_update.append((origin_server_ts, event_id)) + + sql = "UPDATE events SET origin_server_ts = ? WHERE event_id = ?" + + for index in range(0, len(rows_to_update), INSERT_CLUMP_SIZE): + clump = rows_to_update[index : index + INSERT_CLUMP_SIZE] + txn.executemany(sql, clump) + + progress = { + "target_min_stream_id_inclusive": target_min_stream_id, + "max_stream_id_exclusive": min_stream_id, + "rows_inserted": rows_inserted + len(rows_to_update), + } + + self._background_update_progress_txn( + txn, self.EVENT_ORIGIN_SERVER_TS_NAME, progress + ) + + return len(rows_to_update) + + result = yield self.runInteraction( + self.EVENT_ORIGIN_SERVER_TS_NAME, reindex_search_txn + ) + + if not result: + yield self._end_background_update(self.EVENT_ORIGIN_SERVER_TS_NAME) + + defer.returnValue(result) + + @defer.inlineCallbacks + def _cleanup_extremities_bg_update(self, progress, batch_size): + """Background update to clean out extremities that should have been + deleted previously. + + Mainly used to deal with the aftermath of #5269. + """ + + # This works by first copying all existing forward extremities into the + # `_extremities_to_check` table at start up, and then checking each + # event in that table whether we have any descendants that are not + # soft-failed/rejected. If that is the case then we delete that event + # from the forward extremities table. + # + # For efficiency, we do this in batches by recursively pulling out all + # descendants of a batch until we find the non soft-failed/rejected + # events, i.e. 
the set of descendants whose chain of prev events back + # to the batch of extremities are all soft-failed or rejected. + # Typically, we won't find any such events as extremities will rarely + # have any descendants, but if they do then we should delete those + # extremities. + + def _cleanup_extremities_bg_update_txn(txn): + # The set of extremity event IDs that we're checking this round + original_set = set() + + # A dict[str, set[str]] of event ID to their prev events. + graph = {} + + # The set of descendants of the original set that are not rejected + # nor soft-failed. Ancestors of these events should be removed + # from the forward extremities table. + non_rejected_leaves = set() + + # Set of event IDs that have been soft failed, and for which we + # should check if they have descendants which haven't been soft + # failed. + soft_failed_events_to_lookup = set() + + # First, we get `batch_size` events from the table, pulling out + # their successor events, if any, and their successor events + # rejection status. + txn.execute( + """SELECT prev_event_id, event_id, internal_metadata, + rejections.event_id IS NOT NULL, events.outlier + FROM ( + SELECT event_id AS prev_event_id + FROM _extremities_to_check + LIMIT ? + ) AS f + LEFT JOIN event_edges USING (prev_event_id) + LEFT JOIN events USING (event_id) + LEFT JOIN event_json USING (event_id) + LEFT JOIN rejections USING (event_id) + """, (batch_size,) + ) + + for prev_event_id, event_id, metadata, rejected, outlier in txn: + original_set.add(prev_event_id) + + if not event_id or outlier: + # Common case where the forward extremity doesn't have any + # descendants. + continue + + graph.setdefault(event_id, set()).add(prev_event_id) + + soft_failed = False + if metadata: + soft_failed = json.loads(metadata).get("soft_failed") + + if soft_failed or rejected: + soft_failed_events_to_lookup.add(event_id) + else: + non_rejected_leaves.add(event_id) + + # Now we recursively check all the soft-failed descendants we + # found above in the same way, until we have nothing left to + # check. + while soft_failed_events_to_lookup: + # We only want to do 100 at a time, so we split given list + # into two. + batch = list(soft_failed_events_to_lookup) + to_check, to_defer = batch[:100], batch[100:] + soft_failed_events_to_lookup = set(to_defer) + + sql = """SELECT prev_event_id, event_id, internal_metadata, + rejections.event_id IS NOT NULL + FROM event_edges + INNER JOIN events USING (event_id) + INNER JOIN event_json USING (event_id) + LEFT JOIN rejections USING (event_id) + WHERE + prev_event_id IN (%s) + AND NOT events.outlier + """ % ( + ",".join("?" for _ in to_check), + ) + txn.execute(sql, to_check) + + for prev_event_id, event_id, metadata, rejected in txn: + if event_id in graph: + # Already handled this event previously, but we still + # want to record the edge. + graph[event_id].add(prev_event_id) + continue + + graph[event_id] = {prev_event_id} + + soft_failed = json.loads(metadata).get("soft_failed") + if soft_failed or rejected: + soft_failed_events_to_lookup.add(event_id) + else: + non_rejected_leaves.add(event_id) + + # We have a set of non-soft-failed descendants, so we recurse up + # the graph to find all ancestors and add them to the set of event + # IDs that we can delete from forward extremities table. 
+ to_delete = set() + while non_rejected_leaves: + event_id = non_rejected_leaves.pop() + prev_event_ids = graph.get(event_id, set()) + non_rejected_leaves.update(prev_event_ids) + to_delete.update(prev_event_ids) + + to_delete.intersection_update(original_set) + + deleted = self._simple_delete_many_txn( + txn=txn, + table="event_forward_extremities", + column="event_id", + iterable=to_delete, + keyvalues={}, + ) + + logger.info( + "Deleted %d forward extremities of %d checked, to clean up #5269", + deleted, + len(original_set), + ) + + if deleted: + # We now need to invalidate the caches of these rooms + rows = self._simple_select_many_txn( + txn, + table="events", + column="event_id", + iterable=to_delete, + keyvalues={}, + retcols=("room_id",) + ) + room_ids = set(row["room_id"] for row in rows) + for room_id in room_ids: + txn.call_after( + self.get_latest_event_ids_in_room.invalidate, + (room_id,) + ) + + self._simple_delete_many_txn( + txn=txn, + table="_extremities_to_check", + column="event_id", + iterable=original_set, + keyvalues={}, + ) + + return len(original_set) + + num_handled = yield self.runInteraction( + "_cleanup_extremities_bg_update", _cleanup_extremities_bg_update_txn, + ) + + if not num_handled: + yield self._end_background_update(self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES) + + def _drop_table_txn(txn): + txn.execute("DROP TABLE _extremities_to_check") + + yield self.runInteraction( + "_cleanup_extremities_bg_update_drop_table", + _drop_table_txn, + ) + + defer.returnValue(num_handled) From 7386c35f58f360269df1410b2d1ec6d179081b32 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 11:24:42 +0100 Subject: [PATCH 265/429] Rename constant --- synapse/storage/events_bg_updates.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/storage/events_bg_updates.py b/synapse/storage/events_bg_updates.py index 2eba106abf..22aac1393d 100644 --- a/synapse/storage/events_bg_updates.py +++ b/synapse/storage/events_bg_updates.py @@ -30,7 +30,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): EVENT_ORIGIN_SERVER_TS_NAME = "event_origin_server_ts" EVENT_FIELDS_SENDER_URL_UPDATE_NAME = "event_fields_sender_url" - EVENT_DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities" + DELETE_SOFT_FAILED_EXTREMITIES = "delete_soft_failed_extremities" def __init__(self, db_conn, hs): super(EventsBackgroundUpdatesStore, self).__init__(db_conn, hs) @@ -64,7 +64,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): ) self.register_background_update_handler( - self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES, + self.DELETE_SOFT_FAILED_EXTREMITIES, self._cleanup_extremities_bg_update, ) @@ -388,7 +388,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): ) if not num_handled: - yield self._end_background_update(self.EVENT_DELETE_SOFT_FAILED_EXTREMITIES) + yield self._end_background_update(self.DELETE_SOFT_FAILED_EXTREMITIES) def _drop_table_txn(txn): txn.execute("DROP TABLE _extremities_to_check") From 06eb408da5c4ab52a3072dc6d76fe5ac3b9b1e83 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 14:06:42 +0100 Subject: [PATCH 266/429] Update synapse/storage/events_bg_updates.py Co-Authored-By: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- synapse/storage/events_bg_updates.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/events_bg_updates.py b/synapse/storage/events_bg_updates.py index 22aac1393d..75c1935bf3 100644 --- 
a/synapse/storage/events_bg_updates.py +++ b/synapse/storage/events_bg_updates.py @@ -255,7 +255,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): soft_failed_events_to_lookup = set() # First, we get `batch_size` events from the table, pulling out - # their successor events, if any, and their successor events + # their successor events, if any, and the successor events' # rejection status. txn.execute( """SELECT prev_event_id, event_id, internal_metadata, From e2c3660a0ffb13d4198893e91a90ae1abcad8915 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 14:54:56 +0100 Subject: [PATCH 267/429] Add index to temp table --- synapse/storage/schema/delta/54/delete_forward_extremities.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/storage/schema/delta/54/delete_forward_extremities.sql b/synapse/storage/schema/delta/54/delete_forward_extremities.sql index aa40f13da7..b062ec840c 100644 --- a/synapse/storage/schema/delta/54/delete_forward_extremities.sql +++ b/synapse/storage/schema/delta/54/delete_forward_extremities.sql @@ -20,3 +20,4 @@ INSERT INTO background_updates (update_name, progress_json) VALUES DROP TABLE IF EXISTS _extremities_to_check; -- To make this delta schema file idempotent. CREATE TABLE _extremities_to_check AS SELECT event_id FROM event_forward_extremities; +CREATE INDEX _extremities_to_check_id ON _extremities_to_check(event_id); From f5c7f90d7278f199523457ade9587d284b3ce39b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 15:05:26 +0100 Subject: [PATCH 268/429] Newsfile --- changelog.d/5291.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5291.bugfix diff --git a/changelog.d/5291.bugfix b/changelog.d/5291.bugfix new file mode 100644 index 0000000000..9e14d20289 --- /dev/null +++ b/changelog.d/5291.bugfix @@ -0,0 +1 @@ +Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. From 9315802221ede86def56986d5c5303f649cd2fa9 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 30 May 2019 16:28:02 +0100 Subject: [PATCH 269/429] fix changelog for 0.99.5.1 (#5270) --- CHANGES.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 6bdfdd6d70..350151b62c 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,8 +1,7 @@ Synapse 0.99.5.1 (2019-05-22) ============================= -No significant changes. - +0.99.5.1 supersedes 0.99.5 due to malformed debian changelog - no functional changes. Synapse 0.99.5 (2019-05-22) =========================== From c831748f4d243d74e9a3fd2042bc2b35cc30f961 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 16:28:52 +0100 Subject: [PATCH 270/429] 0.99.5.2 --- CHANGES.md | 9 +++++++++ changelog.d/5274.bugfix | 1 - changelog.d/5278.bugfix | 1 - changelog.d/5291.bugfix | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 6 files changed, 16 insertions(+), 4 deletions(-) delete mode 100644 changelog.d/5274.bugfix delete mode 100644 changelog.d/5278.bugfix delete mode 100644 changelog.d/5291.bugfix diff --git a/CHANGES.md b/CHANGES.md index 350151b62c..0ffdf1aaef 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 0.99.5.2 (2019-05-30) +============================= + +Bugfixes +-------- + +- Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. 
([\#5274](https://github.com/matrix-org/synapse/issues/5274), [\#5278](https://github.com/matrix-org/synapse/issues/5278), [\#5291](https://github.com/matrix-org/synapse/issues/5291)) + + Synapse 0.99.5.1 (2019-05-22) ============================= diff --git a/changelog.d/5274.bugfix b/changelog.d/5274.bugfix deleted file mode 100644 index 9e14d20289..0000000000 --- a/changelog.d/5274.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. diff --git a/changelog.d/5278.bugfix b/changelog.d/5278.bugfix deleted file mode 100644 index 9e14d20289..0000000000 --- a/changelog.d/5278.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. diff --git a/changelog.d/5291.bugfix b/changelog.d/5291.bugfix deleted file mode 100644 index 9e14d20289..0000000000 --- a/changelog.d/5291.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. diff --git a/debian/changelog b/debian/changelog index 90c6b86c5b..6a1a72c0e3 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (0.99.5.2) stable; urgency=medium + + * New synapse release 0.99.5.2. + + -- Synapse Packaging team Thu, 30 May 2019 16:28:07 +0100 + matrix-synapse-py3 (0.99.5.1) stable; urgency=medium * New synapse release 0.99.5.1. diff --git a/synapse/__init__.py b/synapse/__init__.py index 4f95778eea..d0e8d7c21b 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "0.99.5.1" +__version__ = "0.99.5.2" From 099829d5a95b913c47634d13391d6c9f200f0bde Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 1 Apr 2019 12:28:40 +0100 Subject: [PATCH 271/429] use attr.s for VerifyKeyRequest because namedtuple is awful --- changelog.d/5296.misc | 1 + synapse/crypto/keyring.py | 36 ++++++++++++++++++++---------------- 2 files changed, 21 insertions(+), 16 deletions(-) create mode 100644 changelog.d/5296.misc diff --git a/changelog.d/5296.misc b/changelog.d/5296.misc new file mode 100644 index 0000000000..a038a6f7f6 --- /dev/null +++ b/changelog.d/5296.misc @@ -0,0 +1 @@ +Refactor keyring.VerifyKeyRequest to use attr.s. diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index c63f106cf3..e1e026214f 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -15,12 +15,12 @@ # limitations under the License. import logging -from collections import namedtuple import six from six import raise_from from six.moves import urllib +import attr from signedjson.key import ( decode_verify_key_bytes, encode_verify_key_base64, @@ -57,22 +57,26 @@ from synapse.util.retryutils import NotRetryingDestination logger = logging.getLogger(__name__) -VerifyKeyRequest = namedtuple( - "VerifyRequest", ("server_name", "key_ids", "json_object", "deferred") -) -""" -A request for a verify key to verify a JSON object. +@attr.s(slots=True, cmp=False) +class VerifyKeyRequest(object): + """ + A request for a verify key to verify a JSON object. -Attributes: - server_name(str): The name of the server to verify against. - key_ids(set(str)): The set of key_ids to that could be used to verify the - JSON object - json_object(dict): The JSON object to verify. - deferred(Deferred[str, str, nacl.signing.VerifyKey]): - A deferred (server_name, key_id, verify_key) tuple that resolves when - a verify key has been fetched. 
The deferreds' callbacks are run with no - logcontext. -""" + Attributes: + server_name(str): The name of the server to verify against. + key_ids(set[str]): The set of key_ids to that could be used to verify the + JSON object + json_object(dict): The JSON object to verify. + deferred(Deferred[str, str, nacl.signing.VerifyKey]): + A deferred (server_name, key_id, verify_key) tuple that resolves when + a verify key has been fetched. The deferreds' callbacks are run with no + logcontext. + """ + + server_name = attr.ib() + key_ids = attr.ib() + json_object = attr.ib() + deferred = attr.ib() class KeyLookupError(ValueError): From a82c96b87fd6fb8b8c71cc34e6a712a12ff4222f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 9 Apr 2019 18:30:13 +0100 Subject: [PATCH 272/429] Rewrite get_server_verify_keys, again. Attempt to simplify the logic in get_server_verify_keys by splitting it into two methods. --- changelog.d/5299.misc | 1 + synapse/crypto/keyring.py | 101 ++++++++++++++++++++------------------ 2 files changed, 54 insertions(+), 48 deletions(-) create mode 100644 changelog.d/5299.misc diff --git a/changelog.d/5299.misc b/changelog.d/5299.misc new file mode 100644 index 0000000000..53297c768b --- /dev/null +++ b/changelog.d/5299.misc @@ -0,0 +1 @@ +Rewrite get_server_verify_keys, again. diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index c63f106cf3..194867db03 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -270,59 +270,21 @@ class Keyring(object): verify_requests (list[VerifyKeyRequest]): list of verify requests """ + remaining_requests = set( + (rq for rq in verify_requests if not rq.deferred.called) + ) + @defer.inlineCallbacks def do_iterations(): with Measure(self.clock, "get_server_verify_keys"): - # dict[str, set(str)]: keys to fetch for each server - missing_keys = {} - for verify_request in verify_requests: - missing_keys.setdefault(verify_request.server_name, set()).update( - verify_request.key_ids - ) - for f in self._key_fetchers: - results = yield f.get_keys(missing_keys.items()) - - # We now need to figure out which verify requests we have keys - # for and which we don't - missing_keys = {} - requests_missing_keys = [] - for verify_request in verify_requests: - if verify_request.deferred.called: - # We've already called this deferred, which probably - # means that we've already found a key for it. - continue - - server_name = verify_request.server_name - - # see if any of the keys we got this time are sufficient to - # complete this VerifyKeyRequest. - result_keys = results.get(server_name, {}) - for key_id in verify_request.key_ids: - fetch_key_result = result_keys.get(key_id) - if fetch_key_result: - with PreserveLoggingContext(): - verify_request.deferred.callback( - ( - server_name, - key_id, - fetch_key_result.verify_key, - ) - ) - break - else: - # The else block is only reached if the loop above - # doesn't break. 
- missing_keys.setdefault(server_name, set()).update( - verify_request.key_ids - ) - requests_missing_keys.append(verify_request) - - if not missing_keys: - break + if not remaining_requests: + return + yield self._attempt_key_fetches_with_fetcher(f, remaining_requests) + # look for any requests which weren't satisfied with PreserveLoggingContext(): - for verify_request in requests_missing_keys: + for verify_request in remaining_requests: verify_request.deferred.errback( SynapseError( 401, @@ -333,13 +295,56 @@ class Keyring(object): ) def on_err(err): + # we don't really expect to get here, because any errors should already + # have been caught and logged. But if we do, let's log the error and make + # sure that all of the deferreds are resolved. + logger.error("Unexpected error in _get_server_verify_keys: %s", err) with PreserveLoggingContext(): - for verify_request in verify_requests: + for verify_request in remaining_requests: if not verify_request.deferred.called: verify_request.deferred.errback(err) run_in_background(do_iterations).addErrback(on_err) + @defer.inlineCallbacks + def _attempt_key_fetches_with_fetcher(self, fetcher, remaining_requests): + """Use a key fetcher to attempt to satisfy some key requests + + Args: + fetcher (KeyFetcher): fetcher to use to fetch the keys + remaining_requests (set[VerifyKeyRequest]): outstanding key requests. + Any successfully-completed requests will be reomved from the list. + """ + # dict[str, set(str)]: keys to fetch for each server + missing_keys = {} + for verify_request in remaining_requests: + # any completed requests should already have been removed + assert not verify_request.deferred.called + missing_keys.setdefault(verify_request.server_name, set()).update( + verify_request.key_ids + ) + + results = yield fetcher.get_keys(missing_keys.items()) + + completed = list() + for verify_request in remaining_requests: + server_name = verify_request.server_name + + # see if any of the keys we got this time are sufficient to + # complete this VerifyKeyRequest. + result_keys = results.get(server_name, {}) + for key_id in verify_request.key_ids: + key = result_keys.get(key_id) + if key: + with PreserveLoggingContext(): + verify_request.deferred.callback( + (server_name, key_id, key.verify_key) + ) + completed.append(verify_request) + break + + remaining_requests.difference_update(completed) + class KeyFetcher(object): def get_keys(self, server_name_and_key_ids): From 8ea2f756a947d668afc9a6b22707c12a29af6be4 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 29 May 2019 17:21:39 +0100 Subject: [PATCH 273/429] Remove some pointless exception handling The verify_request deferred already returns a suitable SynapseError, so I don't really know what we expect to achieve by doing more wrapping, other than log spam. Fixes #4278. --- changelog.d/5300.bugfix | 1 + synapse/crypto/keyring.py | 33 ++++++++------------------------- 2 files changed, 9 insertions(+), 25 deletions(-) create mode 100644 changelog.d/5300.bugfix diff --git a/changelog.d/5300.bugfix b/changelog.d/5300.bugfix new file mode 100644 index 0000000000..049e93cd5a --- /dev/null +++ b/changelog.d/5300.bugfix @@ -0,0 +1 @@ +Fix noisy 'no key for server' logs. diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index e1e026214f..5756478ad7 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -64,13 +64,19 @@ class VerifyKeyRequest(object): Attributes: server_name(str): The name of the server to verify against. 
+ key_ids(set[str]): The set of key_ids to that could be used to verify the JSON object + json_object(dict): The JSON object to verify. + deferred(Deferred[str, str, nacl.signing.VerifyKey]): A deferred (server_name, key_id, verify_key) tuple that resolves when a verify key has been fetched. The deferreds' callbacks are run with no logcontext. + + If we are unable to find a key which satisfies the request, the deferred + errbacks with an M_UNAUTHORIZED SynapseError. """ server_name = attr.ib() @@ -771,31 +777,8 @@ def _handle_key_deferred(verify_request): SynapseError if there was a problem performing the verification """ server_name = verify_request.server_name - try: - with PreserveLoggingContext(): - _, key_id, verify_key = yield verify_request.deferred - except KeyLookupError as e: - logger.warn( - "Failed to download keys for %s: %s %s", - server_name, - type(e).__name__, - str(e), - ) - raise SynapseError( - 502, "Error downloading keys for %s" % (server_name,), Codes.UNAUTHORIZED - ) - except Exception as e: - logger.exception( - "Got Exception when downloading keys for %s: %s %s", - server_name, - type(e).__name__, - str(e), - ) - raise SynapseError( - 401, - "No key for %s with id %s" % (server_name, verify_request.key_ids), - Codes.UNAUTHORIZED, - ) + with PreserveLoggingContext(): + _, key_id, verify_key = yield verify_request.deferred json_object = verify_request.json_object From 3e1af5109cb91b8e22f0e14aee875f86bd9fcd92 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Fri, 31 May 2019 02:45:46 -0600 Subject: [PATCH 274/429] Clarify that the admin change password endpoint logs them out (#5303) --- changelog.d/5303.misc | 1 + docs/admin_api/user_admin_api.rst | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/5303.misc diff --git a/changelog.d/5303.misc b/changelog.d/5303.misc new file mode 100644 index 0000000000..f6a7f1f8e3 --- /dev/null +++ b/changelog.d/5303.misc @@ -0,0 +1 @@ +Clarify that the admin change password API logs the user out. diff --git a/docs/admin_api/user_admin_api.rst b/docs/admin_api/user_admin_api.rst index 8aca4f158d..213359d0c0 100644 --- a/docs/admin_api/user_admin_api.rst +++ b/docs/admin_api/user_admin_api.rst @@ -69,7 +69,7 @@ An empty body may be passed for backwards compatibility. Reset password ============== -Changes the password of another user. +Changes the password of another user. This will automatically log the user out of all their devices. The api is:: From 847b9dcd1c9d7d7a43333e85f69dc78471095475 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 31 May 2019 09:54:46 +0100 Subject: [PATCH 275/429] Make max_delta equal to period * 10% --- synapse/config/registration.py | 15 ++++----------- synapse/storage/_base.py | 7 +++---- tests/rest/client/v2_alpha/test_register.py | 18 +----------------- 3 files changed, 8 insertions(+), 32 deletions(-) diff --git a/synapse/config/registration.py b/synapse/config/registration.py index b4fd4af368..1835b4b1f3 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -39,9 +39,7 @@ class AccountValidityConfig(Config): else: self.renew_email_subject = "Renew your %(app)s account" - self.startup_job_max_delta = self.parse_duration( - config.get("startup_job_max_delta", 0), - ) + self.startup_job_max_delta = self.period * 10. / 100. 
if self.renew_by_email_enabled and "public_baseurl" not in synapse_config: raise ConfigError("Can't send renewal emails without 'public_baseurl'") @@ -133,20 +131,15 @@ class RegistrationConfig(Config): # This means that, if a validity period is set, and Synapse is restarted (it will # then derive an expiration date from the current validity period), and some time # after that the validity period changes and Synapse is restarted, the users' - # expiration dates won't be updated unless their account is manually renewed. - # - # If set, the ``startup_job_max_delta`` optional setting will make the startup job - # described above set a random expiration date between t + period and - # t + period + startup_job_max_delta, t being the date and time at which the job - # sets the expiration date for a given user. This is useful for server admins that - # want to avoid Synapse sending a lot of renewal emails at once. + # expiration dates won't be updated unless their account is manually renewed. This + # date will be randomly selected within a range [now + period ; now + period + d], + # where d is equal to 10% of the validity period. # #account_validity: # enabled: True # period: 6w # renew_at: 1w # renew_email_subject: "Renew your %%(app)s account" - # startup_job_max_delta: 2d # The user must provide all of the below types of 3PID when registering. # diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 40802fd3dc..7f944ec717 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -329,14 +329,13 @@ class SQLBaseStore(object): user_id (str): User ID to set an expiration date for. use_delta (bool): If set to False, the expiration date for the user will be now + validity period. If set to True, this expiration date will be a - random value in the [now + period; now + period + max_delta] range, - max_delta being the configured value for the size of the range, unless - delta is 0, in which case it sets it to now + period. + random value in the [now + period; now + period + d] range, d being a + delta equal to 10% of the validity period. """ now_ms = self._clock.time_msec() expiration_ts = now_ms + self._account_validity.period - if use_delta and self._account_validity.startup_job_max_delta: + if use_delta: expiration_ts = self.rand.randrange( expiration_ts, expiration_ts + self._account_validity.startup_job_max_delta, diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 68654e25ab..711628ded1 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -436,7 +436,7 @@ class AccountValidityBackgroundJobTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): self.validity_period = 10 - self.max_delta = 10 + self.max_delta = self.validity_period * 10. / 100. config = self.default_config() @@ -453,22 +453,6 @@ class AccountValidityBackgroundJobTestCase(unittest.HomeserverTestCase): return self.hs def test_background_job(self): - """ - Tests whether the account validity startup background job does the right thing, - which is sticking an expiration date to every account that doesn't already have - one. 
- """ - user_id = self.register_user("kermit", "user") - - self.hs.config.account_validity.startup_job_max_delta = 0 - - now_ms = self.hs.clock.time_msec() - self.get_success(self.store._set_expiration_date_when_missing()) - - res = self.get_success(self.store.get_expiration_ts_for_user(user_id)) - self.assertEqual(res, now_ms + self.validity_period) - - def test_background_job_with_max_delta(self): """ Tests the same thing as test_background_job, except that it sets the startup_job_max_delta parameter and checks that the expiration date is within the From 0c2362861e3fad44ede5e9c23dbef8e1a9113f36 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 31 May 2019 09:56:52 +0100 Subject: [PATCH 276/429] Gah python --- synapse/config/registration.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 1835b4b1f3..4af825a2ab 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -133,7 +133,7 @@ class RegistrationConfig(Config): # after that the validity period changes and Synapse is restarted, the users' # expiration dates won't be updated unless their account is manually renewed. This # date will be randomly selected within a range [now + period ; now + period + d], - # where d is equal to 10% of the validity period. + # where d is equal to 10%% of the validity period. # #account_validity: # enabled: True From 6bfc5ad3a189acd993a1ef9db36d28b963be345d Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 31 May 2019 09:56:57 +0100 Subject: [PATCH 277/429] Sample config --- docs/sample_config.yaml | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 8ff53d5cb4..13c0ddc7c5 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -753,20 +753,15 @@ uploads_path: "DATADIR/uploads" # This means that, if a validity period is set, and Synapse is restarted (it will # then derive an expiration date from the current validity period), and some time # after that the validity period changes and Synapse is restarted, the users' -# expiration dates won't be updated unless their account is manually renewed. -# -# If set, the ``startup_job_max_delta`` optional setting will make the startup job -# described above set a random expiration date between t + period and -# t + period + startup_job_max_delta, t being the date and time at which the job -# sets the expiration date for a given user. This is useful for server admins that -# want to avoid Synapse sending a lot of renewal emails at once. +# expiration dates won't be updated unless their account is manually renewed. This +# date will be randomly selected within a range [now + period ; now + period + d], +# where d is equal to 10% of the validity period. # #account_validity: # enabled: True # period: 6w # renew_at: 1w # renew_email_subject: "Renew your %(app)s account" -# startup_job_max_delta: 2d # The user must provide all of the below types of 3PID when registering. # From 5037326d6624d1d1780a0536d19ff79e275f8735 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 15:37:57 +0100 Subject: [PATCH 278/429] Add indices. Remove room_ids accidentally added We have to do this by re-inserting a background update and recreating tables, as the tables only get created during a background update and will later be deleted. 
We also make sure that we remove any entries that should have been removed but weren't due to a race that has been fixed in a previous commit. --- synapse/storage/schema/delta/54/stats2.sql | 28 +++++++++++++++ synapse/storage/stats.py | 41 +++++++++++++++------- 2 files changed, 56 insertions(+), 13 deletions(-) create mode 100644 synapse/storage/schema/delta/54/stats2.sql diff --git a/synapse/storage/schema/delta/54/stats2.sql b/synapse/storage/schema/delta/54/stats2.sql new file mode 100644 index 0000000000..3b2d48447f --- /dev/null +++ b/synapse/storage/schema/delta/54/stats2.sql @@ -0,0 +1,28 @@ +/* Copyright 2019 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- This delta file gets run after `54/stats.sql` delta. + +-- We want to add some indices to the temporary stats table, so we re-insert +-- 'populate_stats_createtables' if we are still processing the rooms update. +INSERT INTO background_updates (update_name, progress_json) + SELECT 'populate_stats_createtables', '{}' + WHERE + 'populate_stats_process_rooms' IN ( + SELECT update_name FROM background_updates + ) + AND 'populate_stats_createtables' NOT IN ( -- don't insert if already exists + SELECT update_name FROM background_updates + ); diff --git a/synapse/storage/stats.py b/synapse/storage/stats.py index a99637d4b4..1c0b183a56 100644 --- a/synapse/storage/stats.py +++ b/synapse/storage/stats.py @@ -18,6 +18,7 @@ import logging from twisted.internet import defer from synapse.api.constants import EventTypes, Membership +from synapse.storage.prepare_database import get_statements from synapse.storage.state_deltas import StateDeltasStore from synapse.util.caches.descriptors import cached @@ -69,12 +70,25 @@ class StatsStore(StateDeltasStore): # Get all the rooms that we want to process. def _make_staging_area(txn): - sql = ( - "CREATE TABLE IF NOT EXISTS " - + TEMP_TABLE - + "_rooms(room_id TEXT NOT NULL, events BIGINT NOT NULL)" - ) - txn.execute(sql) + # Create the temporary tables + stmts = get_statements(""" + -- We just recreate the table, we'll be reinserting the + -- correct entries again later anyway. + DROP TABLE IF EXISTS {temp}_rooms; + + CREATE TABLE IF NOT EXISTS {temp}_rooms( + room_id TEXT NOT NULL, + events BIGINT NOT NULL + ); + + CREATE INDEX {temp}_rooms_events + ON {temp}_rooms(events); + CREATE INDEX {temp}_rooms_id + ON {temp}_rooms(room_id); + """.format(temp=TEMP_TABLE).splitlines()) + + for statement in stmts: + txn.execute(statement) sql = ( "CREATE TABLE IF NOT EXISTS " @@ -83,15 +97,16 @@ class StatsStore(StateDeltasStore): ) txn.execute(sql) - # Get rooms we want to process from the database + # Get rooms we want to process from the database, only adding + # those that we haven't (i.e. 
those not in room_stats_earliest_token) sql = """ - SELECT room_id, count(*) FROM current_state_events - GROUP BY room_id - """ + INSERT INTO %s_rooms (room_id, events) + SELECT c.room_id, count(*) FROM current_state_events AS c + LEFT JOIN room_stats_earliest_token AS t USING (room_id) + WHERE t.room_id IS NULL + GROUP BY c.room_id + """ % (TEMP_TABLE,) txn.execute(sql) - rooms = [{"room_id": x[0], "events": x[1]} for x in txn.fetchall()] - self._simple_insert_many_txn(txn, TEMP_TABLE + "_rooms", rooms) - del rooms new_pos = yield self.get_max_stream_id_in_current_state_deltas() yield self.runInteraction("populate_stats_temp_build", _make_staging_area) From 39bbf6a4a5b954de56865a2aa0877587acbd9552 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 May 2019 16:07:23 +0100 Subject: [PATCH 279/429] Newsfile --- changelog.d/5294.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5294.bugfix diff --git a/changelog.d/5294.bugfix b/changelog.d/5294.bugfix new file mode 100644 index 0000000000..5924bda319 --- /dev/null +++ b/changelog.d/5294.bugfix @@ -0,0 +1 @@ +Fix performance problems with the rooms stats background update. From 4d794dae210ce30e87d8a6b9ee2f9b481cadf539 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 31 May 2019 11:09:34 +0100 Subject: [PATCH 280/429] Move delta from +10% to -10% --- synapse/config/registration.py | 2 +- synapse/storage/_base.py | 4 ++-- tests/rest/client/v2_alpha/test_register.py | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 4af825a2ab..aad3400819 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -132,7 +132,7 @@ class RegistrationConfig(Config): # then derive an expiration date from the current validity period), and some time # after that the validity period changes and Synapse is restarted, the users' # expiration dates won't be updated unless their account is manually renewed. This - # date will be randomly selected within a range [now + period ; now + period + d], + # date will be randomly selected within a range [now + period - d ; now + period], # where d is equal to 10%% of the validity period. # #account_validity: diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 7f944ec717..086318a530 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -329,7 +329,7 @@ class SQLBaseStore(object): user_id (str): User ID to set an expiration date for. use_delta (bool): If set to False, the expiration date for the user will be now + validity period. If set to True, this expiration date will be a - random value in the [now + period; now + period + d] range, d being a + random value in the [now + period - d ; now + period] range, d being a delta equal to 10% of the validity period. 
""" now_ms = self._clock.time_msec() @@ -337,8 +337,8 @@ class SQLBaseStore(object): if use_delta: expiration_ts = self.rand.randrange( + expiration_ts - self._account_validity.startup_job_max_delta, expiration_ts, - expiration_ts + self._account_validity.startup_job_max_delta, ) self._simple_insert_txn( diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 711628ded1..0cb6a363d6 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -467,5 +467,5 @@ class AccountValidityBackgroundJobTestCase(unittest.HomeserverTestCase): res = self.get_success(self.store.get_expiration_ts_for_user(user_id)) - self.assertLessEqual(res, now_ms + self.validity_period + self.max_delta) - self.assertGreaterEqual(res, now_ms + self.validity_period) + self.assertGreaterEqual(res, now_ms + self.validity_period - self.max_delta) + self.assertLessEqual(res, now_ms + self.validity_period) From e975b15101c08299218bd15963a9dc5ea6f990ff Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 31 May 2019 11:14:21 +0100 Subject: [PATCH 281/429] Sample config --- docs/sample_config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 13c0ddc7c5..9536681068 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -754,7 +754,7 @@ uploads_path: "DATADIR/uploads" # then derive an expiration date from the current validity period), and some time # after that the validity period changes and Synapse is restarted, the users' # expiration dates won't be updated unless their account is manually renewed. This -# date will be randomly selected within a range [now + period ; now + period + d], +# date will be randomly selected within a range [now + period - d ; now + period], # where d is equal to 10% of the validity period. # #account_validity: From 3600f5568b5f8c6902a0dbeeb349c1891f8114b9 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 31 May 2019 15:48:36 +0100 Subject: [PATCH 282/429] Stop overwriting server keys with other keys Fix a bug where we would discard a key result which the origin server is no longer returning. Fixes #5305. --- changelog.d/5307.bugfix | 1 + synapse/crypto/keyring.py | 14 ++------------ 2 files changed, 3 insertions(+), 12 deletions(-) create mode 100644 changelog.d/5307.bugfix diff --git a/changelog.d/5307.bugfix b/changelog.d/5307.bugfix new file mode 100644 index 0000000000..6b152f4854 --- /dev/null +++ b/changelog.d/5307.bugfix @@ -0,0 +1 @@ +Fix bug where a notary server would sometimes forget old keys. diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 5756478ad7..8f47469a1c 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -394,7 +394,7 @@ class BaseV2KeyFetcher(object): @defer.inlineCallbacks def process_v2_response( - self, from_server, response_json, time_added_ms, requested_ids=[] + self, from_server, response_json, time_added_ms ): """Parse a 'Server Keys' structure from the result of a /key request @@ -417,10 +417,6 @@ class BaseV2KeyFetcher(object): time_added_ms (int): the timestamp to record in server_keys_json - requested_ids (iterable[str]): a list of the key IDs that were requested. 
- We will store the json for these key ids as well as any that are - actually in the response - Returns: Deferred[dict[str, FetchKeyResult]]: map from key_id to result object """ @@ -476,11 +472,6 @@ class BaseV2KeyFetcher(object): signed_key_json_bytes = encode_canonical_json(signed_key_json) - # for reasons I don't quite understand, we store this json for the key ids we - # requested, as well as those we got. - updated_key_ids = set(requested_ids) - updated_key_ids.update(verify_keys) - yield logcontext.make_deferred_yieldable( defer.gatherResults( [ @@ -493,7 +484,7 @@ class BaseV2KeyFetcher(object): ts_expires_ms=ts_valid_until_ms, key_json_bytes=signed_key_json_bytes, ) - for key_id in updated_key_ids + for key_id in verify_keys ], consumeErrors=True, ).addErrback(unwrapFirstError) @@ -749,7 +740,6 @@ class ServerKeyFetcher(BaseV2KeyFetcher): response_keys = yield self.process_v2_response( from_server=server_name, - requested_ids=[requested_key_id], response_json=response, time_added_ms=time_now_ms, ) From d16c6375fe39deaafd70b151e496f5e15fd7b29c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Sat, 1 Jun 2019 10:42:33 +0100 Subject: [PATCH 283/429] Limit displaynames and avatar URLs These end up in join events everywhere, so let's limit them. Fixes #5079 --- changelog.d/5309.bugfix | 1 + synapse/handlers/profile.py | 13 +++++++++++++ synapse/handlers/register.py | 2 ++ 3 files changed, 16 insertions(+) create mode 100644 changelog.d/5309.bugfix diff --git a/changelog.d/5309.bugfix b/changelog.d/5309.bugfix new file mode 100644 index 0000000000..97b3527266 --- /dev/null +++ b/changelog.d/5309.bugfix @@ -0,0 +1 @@ +Prevent users from setting huge displaynames and avatar URLs. diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 91fc718ff8..a5fc6c5dbf 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -31,6 +31,9 @@ from ._base import BaseHandler logger = logging.getLogger(__name__) +MAX_DISPLAYNAME_LEN = 100 +MAX_AVATAR_URL_LEN = 1000 + class BaseProfileHandler(BaseHandler): """Handles fetching and updating user profile information. @@ -162,6 +165,11 @@ class BaseProfileHandler(BaseHandler): if not by_admin and target_user != requester.user: raise AuthError(400, "Cannot set another user's displayname") + if len(new_displayname) > MAX_DISPLAYNAME_LEN: + raise SynapseError( + 400, "Displayname is too long (max %i)" % (MAX_DISPLAYNAME_LEN, ), + ) + if new_displayname == '': new_displayname = None @@ -217,6 +225,11 @@ class BaseProfileHandler(BaseHandler): if not by_admin and target_user != requester.user: raise AuthError(400, "Cannot set another user's avatar_url") + if len(new_avatar_url) > MAX_AVATAR_URL_LEN: + raise SynapseError( + 400, "Avatar URL is too long (max %i)" % (MAX_AVATAR_URL_LEN, ), + ) + yield self.store.set_profile_avatar_url( target_user.localpart, new_avatar_url ) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index e83ee24f10..9a388ea013 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -531,6 +531,8 @@ class RegistrationHandler(BaseHandler): A tuple of (user_id, access_token). Raises: RegistrationError if there was a problem registering. + + NB this is only used in tests. TODO: move it to the test package! 
""" if localpart is None: raise SynapseError(400, "Request must include user id") From 93003aa1720af846f238bd0c6fd2f2a0df3c20ef Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Sat, 1 Jun 2019 11:13:49 +0100 Subject: [PATCH 284/429] add some tests --- tests/rest/client/v1/test_profile.py | 62 +++++++++++++++++++++++++++- 1 file changed, 60 insertions(+), 2 deletions(-) diff --git a/tests/rest/client/v1/test_profile.py b/tests/rest/client/v1/test_profile.py index 769c37ce52..f4d0d48dad 100644 --- a/tests/rest/client/v1/test_profile.py +++ b/tests/rest/client/v1/test_profile.py @@ -14,6 +14,8 @@ # limitations under the License. """Tests REST events for /profile paths.""" +import json + from mock import Mock from twisted.internet import defer @@ -31,8 +33,11 @@ myid = "@1234ABCD:test" PATH_PREFIX = "/_matrix/client/api/v1" -class ProfileTestCase(unittest.TestCase): - """ Tests profile management. """ +class MockHandlerProfileTestCase(unittest.TestCase): + """ Tests rest layer of profile management. + + Todo: move these into ProfileTestCase + """ @defer.inlineCallbacks def setUp(self): @@ -159,6 +164,59 @@ class ProfileTestCase(unittest.TestCase): self.assertEquals(mocked_set.call_args[0][2], "http://my.server/pic.gif") +class ProfileTestCase(unittest.HomeserverTestCase): + + servlets = [ + admin.register_servlets_for_client_rest_resource, + login.register_servlets, + profile.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + self.hs = self.setup_test_homeserver() + return self.hs + + def prepare(self, reactor, clock, hs): + self.owner = self.register_user("owner", "pass") + self.owner_tok = self.login("owner", "pass") + + def test_set_displayname(self): + request, channel = self.make_request( + "PUT", + "/profile/%s/displayname" % (self.owner, ), + content=json.dumps({"displayname": "test"}), + access_token=self.owner_tok, + ) + self.render(request) + self.assertEqual(channel.code, 200, channel.result) + + res = self.get_displayname() + self.assertEqual(res, "test") + + def test_set_displayname_too_long(self): + """Attempts to set a stupid displayname should get a 400""" + request, channel = self.make_request( + "PUT", + "/profile/%s/displayname" % (self.owner, ), + content=json.dumps({"displayname": "test" * 100}), + access_token=self.owner_tok, + ) + self.render(request) + self.assertEqual(channel.code, 400, channel.result) + + res = self.get_displayname() + self.assertEqual(res, "owner") + + def get_displayname(self): + request, channel = self.make_request( + "GET", + "/profile/%s/displayname" % (self.owner, ), + ) + self.render(request) + self.assertEqual(channel.code, 200, channel.result) + return channel.json_body["displayname"] + + class ProfilesRestrictedTestCase(unittest.HomeserverTestCase): servlets = [ From 220a733d7379be88514f7681ec37388755d4e612 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 3 Jun 2019 09:56:45 +0100 Subject: [PATCH 285/429] Fix handling of failures when calling /event_auth. When processing an incoming event over federation, we may try and resolve any unexpected differences in auth events. This is a non-essential process and so should not stop the processing of the event if it fails (e.g. due to the remote disappearing or not implementing the necessary endpoints). 
Fixes #3330 --- synapse/handlers/federation.py | 50 ++++++++++++++++++++++++++-------- 1 file changed, 38 insertions(+), 12 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index cf4fad7de0..fa735efedd 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -35,6 +35,7 @@ from synapse.api.errors import ( CodeMessageException, FederationDeniedError, FederationError, + RequestSendFailed, StoreError, SynapseError, ) @@ -2027,9 +2028,15 @@ class FederationHandler(BaseHandler): """ room_version = yield self.store.get_room_version(event.room_id) - yield self._update_auth_events_and_context_for_auth( - origin, event, context, auth_events - ) + try: + yield self._update_auth_events_and_context_for_auth( + origin, event, context, auth_events + ) + except Exception: + # We don't really mind if the above fails, so lets not fail + # processing if it does. + logger.exception("Failed to call _update_auth_events_and_context_for_auth") + try: self.auth.check(room_version, event, auth_events=auth_events) except AuthError as e: @@ -2042,6 +2049,15 @@ class FederationHandler(BaseHandler): ): """Helper for do_auth. See there for docs. + Checks whether a given event has the expected auth events. If it + doesn't then we talk to the remote server to compare state to see if + we can come to a consensus (e.g. if one server missed some valid + state). + + This attempts to resovle any potential divergence of state between + servers, but is not essential and so failures should not block further + processing of the event. + Args: origin (str): event (synapse.events.EventBase): @@ -2088,9 +2104,14 @@ class FederationHandler(BaseHandler): missing_auth, ) try: - remote_auth_chain = yield self.federation_client.get_event_auth( - origin, event.room_id, event.event_id - ) + try: + remote_auth_chain = yield self.federation_client.get_event_auth( + origin, event.room_id, event.event_id + ) + except RequestSendFailed: + # The other side isn't around or doesn't implement the + # endpoint, so lets just bail out. + return seen_remotes = yield self.store.have_seen_events( [e.event_id for e in remote_auth_chain] @@ -2236,12 +2257,17 @@ class FederationHandler(BaseHandler): try: # 2. Get remote difference. - result = yield self.federation_client.query_auth( - origin, - event.room_id, - event.event_id, - local_auth_chain, - ) + try: + result = yield self.federation_client.query_auth( + origin, + event.room_id, + event.event_id, + local_auth_chain, + ) + except RequestSendFailed: + # The other side isn't around or doesn't implement the + # endpoint, so lets just bail out. + return seen_remotes = yield self.store.have_seen_events( [e.event_id for e in result["auth_chain"]] From fde37e4e98163c269a2b82e4892a70b2e37c619c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 3 Jun 2019 10:22:03 +0100 Subject: [PATCH 286/429] Newsfile --- changelog.d/5317.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5317.bugfix diff --git a/changelog.d/5317.bugfix b/changelog.d/5317.bugfix new file mode 100644 index 0000000000..2709375214 --- /dev/null +++ b/changelog.d/5317.bugfix @@ -0,0 +1 @@ +Fix handling of failures when processing incoming events where calling `/event_auth` on remote server fails. 
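The change above boils down to one pattern: the auth-event comparison is best-effort, so transport-level failures from the remote are caught and the step is skipped rather than failing the whole event. A minimal, self-contained sketch of that shape (the coroutine name and the call_remote parameter are illustrative, not Synapse's actual API; only RequestSendFailed is taken from the diff above):

import logging

from twisted.internet import defer

from synapse.api.errors import RequestSendFailed

logger = logging.getLogger(__name__)


@defer.inlineCallbacks
def best_effort_remote_step(call_remote):
    # call_remote is assumed to be a zero-argument callable returning a
    # Deferred, standing in for e.g. federation_client.get_event_auth.
    try:
        result = yield call_remote()
    except RequestSendFailed:
        # The other side isn't around or doesn't implement the endpoint,
        # so give up on this optional step instead of failing the event.
        logger.info("Skipping non-essential remote step: remote unavailable")
        defer.returnValue(None)
    defer.returnValue(result)

In the diff itself this wrapping is applied separately around get_event_auth and query_auth, and the caller additionally swallows any exception from _update_auth_events_and_context_for_auth, so a failed comparison degrades to checking the event against the auth events already to hand instead of rejecting it.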
From 2889b055543c8db6bf93eaad7035d0eca1ec2874 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 3 Jun 2019 21:28:59 +1000 Subject: [PATCH 287/429] Unify v1 and v2 REST client APIs (#5226) --- changelog.d/5226.misc | 1 + scripts-dev/list_url_patterns.py | 4 +- synapse/app/frontend_proxy.py | 11 +- synapse/rest/client/v1/base.py | 65 --------- synapse/rest/client/v1/directory.py | 28 ++-- synapse/rest/client/v1/events.py | 17 ++- synapse/rest/client/v1/initial_sync.py | 12 +- synapse/rest/client/v1/login.py | 22 +-- synapse/rest/client/v1/logout.py | 19 +-- synapse/rest/client/v1/presence.py | 13 +- synapse/rest/client/v1/profile.py | 29 ++-- synapse/rest/client/v1/push_rule.py | 12 +- synapse/rest/client/v1/pusher.py | 21 +-- synapse/rest/client/v1/room.py | 137 +++++++++++------- synapse/rest/client/v1/voip.py | 11 +- synapse/rest/client/v2_alpha/_base.py | 6 +- synapse/rest/client/v2_alpha/account.py | 20 +-- synapse/rest/client/v2_alpha/account_data.py | 6 +- .../rest/client/v2_alpha/account_validity.py | 6 +- synapse/rest/client/v2_alpha/auth.py | 4 +- synapse/rest/client/v2_alpha/capabilities.py | 4 +- synapse/rest/client/v2_alpha/devices.py | 8 +- synapse/rest/client/v2_alpha/filter.py | 6 +- synapse/rest/client/v2_alpha/groups.py | 50 +++---- synapse/rest/client/v2_alpha/keys.py | 10 +- synapse/rest/client/v2_alpha/notifications.py | 4 +- synapse/rest/client/v2_alpha/openid.py | 4 +- synapse/rest/client/v2_alpha/read_marker.py | 4 +- synapse/rest/client/v2_alpha/receipts.py | 4 +- synapse/rest/client/v2_alpha/register.py | 10 +- synapse/rest/client/v2_alpha/relations.py | 12 +- synapse/rest/client/v2_alpha/report_event.py | 4 +- synapse/rest/client/v2_alpha/room_keys.py | 8 +- .../v2_alpha/room_upgrade_rest_servlet.py | 4 +- synapse/rest/client/v2_alpha/sendtodevice.py | 4 +- synapse/rest/client/v2_alpha/sync.py | 4 +- synapse/rest/client/v2_alpha/tags.py | 6 +- synapse/rest/client/v2_alpha/thirdparty.py | 10 +- synapse/rest/client/v2_alpha/tokenrefresh.py | 4 +- .../rest/client/v2_alpha/user_directory.py | 4 +- tests/__init__.py | 2 +- tests/rest/admin/test_admin.py | 1 - tests/rest/client/v1/test_profile.py | 2 +- 43 files changed, 296 insertions(+), 317 deletions(-) create mode 100644 changelog.d/5226.misc delete mode 100644 synapse/rest/client/v1/base.py diff --git a/changelog.d/5226.misc b/changelog.d/5226.misc new file mode 100644 index 0000000000..e1b9dc58a3 --- /dev/null +++ b/changelog.d/5226.misc @@ -0,0 +1 @@ +The base classes for the v1 and v2_alpha REST APIs have been unified. 
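The unification is carried by one helper: client_patterns grows a v1 flag, and every former ClientV1RestServlet re-registers through it, so a single path regex is compiled once per enabled URL prefix. A rough sketch of the matching behaviour, assuming CLIENT_API_PREFIX is "/_matrix/client" (an illustration of the idea, not the actual synapse.rest.client.v2_alpha._base module):

import re

CLIENT_API_PREFIX = "/_matrix/client"  # assumed value, for illustration only


def client_patterns(path_regex, releases=(0,), unstable=True, v1=False):
    # One compiled regex per enabled prefix, mirroring the diff below.
    patterns = []
    if unstable:
        patterns.append(re.compile("^" + CLIENT_API_PREFIX + "/unstable" + path_regex))
    if v1:
        patterns.append(re.compile("^" + CLIENT_API_PREFIX + "/api/v1" + path_regex))
    for release in releases:
        patterns.append(re.compile("^" + CLIENT_API_PREFIX + "/r%d" % (release,) + path_regex))
    return patterns


# A v1 endpoint such as /login now answers on all three prefixes:
assert [p.pattern for p in client_patterns("/login$", v1=True)] == [
    "^/_matrix/client/unstable/login$",
    "^/_matrix/client/api/v1/login$",
    "^/_matrix/client/r0/login$",
]

v2-only servlets keep calling client_patterns without the flag and are unaffected, which is why the bulk of the diff below is a mechanical rename from client_v2_patterns to client_patterns.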
diff --git a/scripts-dev/list_url_patterns.py b/scripts-dev/list_url_patterns.py index da027be26e..62e5a07472 100755 --- a/scripts-dev/list_url_patterns.py +++ b/scripts-dev/list_url_patterns.py @@ -20,9 +20,7 @@ class CallVisitor(ast.NodeVisitor): else: return - if name == "client_path_patterns": - PATTERNS_V1.append(node.args[0].s) - elif name == "client_v2_patterns": + if name == "client_patterns": PATTERNS_V2.append(node.args[0].s) diff --git a/synapse/app/frontend_proxy.py b/synapse/app/frontend_proxy.py index 8479fee738..6504da5278 100644 --- a/synapse/app/frontend_proxy.py +++ b/synapse/app/frontend_proxy.py @@ -37,8 +37,7 @@ from synapse.replication.slave.storage.client_ips import SlavedClientIpStore from synapse.replication.slave.storage.devices import SlavedDeviceStore from synapse.replication.slave.storage.registration import SlavedRegistrationStore from synapse.replication.tcp.client import ReplicationClientHandler -from synapse.rest.client.v1.base import ClientV1RestServlet, client_path_patterns -from synapse.rest.client.v2_alpha._base import client_v2_patterns +from synapse.rest.client.v2_alpha._base import client_patterns from synapse.server import HomeServer from synapse.storage.engines import create_engine from synapse.util.httpresourcetree import create_resource_tree @@ -49,11 +48,11 @@ from synapse.util.versionstring import get_version_string logger = logging.getLogger("synapse.app.frontend_proxy") -class PresenceStatusStubServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/presence/(?P[^/]*)/status") +class PresenceStatusStubServlet(RestServlet): + PATTERNS = client_patterns("/presence/(?P[^/]*)/status") def __init__(self, hs): - super(PresenceStatusStubServlet, self).__init__(hs) + super(PresenceStatusStubServlet, self).__init__() self.http_client = hs.get_simple_http_client() self.auth = hs.get_auth() self.main_uri = hs.config.worker_main_http_uri @@ -84,7 +83,7 @@ class PresenceStatusStubServlet(ClientV1RestServlet): class KeyUploadServlet(RestServlet): - PATTERNS = client_v2_patterns("/keys/upload(/(?P[^/]+))?$") + PATTERNS = client_patterns("/keys/upload(/(?P[^/]+))?$") def __init__(self, hs): """ diff --git a/synapse/rest/client/v1/base.py b/synapse/rest/client/v1/base.py deleted file mode 100644 index dc63b661c0..0000000000 --- a/synapse/rest/client/v1/base.py +++ /dev/null @@ -1,65 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2014-2016 OpenMarket Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""This module contains base REST classes for constructing client v1 servlets. -""" - -import logging -import re - -from synapse.api.urls import CLIENT_API_PREFIX -from synapse.http.servlet import RestServlet -from synapse.rest.client.transactions import HttpTransactionCache - -logger = logging.getLogger(__name__) - - -def client_path_patterns(path_regex, releases=(0,), include_in_unstable=True): - """Creates a regex compiled client path with the correct client path - prefix. - - Args: - path_regex (str): The regex string to match. 
This should NOT have a ^ - as this will be prefixed. - Returns: - SRE_Pattern - """ - patterns = [re.compile("^" + CLIENT_API_PREFIX + "/api/v1" + path_regex)] - if include_in_unstable: - unstable_prefix = CLIENT_API_PREFIX + "/unstable" - patterns.append(re.compile("^" + unstable_prefix + path_regex)) - for release in releases: - new_prefix = CLIENT_API_PREFIX + "/r%d" % (release,) - patterns.append(re.compile("^" + new_prefix + path_regex)) - return patterns - - -class ClientV1RestServlet(RestServlet): - """A base Synapse REST Servlet for the client version 1 API. - """ - - # This subclass was presumably created to allow the auth for the v1 - # protocol version to be different, however this behaviour was removed. - # it may no longer be necessary - - def __init__(self, hs): - """ - Args: - hs (synapse.server.HomeServer): - """ - self.hs = hs - self.builder_factory = hs.get_event_builder_factory() - self.auth = hs.get_auth() - self.txns = HttpTransactionCache(hs) diff --git a/synapse/rest/client/v1/directory.py b/synapse/rest/client/v1/directory.py index 0220acf644..0035182bb9 100644 --- a/synapse/rest/client/v1/directory.py +++ b/synapse/rest/client/v1/directory.py @@ -19,11 +19,10 @@ import logging from twisted.internet import defer from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError -from synapse.http.servlet import parse_json_object_from_request +from synapse.http.servlet import RestServlet, parse_json_object_from_request +from synapse.rest.client.v2_alpha._base import client_patterns from synapse.types import RoomAlias -from .base import ClientV1RestServlet, client_path_patterns - logger = logging.getLogger(__name__) @@ -33,13 +32,14 @@ def register_servlets(hs, http_server): ClientAppserviceDirectoryListServer(hs).register(http_server) -class ClientDirectoryServer(ClientV1RestServlet): - PATTERNS = client_path_patterns("/directory/room/(?P[^/]*)$") +class ClientDirectoryServer(RestServlet): + PATTERNS = client_patterns("/directory/room/(?P[^/]*)$", v1=True) def __init__(self, hs): - super(ClientDirectoryServer, self).__init__(hs) + super(ClientDirectoryServer, self).__init__() self.store = hs.get_datastore() self.handlers = hs.get_handlers() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, room_alias): @@ -120,13 +120,14 @@ class ClientDirectoryServer(ClientV1RestServlet): defer.returnValue((200, {})) -class ClientDirectoryListServer(ClientV1RestServlet): - PATTERNS = client_path_patterns("/directory/list/room/(?P[^/]*)$") +class ClientDirectoryListServer(RestServlet): + PATTERNS = client_patterns("/directory/list/room/(?P[^/]*)$", v1=True) def __init__(self, hs): - super(ClientDirectoryListServer, self).__init__(hs) + super(ClientDirectoryListServer, self).__init__() self.store = hs.get_datastore() self.handlers = hs.get_handlers() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, room_id): @@ -162,15 +163,16 @@ class ClientDirectoryListServer(ClientV1RestServlet): defer.returnValue((200, {})) -class ClientAppserviceDirectoryListServer(ClientV1RestServlet): - PATTERNS = client_path_patterns( - "/directory/list/appservice/(?P[^/]*)/(?P[^/]*)$" +class ClientAppserviceDirectoryListServer(RestServlet): + PATTERNS = client_patterns( + "/directory/list/appservice/(?P[^/]*)/(?P[^/]*)$", v1=True ) def __init__(self, hs): - super(ClientAppserviceDirectoryListServer, self).__init__(hs) + super(ClientAppserviceDirectoryListServer, self).__init__() self.store = hs.get_datastore() self.handlers = hs.get_handlers() + 
self.auth = hs.get_auth() def on_PUT(self, request, network_id, room_id): content = parse_json_object_from_request(request) diff --git a/synapse/rest/client/v1/events.py b/synapse/rest/client/v1/events.py index c3b0a39ab7..84ca36270b 100644 --- a/synapse/rest/client/v1/events.py +++ b/synapse/rest/client/v1/events.py @@ -19,21 +19,22 @@ import logging from twisted.internet import defer from synapse.api.errors import SynapseError +from synapse.http.servlet import RestServlet +from synapse.rest.client.v2_alpha._base import client_patterns from synapse.streams.config import PaginationConfig -from .base import ClientV1RestServlet, client_path_patterns - logger = logging.getLogger(__name__) -class EventStreamRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/events$") +class EventStreamRestServlet(RestServlet): + PATTERNS = client_patterns("/events$", v1=True) DEFAULT_LONGPOLL_TIME_MS = 30000 def __init__(self, hs): - super(EventStreamRestServlet, self).__init__(hs) + super(EventStreamRestServlet, self).__init__() self.event_stream_handler = hs.get_event_stream_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request): @@ -76,11 +77,11 @@ class EventStreamRestServlet(ClientV1RestServlet): # TODO: Unit test gets, with and without auth, with different kinds of events. -class EventRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/events/(?P[^/]*)$") +class EventRestServlet(RestServlet): + PATTERNS = client_patterns("/events/(?P[^/]*)$", v1=True) def __init__(self, hs): - super(EventRestServlet, self).__init__(hs) + super(EventRestServlet, self).__init__() self.clock = hs.get_clock() self.event_handler = hs.get_event_handler() self._event_serializer = hs.get_event_client_serializer() diff --git a/synapse/rest/client/v1/initial_sync.py b/synapse/rest/client/v1/initial_sync.py index 3ead75cb77..0fe5f2d79b 100644 --- a/synapse/rest/client/v1/initial_sync.py +++ b/synapse/rest/client/v1/initial_sync.py @@ -15,19 +15,19 @@ from twisted.internet import defer -from synapse.http.servlet import parse_boolean +from synapse.http.servlet import RestServlet, parse_boolean +from synapse.rest.client.v2_alpha._base import client_patterns from synapse.streams.config import PaginationConfig -from .base import ClientV1RestServlet, client_path_patterns - # TODO: Needs unit testing -class InitialSyncRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/initialSync$") +class InitialSyncRestServlet(RestServlet): + PATTERNS = client_patterns("/initialSync$", v1=True) def __init__(self, hs): - super(InitialSyncRestServlet, self).__init__(hs) + super(InitialSyncRestServlet, self).__init__() self.initial_sync_handler = hs.get_initial_sync_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request): diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 029039c162..3b60728628 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -29,12 +29,11 @@ from synapse.http.servlet import ( parse_json_object_from_request, parse_string, ) +from synapse.rest.client.v2_alpha._base import client_patterns from synapse.rest.well_known import WellKnownBuilder from synapse.types import UserID, map_username_to_mxid_localpart from synapse.util.msisdn import phone_number_to_msisdn -from .base import ClientV1RestServlet, client_path_patterns - logger = logging.getLogger(__name__) @@ -81,15 +80,16 @@ def login_id_thirdparty_from_phone(identifier): } -class 
LoginRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/login$") +class LoginRestServlet(RestServlet): + PATTERNS = client_patterns("/login$", v1=True) CAS_TYPE = "m.login.cas" SSO_TYPE = "m.login.sso" TOKEN_TYPE = "m.login.token" JWT_TYPE = "m.login.jwt" def __init__(self, hs): - super(LoginRestServlet, self).__init__(hs) + super(LoginRestServlet, self).__init__() + self.hs = hs self.jwt_enabled = hs.config.jwt_enabled self.jwt_secret = hs.config.jwt_secret self.jwt_algorithm = hs.config.jwt_algorithm @@ -371,7 +371,7 @@ class LoginRestServlet(ClientV1RestServlet): class CasRedirectServlet(RestServlet): - PATTERNS = client_path_patterns("/login/(cas|sso)/redirect") + PATTERNS = client_patterns("/login/(cas|sso)/redirect", v1=True) def __init__(self, hs): super(CasRedirectServlet, self).__init__() @@ -394,27 +394,27 @@ class CasRedirectServlet(RestServlet): finish_request(request) -class CasTicketServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/login/cas/ticket") +class CasTicketServlet(RestServlet): + PATTERNS = client_patterns("/login/cas/ticket", v1=True) def __init__(self, hs): - super(CasTicketServlet, self).__init__(hs) + super(CasTicketServlet, self).__init__() self.cas_server_url = hs.config.cas_server_url self.cas_service_url = hs.config.cas_service_url self.cas_required_attributes = hs.config.cas_required_attributes self._sso_auth_handler = SSOAuthHandler(hs) + self._http_client = hs.get_simple_http_client() @defer.inlineCallbacks def on_GET(self, request): client_redirect_url = parse_string(request, "redirectUrl", required=True) - http_client = self.hs.get_simple_http_client() uri = self.cas_server_url + "/proxyValidate" args = { "ticket": parse_string(request, "ticket", required=True), "service": self.cas_service_url } try: - body = yield http_client.get_raw(uri, args) + body = yield self._http_client.get_raw(uri, args) except PartialDownloadError as pde: # Twisted raises this error if the connection is closed, # even if that's being used old-http style to signal end-of-data diff --git a/synapse/rest/client/v1/logout.py b/synapse/rest/client/v1/logout.py index ba20e75033..b8064f261e 100644 --- a/synapse/rest/client/v1/logout.py +++ b/synapse/rest/client/v1/logout.py @@ -17,17 +17,18 @@ import logging from twisted.internet import defer -from .base import ClientV1RestServlet, client_path_patterns +from synapse.http.servlet import RestServlet +from synapse.rest.client.v2_alpha._base import client_patterns logger = logging.getLogger(__name__) -class LogoutRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/logout$") +class LogoutRestServlet(RestServlet): + PATTERNS = client_patterns("/logout$", v1=True) def __init__(self, hs): - super(LogoutRestServlet, self).__init__(hs) - self._auth = hs.get_auth() + super(LogoutRestServlet, self).__init__() + self.auth = hs.get_auth() self._auth_handler = hs.get_auth_handler() self._device_handler = hs.get_device_handler() @@ -41,7 +42,7 @@ class LogoutRestServlet(ClientV1RestServlet): if requester.device_id is None: # the acccess token wasn't associated with a device. 
# Just delete the access token - access_token = self._auth.get_access_token_from_request(request) + access_token = self.auth.get_access_token_from_request(request) yield self._auth_handler.delete_access_token(access_token) else: yield self._device_handler.delete_device( @@ -50,11 +51,11 @@ class LogoutRestServlet(ClientV1RestServlet): defer.returnValue((200, {})) -class LogoutAllRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/logout/all$") +class LogoutAllRestServlet(RestServlet): + PATTERNS = client_patterns("/logout/all$", v1=True) def __init__(self, hs): - super(LogoutAllRestServlet, self).__init__(hs) + super(LogoutAllRestServlet, self).__init__() self.auth = hs.get_auth() self._auth_handler = hs.get_auth_handler() self._device_handler = hs.get_device_handler() diff --git a/synapse/rest/client/v1/presence.py b/synapse/rest/client/v1/presence.py index 045d5a20ac..e263da3cb7 100644 --- a/synapse/rest/client/v1/presence.py +++ b/synapse/rest/client/v1/presence.py @@ -23,21 +23,22 @@ from twisted.internet import defer from synapse.api.errors import AuthError, SynapseError from synapse.handlers.presence import format_user_presence_state -from synapse.http.servlet import parse_json_object_from_request +from synapse.http.servlet import RestServlet, parse_json_object_from_request +from synapse.rest.client.v2_alpha._base import client_patterns from synapse.types import UserID -from .base import ClientV1RestServlet, client_path_patterns - logger = logging.getLogger(__name__) -class PresenceStatusRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/presence/(?P[^/]*)/status") +class PresenceStatusRestServlet(RestServlet): + PATTERNS = client_patterns("/presence/(?P[^/]*)/status", v1=True) def __init__(self, hs): - super(PresenceStatusRestServlet, self).__init__(hs) + super(PresenceStatusRestServlet, self).__init__() + self.hs = hs self.presence_handler = hs.get_presence_handler() self.clock = hs.get_clock() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, user_id): diff --git a/synapse/rest/client/v1/profile.py b/synapse/rest/client/v1/profile.py index eac1966c5e..e15d9d82a6 100644 --- a/synapse/rest/client/v1/profile.py +++ b/synapse/rest/client/v1/profile.py @@ -16,18 +16,19 @@ """ This module contains REST servlets to do with profile: /profile/ """ from twisted.internet import defer -from synapse.http.servlet import parse_json_object_from_request +from synapse.http.servlet import RestServlet, parse_json_object_from_request +from synapse.rest.client.v2_alpha._base import client_patterns from synapse.types import UserID -from .base import ClientV1RestServlet, client_path_patterns - -class ProfileDisplaynameRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/profile/(?P[^/]*)/displayname") +class ProfileDisplaynameRestServlet(RestServlet): + PATTERNS = client_patterns("/profile/(?P[^/]*)/displayname", v1=True) def __init__(self, hs): - super(ProfileDisplaynameRestServlet, self).__init__(hs) + super(ProfileDisplaynameRestServlet, self).__init__() + self.hs = hs self.profile_handler = hs.get_profile_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, user_id): @@ -71,12 +72,14 @@ class ProfileDisplaynameRestServlet(ClientV1RestServlet): return (200, {}) -class ProfileAvatarURLRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/profile/(?P[^/]*)/avatar_url") +class ProfileAvatarURLRestServlet(RestServlet): + PATTERNS = 
client_patterns("/profile/(?P[^/]*)/avatar_url", v1=True) def __init__(self, hs): - super(ProfileAvatarURLRestServlet, self).__init__(hs) + super(ProfileAvatarURLRestServlet, self).__init__() + self.hs = hs self.profile_handler = hs.get_profile_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, user_id): @@ -119,12 +122,14 @@ class ProfileAvatarURLRestServlet(ClientV1RestServlet): return (200, {}) -class ProfileRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/profile/(?P[^/]*)") +class ProfileRestServlet(RestServlet): + PATTERNS = client_patterns("/profile/(?P[^/]*)", v1=True) def __init__(self, hs): - super(ProfileRestServlet, self).__init__(hs) + super(ProfileRestServlet, self).__init__() + self.hs = hs self.profile_handler = hs.get_profile_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, user_id): diff --git a/synapse/rest/client/v1/push_rule.py b/synapse/rest/client/v1/push_rule.py index 506ec95ddd..3d6326fe2f 100644 --- a/synapse/rest/client/v1/push_rule.py +++ b/synapse/rest/client/v1/push_rule.py @@ -21,22 +21,22 @@ from synapse.api.errors import ( SynapseError, UnrecognizedRequestError, ) -from synapse.http.servlet import parse_json_value_from_request, parse_string +from synapse.http.servlet import RestServlet, parse_json_value_from_request, parse_string from synapse.push.baserules import BASE_RULE_IDS from synapse.push.clientformat import format_push_rules_for_user from synapse.push.rulekinds import PRIORITY_CLASS_MAP +from synapse.rest.client.v2_alpha._base import client_patterns from synapse.storage.push_rule import InconsistentRuleException, RuleNotFoundException -from .base import ClientV1RestServlet, client_path_patterns - -class PushRuleRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/(?Ppushrules/.*)$") +class PushRuleRestServlet(RestServlet): + PATTERNS = client_patterns("/(?Ppushrules/.*)$", v1=True) SLIGHTLY_PEDANTIC_TRAILING_SLASH_ERROR = ( "Unrecognised request: You probably wanted a trailing slash") def __init__(self, hs): - super(PushRuleRestServlet, self).__init__(hs) + super(PushRuleRestServlet, self).__init__() + self.auth = hs.get_auth() self.store = hs.get_datastore() self.notifier = hs.get_notifier() self._is_worker = hs.config.worker_app is not None diff --git a/synapse/rest/client/v1/pusher.py b/synapse/rest/client/v1/pusher.py index 4c07ae7f45..15d860db37 100644 --- a/synapse/rest/client/v1/pusher.py +++ b/synapse/rest/client/v1/pusher.py @@ -26,17 +26,18 @@ from synapse.http.servlet import ( parse_string, ) from synapse.push import PusherConfigException - -from .base import ClientV1RestServlet, client_path_patterns +from synapse.rest.client.v2_alpha._base import client_patterns logger = logging.getLogger(__name__) -class PushersRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/pushers$") +class PushersRestServlet(RestServlet): + PATTERNS = client_patterns("/pushers$", v1=True) def __init__(self, hs): - super(PushersRestServlet, self).__init__(hs) + super(PushersRestServlet, self).__init__() + self.hs = hs + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request): @@ -69,11 +70,13 @@ class PushersRestServlet(ClientV1RestServlet): return 200, {} -class PushersSetRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/pushers/set$") +class PushersSetRestServlet(RestServlet): + PATTERNS = client_patterns("/pushers/set$", v1=True) def __init__(self, hs): - super(PushersSetRestServlet, 
self).__init__(hs) + super(PushersSetRestServlet, self).__init__() + self.hs = hs + self.auth = hs.get_auth() self.notifier = hs.get_notifier() self.pusher_pool = self.hs.get_pusherpool() @@ -141,7 +144,7 @@ class PushersRemoveRestServlet(RestServlet): """ To allow pusher to be delete by clicking a link (ie. GET request) """ - PATTERNS = client_path_patterns("/pushers/remove$") + PATTERNS = client_patterns("/pushers/remove$", v1=True) SUCCESS_HTML = b"You have been unsubscribed" def __init__(self, hs): diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index b92c6a9a9c..e8f672c4ba 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -28,37 +28,45 @@ from synapse.api.errors import AuthError, Codes, SynapseError from synapse.api.filtering import Filter from synapse.events.utils import format_event_for_client_v2 from synapse.http.servlet import ( + RestServlet, assert_params_in_dict, parse_integer, parse_json_object_from_request, parse_string, ) +from synapse.rest.client.transactions import HttpTransactionCache +from synapse.rest.client.v2_alpha._base import client_patterns from synapse.storage.state import StateFilter from synapse.streams.config import PaginationConfig from synapse.types import RoomAlias, RoomID, StreamToken, ThirdPartyInstanceID, UserID -from .base import ClientV1RestServlet, client_path_patterns - logger = logging.getLogger(__name__) -class RoomCreateRestServlet(ClientV1RestServlet): +class TransactionRestServlet(RestServlet): + def __init__(self, hs): + super(TransactionRestServlet, self).__init__() + self.txns = HttpTransactionCache(hs) + + +class RoomCreateRestServlet(TransactionRestServlet): # No PATTERN; we have custom dispatch rules here def __init__(self, hs): super(RoomCreateRestServlet, self).__init__(hs) self._room_creation_handler = hs.get_room_creation_handler() + self.auth = hs.get_auth() def register(self, http_server): PATTERNS = "/createRoom" register_txn_path(self, PATTERNS, http_server) # define CORS for all of /rooms in RoomCreateRestServlet for simplicity http_server.register_paths("OPTIONS", - client_path_patterns("/rooms(?:/.*)?$"), + client_patterns("/rooms(?:/.*)?$", v1=True), self.on_OPTIONS) # define CORS for /createRoom[/txnid] http_server.register_paths("OPTIONS", - client_path_patterns("/createRoom(?:/.*)?$"), + client_patterns("/createRoom(?:/.*)?$", v1=True), self.on_OPTIONS) def on_PUT(self, request, txn_id): @@ -85,13 +93,14 @@ class RoomCreateRestServlet(ClientV1RestServlet): # TODO: Needs unit testing for generic events -class RoomStateEventRestServlet(ClientV1RestServlet): +class RoomStateEventRestServlet(TransactionRestServlet): def __init__(self, hs): super(RoomStateEventRestServlet, self).__init__(hs) self.handlers = hs.get_handlers() self.event_creation_handler = hs.get_event_creation_handler() self.room_member_handler = hs.get_room_member_handler() self.message_handler = hs.get_message_handler() + self.auth = hs.get_auth() def register(self, http_server): # /room/$roomid/state/$eventtype @@ -102,16 +111,16 @@ class RoomStateEventRestServlet(ClientV1RestServlet): "(?P[^/]*)/(?P[^/]*)$") http_server.register_paths("GET", - client_path_patterns(state_key), + client_patterns(state_key, v1=True), self.on_GET) http_server.register_paths("PUT", - client_path_patterns(state_key), + client_patterns(state_key, v1=True), self.on_PUT) http_server.register_paths("GET", - client_path_patterns(no_state_key), + client_patterns(no_state_key, v1=True), self.on_GET_no_state_key) 
http_server.register_paths("PUT", - client_path_patterns(no_state_key), + client_patterns(no_state_key, v1=True), self.on_PUT_no_state_key) def on_GET_no_state_key(self, request, room_id, event_type): @@ -185,11 +194,12 @@ class RoomStateEventRestServlet(ClientV1RestServlet): # TODO: Needs unit testing for generic events + feedback -class RoomSendEventRestServlet(ClientV1RestServlet): +class RoomSendEventRestServlet(TransactionRestServlet): def __init__(self, hs): super(RoomSendEventRestServlet, self).__init__(hs) self.event_creation_handler = hs.get_event_creation_handler() + self.auth = hs.get_auth() def register(self, http_server): # /rooms/$roomid/send/$event_type[/$txn_id] @@ -229,10 +239,11 @@ class RoomSendEventRestServlet(ClientV1RestServlet): # TODO: Needs unit testing for room ID + alias joins -class JoinRoomAliasServlet(ClientV1RestServlet): +class JoinRoomAliasServlet(TransactionRestServlet): def __init__(self, hs): super(JoinRoomAliasServlet, self).__init__(hs) self.room_member_handler = hs.get_room_member_handler() + self.auth = hs.get_auth() def register(self, http_server): # /join/$room_identifier[/$txn_id] @@ -291,8 +302,13 @@ class JoinRoomAliasServlet(ClientV1RestServlet): # TODO: Needs unit testing -class PublicRoomListRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/publicRooms$") +class PublicRoomListRestServlet(TransactionRestServlet): + PATTERNS = client_patterns("/publicRooms$", v1=True) + + def __init__(self, hs): + super(PublicRoomListRestServlet, self).__init__(hs) + self.hs = hs + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request): @@ -382,12 +398,13 @@ class PublicRoomListRestServlet(ClientV1RestServlet): # TODO: Needs unit testing -class RoomMemberListRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/rooms/(?P[^/]*)/members$") +class RoomMemberListRestServlet(RestServlet): + PATTERNS = client_patterns("/rooms/(?P[^/]*)/members$", v1=True) def __init__(self, hs): - super(RoomMemberListRestServlet, self).__init__(hs) + super(RoomMemberListRestServlet, self).__init__() self.message_handler = hs.get_message_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, room_id): @@ -436,12 +453,13 @@ class RoomMemberListRestServlet(ClientV1RestServlet): # deprecated in favour of /members?membership=join? 
# except it does custom AS logic and has a simpler return format -class JoinedRoomMemberListRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/rooms/(?P[^/]*)/joined_members$") +class JoinedRoomMemberListRestServlet(RestServlet): + PATTERNS = client_patterns("/rooms/(?P[^/]*)/joined_members$", v1=True) def __init__(self, hs): - super(JoinedRoomMemberListRestServlet, self).__init__(hs) + super(JoinedRoomMemberListRestServlet, self).__init__() self.message_handler = hs.get_message_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, room_id): @@ -457,12 +475,13 @@ class JoinedRoomMemberListRestServlet(ClientV1RestServlet): # TODO: Needs better unit testing -class RoomMessageListRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/rooms/(?P[^/]*)/messages$") +class RoomMessageListRestServlet(RestServlet): + PATTERNS = client_patterns("/rooms/(?P[^/]*)/messages$", v1=True) def __init__(self, hs): - super(RoomMessageListRestServlet, self).__init__(hs) + super(RoomMessageListRestServlet, self).__init__() self.pagination_handler = hs.get_pagination_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, room_id): @@ -491,12 +510,13 @@ class RoomMessageListRestServlet(ClientV1RestServlet): # TODO: Needs unit testing -class RoomStateRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/rooms/(?P[^/]*)/state$") +class RoomStateRestServlet(RestServlet): + PATTERNS = client_patterns("/rooms/(?P[^/]*)/state$", v1=True) def __init__(self, hs): - super(RoomStateRestServlet, self).__init__(hs) + super(RoomStateRestServlet, self).__init__() self.message_handler = hs.get_message_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, room_id): @@ -511,12 +531,13 @@ class RoomStateRestServlet(ClientV1RestServlet): # TODO: Needs unit testing -class RoomInitialSyncRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/rooms/(?P[^/]*)/initialSync$") +class RoomInitialSyncRestServlet(RestServlet): + PATTERNS = client_patterns("/rooms/(?P[^/]*)/initialSync$", v1=True) def __init__(self, hs): - super(RoomInitialSyncRestServlet, self).__init__(hs) + super(RoomInitialSyncRestServlet, self).__init__() self.initial_sync_handler = hs.get_initial_sync_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, room_id): @@ -530,16 +551,17 @@ class RoomInitialSyncRestServlet(ClientV1RestServlet): defer.returnValue((200, content)) -class RoomEventServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns( - "/rooms/(?P[^/]*)/event/(?P[^/]*)$" +class RoomEventServlet(RestServlet): + PATTERNS = client_patterns( + "/rooms/(?P[^/]*)/event/(?P[^/]*)$", v1=True ) def __init__(self, hs): - super(RoomEventServlet, self).__init__(hs) + super(RoomEventServlet, self).__init__() self.clock = hs.get_clock() self.event_handler = hs.get_event_handler() self._event_serializer = hs.get_event_client_serializer() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, room_id, event_id): @@ -554,16 +576,17 @@ class RoomEventServlet(ClientV1RestServlet): defer.returnValue((404, "Event not found.")) -class RoomEventContextServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns( - "/rooms/(?P[^/]*)/context/(?P[^/]*)$" +class RoomEventContextServlet(RestServlet): + PATTERNS = client_patterns( + "/rooms/(?P[^/]*)/context/(?P[^/]*)$", v1=True ) def __init__(self, hs): - super(RoomEventContextServlet, self).__init__(hs) + 
super(RoomEventContextServlet, self).__init__() self.clock = hs.get_clock() self.room_context_handler = hs.get_room_context_handler() self._event_serializer = hs.get_event_client_serializer() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request, room_id, event_id): @@ -609,10 +632,11 @@ class RoomEventContextServlet(ClientV1RestServlet): defer.returnValue((200, results)) -class RoomForgetRestServlet(ClientV1RestServlet): +class RoomForgetRestServlet(TransactionRestServlet): def __init__(self, hs): super(RoomForgetRestServlet, self).__init__(hs) self.room_member_handler = hs.get_room_member_handler() + self.auth = hs.get_auth() def register(self, http_server): PATTERNS = ("/rooms/(?P[^/]*)/forget") @@ -639,11 +663,12 @@ class RoomForgetRestServlet(ClientV1RestServlet): # TODO: Needs unit testing -class RoomMembershipRestServlet(ClientV1RestServlet): +class RoomMembershipRestServlet(TransactionRestServlet): def __init__(self, hs): super(RoomMembershipRestServlet, self).__init__(hs) self.room_member_handler = hs.get_room_member_handler() + self.auth = hs.get_auth() def register(self, http_server): # /rooms/$roomid/[invite|join|leave] @@ -722,11 +747,12 @@ class RoomMembershipRestServlet(ClientV1RestServlet): ) -class RoomRedactEventRestServlet(ClientV1RestServlet): +class RoomRedactEventRestServlet(TransactionRestServlet): def __init__(self, hs): super(RoomRedactEventRestServlet, self).__init__(hs) self.handlers = hs.get_handlers() self.event_creation_handler = hs.get_event_creation_handler() + self.auth = hs.get_auth() def register(self, http_server): PATTERNS = ("/rooms/(?P[^/]*)/redact/(?P[^/]*)") @@ -757,15 +783,16 @@ class RoomRedactEventRestServlet(ClientV1RestServlet): ) -class RoomTypingRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns( - "/rooms/(?P[^/]*)/typing/(?P[^/]*)$" +class RoomTypingRestServlet(RestServlet): + PATTERNS = client_patterns( + "/rooms/(?P[^/]*)/typing/(?P[^/]*)$", v1=True ) def __init__(self, hs): - super(RoomTypingRestServlet, self).__init__(hs) + super(RoomTypingRestServlet, self).__init__() self.presence_handler = hs.get_presence_handler() self.typing_handler = hs.get_typing_handler() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_PUT(self, request, room_id, user_id): @@ -798,14 +825,13 @@ class RoomTypingRestServlet(ClientV1RestServlet): defer.returnValue((200, {})) -class SearchRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns( - "/search$" - ) +class SearchRestServlet(RestServlet): + PATTERNS = client_patterns("/search$", v1=True) def __init__(self, hs): - super(SearchRestServlet, self).__init__(hs) + super(SearchRestServlet, self).__init__() self.handlers = hs.get_handlers() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_POST(self, request): @@ -823,12 +849,13 @@ class SearchRestServlet(ClientV1RestServlet): defer.returnValue((200, results)) -class JoinedRoomsRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/joined_rooms$") +class JoinedRoomsRestServlet(RestServlet): + PATTERNS = client_patterns("/joined_rooms$", v1=True) def __init__(self, hs): - super(JoinedRoomsRestServlet, self).__init__(hs) + super(JoinedRoomsRestServlet, self).__init__() self.store = hs.get_datastore() + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request): @@ -853,18 +880,18 @@ def register_txn_path(servlet, regex_string, http_server, with_get=False): """ http_server.register_paths( "POST", - client_path_patterns(regex_string + "$"), + 
client_patterns(regex_string + "$", v1=True), servlet.on_POST ) http_server.register_paths( "PUT", - client_path_patterns(regex_string + "/(?P[^/]*)$"), + client_patterns(regex_string + "/(?P[^/]*)$", v1=True), servlet.on_PUT ) if with_get: http_server.register_paths( "GET", - client_path_patterns(regex_string + "/(?P[^/]*)$"), + client_patterns(regex_string + "/(?P[^/]*)$", v1=True), servlet.on_GET ) diff --git a/synapse/rest/client/v1/voip.py b/synapse/rest/client/v1/voip.py index 53da905eea..0975df84cf 100644 --- a/synapse/rest/client/v1/voip.py +++ b/synapse/rest/client/v1/voip.py @@ -19,11 +19,16 @@ import hmac from twisted.internet import defer -from .base import ClientV1RestServlet, client_path_patterns +from synapse.http.servlet import RestServlet +from synapse.rest.client.v2_alpha._base import client_patterns -class VoipRestServlet(ClientV1RestServlet): - PATTERNS = client_path_patterns("/voip/turnServer$") +class VoipRestServlet(RestServlet): + PATTERNS = client_patterns("/voip/turnServer$", v1=True) + + def __init__(self, hs): + super(VoipRestServlet, self).__init__() + self.hs = hs @defer.inlineCallbacks def on_GET(self, request): diff --git a/synapse/rest/client/v2_alpha/_base.py b/synapse/rest/client/v2_alpha/_base.py index 24ac26bf03..5236d5d566 100644 --- a/synapse/rest/client/v2_alpha/_base.py +++ b/synapse/rest/client/v2_alpha/_base.py @@ -26,8 +26,7 @@ from synapse.api.urls import CLIENT_API_PREFIX logger = logging.getLogger(__name__) -def client_v2_patterns(path_regex, releases=(0,), - unstable=True): +def client_patterns(path_regex, releases=(0,), unstable=True, v1=False): """Creates a regex compiled client path with the correct client path prefix. @@ -41,6 +40,9 @@ def client_v2_patterns(path_regex, releases=(0,), if unstable: unstable_prefix = CLIENT_API_PREFIX + "/unstable" patterns.append(re.compile("^" + unstable_prefix + path_regex)) + if v1: + v1_prefix = CLIENT_API_PREFIX + "/api/v1" + patterns.append(re.compile("^" + v1_prefix + path_regex)) for release in releases: new_prefix = CLIENT_API_PREFIX + "/r%d" % (release,) patterns.append(re.compile("^" + new_prefix + path_regex)) diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index ee069179f0..ca35dc3c83 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -30,13 +30,13 @@ from synapse.http.servlet import ( from synapse.util.msisdn import phone_number_to_msisdn from synapse.util.threepids import check_3pid_allowed -from ._base import client_v2_patterns, interactive_auth_handler +from ._base import client_patterns, interactive_auth_handler logger = logging.getLogger(__name__) class EmailPasswordRequestTokenRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/account/password/email/requestToken$") + PATTERNS = client_patterns("/account/password/email/requestToken$") def __init__(self, hs): super(EmailPasswordRequestTokenRestServlet, self).__init__() @@ -70,7 +70,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): class MsisdnPasswordRequestTokenRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/account/password/msisdn/requestToken$") + PATTERNS = client_patterns("/account/password/msisdn/requestToken$") def __init__(self, hs): super(MsisdnPasswordRequestTokenRestServlet, self).__init__() @@ -108,7 +108,7 @@ class MsisdnPasswordRequestTokenRestServlet(RestServlet): class PasswordRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/account/password$") + PATTERNS = 
client_patterns("/account/password$") def __init__(self, hs): super(PasswordRestServlet, self).__init__() @@ -180,7 +180,7 @@ class PasswordRestServlet(RestServlet): class DeactivateAccountRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/account/deactivate$") + PATTERNS = client_patterns("/account/deactivate$") def __init__(self, hs): super(DeactivateAccountRestServlet, self).__init__() @@ -228,7 +228,7 @@ class DeactivateAccountRestServlet(RestServlet): class EmailThreepidRequestTokenRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/account/3pid/email/requestToken$") + PATTERNS = client_patterns("/account/3pid/email/requestToken$") def __init__(self, hs): self.hs = hs @@ -263,7 +263,7 @@ class EmailThreepidRequestTokenRestServlet(RestServlet): class MsisdnThreepidRequestTokenRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/account/3pid/msisdn/requestToken$") + PATTERNS = client_patterns("/account/3pid/msisdn/requestToken$") def __init__(self, hs): self.hs = hs @@ -300,7 +300,7 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet): class ThreepidRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/account/3pid$") + PATTERNS = client_patterns("/account/3pid$") def __init__(self, hs): super(ThreepidRestServlet, self).__init__() @@ -364,7 +364,7 @@ class ThreepidRestServlet(RestServlet): class ThreepidDeleteRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/account/3pid/delete$") + PATTERNS = client_patterns("/account/3pid/delete$") def __init__(self, hs): super(ThreepidDeleteRestServlet, self).__init__() @@ -401,7 +401,7 @@ class ThreepidDeleteRestServlet(RestServlet): class WhoamiRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/account/whoami$") + PATTERNS = client_patterns("/account/whoami$") def __init__(self, hs): super(WhoamiRestServlet, self).__init__() diff --git a/synapse/rest/client/v2_alpha/account_data.py b/synapse/rest/client/v2_alpha/account_data.py index f171b8d626..574a6298ce 100644 --- a/synapse/rest/client/v2_alpha/account_data.py +++ b/synapse/rest/client/v2_alpha/account_data.py @@ -20,7 +20,7 @@ from twisted.internet import defer from synapse.api.errors import AuthError, NotFoundError, SynapseError from synapse.http.servlet import RestServlet, parse_json_object_from_request -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) @@ -30,7 +30,7 @@ class AccountDataServlet(RestServlet): PUT /user/{user_id}/account_data/{account_dataType} HTTP/1.1 GET /user/{user_id}/account_data/{account_dataType} HTTP/1.1 """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/user/(?P[^/]*)/account_data/(?P[^/]*)" ) @@ -79,7 +79,7 @@ class RoomAccountDataServlet(RestServlet): PUT /user/{user_id}/rooms/{room_id}/account_data/{account_dataType} HTTP/1.1 GET /user/{user_id}/rooms/{room_id}/account_data/{account_dataType} HTTP/1.1 """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/user/(?P[^/]*)" "/rooms/(?P[^/]*)" "/account_data/(?P[^/]*)" diff --git a/synapse/rest/client/v2_alpha/account_validity.py b/synapse/rest/client/v2_alpha/account_validity.py index fc8dbeb617..55c4ed5660 100644 --- a/synapse/rest/client/v2_alpha/account_validity.py +++ b/synapse/rest/client/v2_alpha/account_validity.py @@ -21,13 +21,13 @@ from synapse.api.errors import AuthError, SynapseError from synapse.http.server import finish_request from synapse.http.servlet import RestServlet -from ._base import client_v2_patterns +from ._base import client_patterns logger = 
logging.getLogger(__name__) class AccountValidityRenewServlet(RestServlet): - PATTERNS = client_v2_patterns("/account_validity/renew$") + PATTERNS = client_patterns("/account_validity/renew$") SUCCESS_HTML = b"Your account has been successfully renewed." def __init__(self, hs): @@ -60,7 +60,7 @@ class AccountValidityRenewServlet(RestServlet): class AccountValiditySendMailServlet(RestServlet): - PATTERNS = client_v2_patterns("/account_validity/send_mail$") + PATTERNS = client_patterns("/account_validity/send_mail$") def __init__(self, hs): """ diff --git a/synapse/rest/client/v2_alpha/auth.py b/synapse/rest/client/v2_alpha/auth.py index 4c380ab84d..8dfe5cba02 100644 --- a/synapse/rest/client/v2_alpha/auth.py +++ b/synapse/rest/client/v2_alpha/auth.py @@ -23,7 +23,7 @@ from synapse.api.urls import CLIENT_API_PREFIX from synapse.http.server import finish_request from synapse.http.servlet import RestServlet, parse_string -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) @@ -122,7 +122,7 @@ class AuthRestServlet(RestServlet): cannot be handled in the normal flow (with requests to the same endpoint). Current use is for web fallback auth. """ - PATTERNS = client_v2_patterns(r"/auth/(?P[\w\.]*)/fallback/web") + PATTERNS = client_patterns(r"/auth/(?P[\w\.]*)/fallback/web") def __init__(self, hs): super(AuthRestServlet, self).__init__() diff --git a/synapse/rest/client/v2_alpha/capabilities.py b/synapse/rest/client/v2_alpha/capabilities.py index 2b4892330c..fc7e2f4dd5 100644 --- a/synapse/rest/client/v2_alpha/capabilities.py +++ b/synapse/rest/client/v2_alpha/capabilities.py @@ -19,7 +19,7 @@ from twisted.internet import defer from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.http.servlet import RestServlet -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) @@ -27,7 +27,7 @@ logger = logging.getLogger(__name__) class CapabilitiesRestServlet(RestServlet): """End point to expose the capabilities of the server.""" - PATTERNS = client_v2_patterns("/capabilities$") + PATTERNS = client_patterns("/capabilities$") def __init__(self, hs): """ diff --git a/synapse/rest/client/v2_alpha/devices.py b/synapse/rest/client/v2_alpha/devices.py index 5a5be7c390..78665304a5 100644 --- a/synapse/rest/client/v2_alpha/devices.py +++ b/synapse/rest/client/v2_alpha/devices.py @@ -24,13 +24,13 @@ from synapse.http.servlet import ( parse_json_object_from_request, ) -from ._base import client_v2_patterns, interactive_auth_handler +from ._base import client_patterns, interactive_auth_handler logger = logging.getLogger(__name__) class DevicesRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/devices$") + PATTERNS = client_patterns("/devices$") def __init__(self, hs): """ @@ -56,7 +56,7 @@ class DeleteDevicesRestServlet(RestServlet): API for bulk deletion of devices. Accepts a JSON object with a devices key which lists the device_ids to delete. Requires user interactive auth. 
""" - PATTERNS = client_v2_patterns("/delete_devices") + PATTERNS = client_patterns("/delete_devices") def __init__(self, hs): super(DeleteDevicesRestServlet, self).__init__() @@ -95,7 +95,7 @@ class DeleteDevicesRestServlet(RestServlet): class DeviceRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/devices/(?P[^/]*)$") + PATTERNS = client_patterns("/devices/(?P[^/]*)$") def __init__(self, hs): """ diff --git a/synapse/rest/client/v2_alpha/filter.py b/synapse/rest/client/v2_alpha/filter.py index ae86728879..65db48c3cc 100644 --- a/synapse/rest/client/v2_alpha/filter.py +++ b/synapse/rest/client/v2_alpha/filter.py @@ -21,13 +21,13 @@ from synapse.api.errors import AuthError, Codes, StoreError, SynapseError from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.types import UserID -from ._base import client_v2_patterns, set_timeline_upper_limit +from ._base import client_patterns, set_timeline_upper_limit logger = logging.getLogger(__name__) class GetFilterRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/user/(?P[^/]*)/filter/(?P[^/]*)") + PATTERNS = client_patterns("/user/(?P[^/]*)/filter/(?P[^/]*)") def __init__(self, hs): super(GetFilterRestServlet, self).__init__() @@ -63,7 +63,7 @@ class GetFilterRestServlet(RestServlet): class CreateFilterRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/user/(?P[^/]*)/filter") + PATTERNS = client_patterns("/user/(?P[^/]*)/filter") def __init__(self, hs): super(CreateFilterRestServlet, self).__init__() diff --git a/synapse/rest/client/v2_alpha/groups.py b/synapse/rest/client/v2_alpha/groups.py index 21e02c07c0..d082385ec7 100644 --- a/synapse/rest/client/v2_alpha/groups.py +++ b/synapse/rest/client/v2_alpha/groups.py @@ -21,7 +21,7 @@ from twisted.internet import defer from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.types import GroupID -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) @@ -29,7 +29,7 @@ logger = logging.getLogger(__name__) class GroupServlet(RestServlet): """Get the group profile """ - PATTERNS = client_v2_patterns("/groups/(?P[^/]*)/profile$") + PATTERNS = client_patterns("/groups/(?P[^/]*)/profile$") def __init__(self, hs): super(GroupServlet, self).__init__() @@ -65,7 +65,7 @@ class GroupServlet(RestServlet): class GroupSummaryServlet(RestServlet): """Get the full group summary """ - PATTERNS = client_v2_patterns("/groups/(?P[^/]*)/summary$") + PATTERNS = client_patterns("/groups/(?P[^/]*)/summary$") def __init__(self, hs): super(GroupSummaryServlet, self).__init__() @@ -93,7 +93,7 @@ class GroupSummaryRoomsCatServlet(RestServlet): - /groups/:group/summary/rooms/:room_id - /groups/:group/summary/categories/:category/rooms/:room_id """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/summary" "(/categories/(?P[^/]+))?" 
"/rooms/(?P[^/]*)$" @@ -137,7 +137,7 @@ class GroupSummaryRoomsCatServlet(RestServlet): class GroupCategoryServlet(RestServlet): """Get/add/update/delete a group category """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/categories/(?P[^/]+)$" ) @@ -189,7 +189,7 @@ class GroupCategoryServlet(RestServlet): class GroupCategoriesServlet(RestServlet): """Get all group categories """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/categories/$" ) @@ -214,7 +214,7 @@ class GroupCategoriesServlet(RestServlet): class GroupRoleServlet(RestServlet): """Get/add/update/delete a group role """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/roles/(?P[^/]+)$" ) @@ -266,7 +266,7 @@ class GroupRoleServlet(RestServlet): class GroupRolesServlet(RestServlet): """Get all group roles """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/roles/$" ) @@ -295,7 +295,7 @@ class GroupSummaryUsersRoleServlet(RestServlet): - /groups/:group/summary/users/:room_id - /groups/:group/summary/roles/:role/users/:user_id """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/summary" "(/roles/(?P[^/]+))?" "/users/(?P[^/]*)$" @@ -339,7 +339,7 @@ class GroupSummaryUsersRoleServlet(RestServlet): class GroupRoomServlet(RestServlet): """Get all rooms in a group """ - PATTERNS = client_v2_patterns("/groups/(?P[^/]*)/rooms$") + PATTERNS = client_patterns("/groups/(?P[^/]*)/rooms$") def __init__(self, hs): super(GroupRoomServlet, self).__init__() @@ -360,7 +360,7 @@ class GroupRoomServlet(RestServlet): class GroupUsersServlet(RestServlet): """Get all users in a group """ - PATTERNS = client_v2_patterns("/groups/(?P[^/]*)/users$") + PATTERNS = client_patterns("/groups/(?P[^/]*)/users$") def __init__(self, hs): super(GroupUsersServlet, self).__init__() @@ -381,7 +381,7 @@ class GroupUsersServlet(RestServlet): class GroupInvitedUsersServlet(RestServlet): """Get users invited to a group """ - PATTERNS = client_v2_patterns("/groups/(?P[^/]*)/invited_users$") + PATTERNS = client_patterns("/groups/(?P[^/]*)/invited_users$") def __init__(self, hs): super(GroupInvitedUsersServlet, self).__init__() @@ -405,7 +405,7 @@ class GroupInvitedUsersServlet(RestServlet): class GroupSettingJoinPolicyServlet(RestServlet): """Set group join policy """ - PATTERNS = client_v2_patterns("/groups/(?P[^/]*)/settings/m.join_policy$") + PATTERNS = client_patterns("/groups/(?P[^/]*)/settings/m.join_policy$") def __init__(self, hs): super(GroupSettingJoinPolicyServlet, self).__init__() @@ -431,7 +431,7 @@ class GroupSettingJoinPolicyServlet(RestServlet): class GroupCreateServlet(RestServlet): """Create a group """ - PATTERNS = client_v2_patterns("/create_group$") + PATTERNS = client_patterns("/create_group$") def __init__(self, hs): super(GroupCreateServlet, self).__init__() @@ -462,7 +462,7 @@ class GroupCreateServlet(RestServlet): class GroupAdminRoomsServlet(RestServlet): """Add a room to the group """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/admin/rooms/(?P[^/]*)$" ) @@ -499,7 +499,7 @@ class GroupAdminRoomsServlet(RestServlet): class GroupAdminRoomsConfigServlet(RestServlet): """Update the config of a room in a group """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/admin/rooms/(?P[^/]*)" "/config/(?P[^/]*)$" ) @@ -526,7 +526,7 @@ class GroupAdminRoomsConfigServlet(RestServlet): class 
GroupAdminUsersInviteServlet(RestServlet): """Invite a user to the group """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/admin/users/invite/(?P[^/]*)$" ) @@ -555,7 +555,7 @@ class GroupAdminUsersInviteServlet(RestServlet): class GroupAdminUsersKickServlet(RestServlet): """Kick a user from the group """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/admin/users/remove/(?P[^/]*)$" ) @@ -581,7 +581,7 @@ class GroupAdminUsersKickServlet(RestServlet): class GroupSelfLeaveServlet(RestServlet): """Leave a joined group """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/self/leave$" ) @@ -607,7 +607,7 @@ class GroupSelfLeaveServlet(RestServlet): class GroupSelfJoinServlet(RestServlet): """Attempt to join a group, or knock """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/self/join$" ) @@ -633,7 +633,7 @@ class GroupSelfJoinServlet(RestServlet): class GroupSelfAcceptInviteServlet(RestServlet): """Accept a group invite """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/self/accept_invite$" ) @@ -659,7 +659,7 @@ class GroupSelfAcceptInviteServlet(RestServlet): class GroupSelfUpdatePublicityServlet(RestServlet): """Update whether we publicise a users membership of a group """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/groups/(?P[^/]*)/self/update_publicity$" ) @@ -686,7 +686,7 @@ class GroupSelfUpdatePublicityServlet(RestServlet): class PublicisedGroupsForUserServlet(RestServlet): """Get the list of groups a user is advertising """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/publicised_groups/(?P[^/]*)$" ) @@ -711,7 +711,7 @@ class PublicisedGroupsForUserServlet(RestServlet): class PublicisedGroupsForUsersServlet(RestServlet): """Get the list of groups a user is advertising """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/publicised_groups$" ) @@ -739,7 +739,7 @@ class PublicisedGroupsForUsersServlet(RestServlet): class GroupsForUserServlet(RestServlet): """Get all groups the logged in user is joined to """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/joined_groups$" ) diff --git a/synapse/rest/client/v2_alpha/keys.py b/synapse/rest/client/v2_alpha/keys.py index 8486086b51..4cbfbf5631 100644 --- a/synapse/rest/client/v2_alpha/keys.py +++ b/synapse/rest/client/v2_alpha/keys.py @@ -26,7 +26,7 @@ from synapse.http.servlet import ( ) from synapse.types import StreamToken -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) @@ -56,7 +56,7 @@ class KeyUploadServlet(RestServlet): }, } """ - PATTERNS = client_v2_patterns("/keys/upload(/(?P[^/]+))?$") + PATTERNS = client_patterns("/keys/upload(/(?P[^/]+))?$") def __init__(self, hs): """ @@ -130,7 +130,7 @@ class KeyQueryServlet(RestServlet): } } } } } } """ - PATTERNS = client_v2_patterns("/keys/query$") + PATTERNS = client_patterns("/keys/query$") def __init__(self, hs): """ @@ -159,7 +159,7 @@ class KeyChangesServlet(RestServlet): 200 OK { "changed": ["@foo:example.com"] } """ - PATTERNS = client_v2_patterns("/keys/changes$") + PATTERNS = client_patterns("/keys/changes$") def __init__(self, hs): """ @@ -209,7 +209,7 @@ class OneTimeKeyServlet(RestServlet): } } } } """ - PATTERNS = client_v2_patterns("/keys/claim$") + PATTERNS = client_patterns("/keys/claim$") def __init__(self, hs): super(OneTimeKeyServlet, self).__init__() diff --git 
a/synapse/rest/client/v2_alpha/notifications.py b/synapse/rest/client/v2_alpha/notifications.py index 0a1eb0ae45..53e666989b 100644 --- a/synapse/rest/client/v2_alpha/notifications.py +++ b/synapse/rest/client/v2_alpha/notifications.py @@ -20,13 +20,13 @@ from twisted.internet import defer from synapse.events.utils import format_event_for_client_v2_without_room_id from synapse.http.servlet import RestServlet, parse_integer, parse_string -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) class NotificationsServlet(RestServlet): - PATTERNS = client_v2_patterns("/notifications$") + PATTERNS = client_patterns("/notifications$") def __init__(self, hs): super(NotificationsServlet, self).__init__() diff --git a/synapse/rest/client/v2_alpha/openid.py b/synapse/rest/client/v2_alpha/openid.py index 01c90aa2a3..bb927d9f9d 100644 --- a/synapse/rest/client/v2_alpha/openid.py +++ b/synapse/rest/client/v2_alpha/openid.py @@ -22,7 +22,7 @@ from synapse.api.errors import AuthError from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.util.stringutils import random_string -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) @@ -56,7 +56,7 @@ class IdTokenServlet(RestServlet): "expires_in": 3600, } """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/user/(?P<user_id>[^/]*)/openid/request_token" ) diff --git a/synapse/rest/client/v2_alpha/read_marker.py b/synapse/rest/client/v2_alpha/read_marker.py index a6e582a5ae..f4bd0d077f 100644 --- a/synapse/rest/client/v2_alpha/read_marker.py +++ b/synapse/rest/client/v2_alpha/read_marker.py @@ -19,13 +19,13 @@ from twisted.internet import defer from synapse.http.servlet import RestServlet, parse_json_object_from_request -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) class ReadMarkerRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/rooms/(?P<room_id>[^/]*)/read_markers$") + PATTERNS = client_patterns("/rooms/(?P<room_id>[^/]*)/read_markers$") def __init__(self, hs): super(ReadMarkerRestServlet, self).__init__() diff --git a/synapse/rest/client/v2_alpha/receipts.py b/synapse/rest/client/v2_alpha/receipts.py index de370cac45..fa12ac3e4d 100644 --- a/synapse/rest/client/v2_alpha/receipts.py +++ b/synapse/rest/client/v2_alpha/receipts.py @@ -20,13 +20,13 @@ from twisted.internet import defer from synapse.api.errors import SynapseError from synapse.http.servlet import RestServlet -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) class ReceiptRestServlet(RestServlet): - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/rooms/(?P<room_id>[^/]*)" "/receipt/(?P<receipt_type>[^/]*)" "/(?P<event_id>[^/]*)$" diff --git a/synapse/rest/client/v2_alpha/register.py b/synapse/rest/client/v2_alpha/register.py index 042f636135..79c085408b 100644 --- a/synapse/rest/client/v2_alpha/register.py +++ b/synapse/rest/client/v2_alpha/register.py @@ -43,7 +43,7 @@ from synapse.util.msisdn import phone_number_to_msisdn from synapse.util.ratelimitutils import FederationRateLimiter from synapse.util.threepids import check_3pid_allowed -from ._base import client_v2_patterns, interactive_auth_handler +from ._base import client_patterns, interactive_auth_handler # We ought to be using hmac.compare_digest() but on older pythons it doesn't # exist.
It's a _really minor_ security flaw to use plain string comparison @@ -60,7 +60,7 @@ logger = logging.getLogger(__name__) class EmailRegisterRequestTokenRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/register/email/requestToken$") + PATTERNS = client_patterns("/register/email/requestToken$") def __init__(self, hs): """ @@ -98,7 +98,7 @@ class EmailRegisterRequestTokenRestServlet(RestServlet): class MsisdnRegisterRequestTokenRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/register/msisdn/requestToken$") + PATTERNS = client_patterns("/register/msisdn/requestToken$") def __init__(self, hs): """ @@ -142,7 +142,7 @@ class MsisdnRegisterRequestTokenRestServlet(RestServlet): class UsernameAvailabilityRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/register/available") + PATTERNS = client_patterns("/register/available") def __init__(self, hs): """ @@ -182,7 +182,7 @@ class UsernameAvailabilityRestServlet(RestServlet): class RegisterRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/register$") + PATTERNS = client_patterns("/register$") def __init__(self, hs): """ diff --git a/synapse/rest/client/v2_alpha/relations.py b/synapse/rest/client/v2_alpha/relations.py index 41e0a44936..f8f8742bdc 100644 --- a/synapse/rest/client/v2_alpha/relations.py +++ b/synapse/rest/client/v2_alpha/relations.py @@ -34,7 +34,7 @@ from synapse.http.servlet import ( from synapse.rest.client.transactions import HttpTransactionCache from synapse.storage.relations import AggregationPaginationToken, RelationPaginationToken -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) @@ -66,12 +66,12 @@ class RelationSendServlet(RestServlet): def register(self, http_server): http_server.register_paths( "POST", - client_v2_patterns(self.PATTERN + "$", releases=()), + client_patterns(self.PATTERN + "$", releases=()), self.on_PUT_or_POST, ) http_server.register_paths( "PUT", - client_v2_patterns(self.PATTERN + "/(?P[^/]*)$", releases=()), + client_patterns(self.PATTERN + "/(?P[^/]*)$", releases=()), self.on_PUT, ) @@ -120,7 +120,7 @@ class RelationPaginationServlet(RestServlet): filtered by relation type and event type. 
""" - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/rooms/(?P[^/]*)/relations/(?P[^/]*)" "(/(?P[^/]*)(/(?P[^/]*))?)?$", releases=(), @@ -197,7 +197,7 @@ class RelationAggregationPaginationServlet(RestServlet): } """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/rooms/(?P[^/]*)/aggregations/(?P[^/]*)" "(/(?P[^/]*)(/(?P[^/]*))?)?$", releases=(), @@ -269,7 +269,7 @@ class RelationAggregationGroupPaginationServlet(RestServlet): } """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/rooms/(?P[^/]*)/aggregations/(?P[^/]*)" "/(?P[^/]*)/(?P[^/]*)/(?P[^/]*)$", releases=(), diff --git a/synapse/rest/client/v2_alpha/report_event.py b/synapse/rest/client/v2_alpha/report_event.py index 95d2a71ec2..10198662a9 100644 --- a/synapse/rest/client/v2_alpha/report_event.py +++ b/synapse/rest/client/v2_alpha/report_event.py @@ -27,13 +27,13 @@ from synapse.http.servlet import ( parse_json_object_from_request, ) -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) class ReportEventRestServlet(RestServlet): - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/rooms/(?P[^/]*)/report/(?P[^/]*)$" ) diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index 220a0de30b..87779645f9 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -24,13 +24,13 @@ from synapse.http.servlet import ( parse_string, ) -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) class RoomKeysServlet(RestServlet): - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/room_keys/keys(/(?P[^/]+))?(/(?P[^/]+))?$" ) @@ -256,7 +256,7 @@ class RoomKeysServlet(RestServlet): class RoomKeysNewVersionServlet(RestServlet): - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/room_keys/version$" ) @@ -314,7 +314,7 @@ class RoomKeysNewVersionServlet(RestServlet): class RoomKeysVersionServlet(RestServlet): - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/room_keys/version(/(?P[^/]+))?$" ) diff --git a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py index 62b8de71fa..c621a90fba 100644 --- a/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py +++ b/synapse/rest/client/v2_alpha/room_upgrade_rest_servlet.py @@ -25,7 +25,7 @@ from synapse.http.servlet import ( parse_json_object_from_request, ) -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) @@ -47,7 +47,7 @@ class RoomUpgradeRestServlet(RestServlet): Args: hs (synapse.server.HomeServer): """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( # /rooms/$roomid/upgrade "/rooms/(?P[^/]*)/upgrade$", ) diff --git a/synapse/rest/client/v2_alpha/sendtodevice.py b/synapse/rest/client/v2_alpha/sendtodevice.py index 21e9cef2d0..120a713361 100644 --- a/synapse/rest/client/v2_alpha/sendtodevice.py +++ b/synapse/rest/client/v2_alpha/sendtodevice.py @@ -21,13 +21,13 @@ from synapse.http import servlet from synapse.http.servlet import parse_json_object_from_request from synapse.rest.client.transactions import HttpTransactionCache -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) class SendToDeviceRestServlet(servlet.RestServlet): - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( 
"/sendToDevice/(?P[^/]*)/(?P[^/]*)$", ) diff --git a/synapse/rest/client/v2_alpha/sync.py b/synapse/rest/client/v2_alpha/sync.py index d3025025e3..148fc6c985 100644 --- a/synapse/rest/client/v2_alpha/sync.py +++ b/synapse/rest/client/v2_alpha/sync.py @@ -32,7 +32,7 @@ from synapse.handlers.sync import SyncConfig from synapse.http.servlet import RestServlet, parse_boolean, parse_integer, parse_string from synapse.types import StreamToken -from ._base import client_v2_patterns, set_timeline_upper_limit +from ._base import client_patterns, set_timeline_upper_limit logger = logging.getLogger(__name__) @@ -73,7 +73,7 @@ class SyncRestServlet(RestServlet): } """ - PATTERNS = client_v2_patterns("/sync$") + PATTERNS = client_patterns("/sync$") ALLOWED_PRESENCE = set(["online", "offline", "unavailable"]) def __init__(self, hs): diff --git a/synapse/rest/client/v2_alpha/tags.py b/synapse/rest/client/v2_alpha/tags.py index 4fea614e95..ebff7cff45 100644 --- a/synapse/rest/client/v2_alpha/tags.py +++ b/synapse/rest/client/v2_alpha/tags.py @@ -20,7 +20,7 @@ from twisted.internet import defer from synapse.api.errors import AuthError from synapse.http.servlet import RestServlet, parse_json_object_from_request -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) @@ -29,7 +29,7 @@ class TagListServlet(RestServlet): """ GET /user/{user_id}/rooms/{room_id}/tags HTTP/1.1 """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/user/(?P[^/]*)/rooms/(?P[^/]*)/tags" ) @@ -54,7 +54,7 @@ class TagServlet(RestServlet): PUT /user/{user_id}/rooms/{room_id}/tags/{tag} HTTP/1.1 DELETE /user/{user_id}/rooms/{room_id}/tags/{tag} HTTP/1.1 """ - PATTERNS = client_v2_patterns( + PATTERNS = client_patterns( "/user/(?P[^/]*)/rooms/(?P[^/]*)/tags/(?P[^/]*)" ) diff --git a/synapse/rest/client/v2_alpha/thirdparty.py b/synapse/rest/client/v2_alpha/thirdparty.py index b9b5d07677..e7a987466a 100644 --- a/synapse/rest/client/v2_alpha/thirdparty.py +++ b/synapse/rest/client/v2_alpha/thirdparty.py @@ -21,13 +21,13 @@ from twisted.internet import defer from synapse.api.constants import ThirdPartyEntityKind from synapse.http.servlet import RestServlet -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) class ThirdPartyProtocolsServlet(RestServlet): - PATTERNS = client_v2_patterns("/thirdparty/protocols") + PATTERNS = client_patterns("/thirdparty/protocols") def __init__(self, hs): super(ThirdPartyProtocolsServlet, self).__init__() @@ -44,7 +44,7 @@ class ThirdPartyProtocolsServlet(RestServlet): class ThirdPartyProtocolServlet(RestServlet): - PATTERNS = client_v2_patterns("/thirdparty/protocol/(?P[^/]+)$") + PATTERNS = client_patterns("/thirdparty/protocol/(?P[^/]+)$") def __init__(self, hs): super(ThirdPartyProtocolServlet, self).__init__() @@ -66,7 +66,7 @@ class ThirdPartyProtocolServlet(RestServlet): class ThirdPartyUserServlet(RestServlet): - PATTERNS = client_v2_patterns("/thirdparty/user(/(?P[^/]+))?$") + PATTERNS = client_patterns("/thirdparty/user(/(?P[^/]+))?$") def __init__(self, hs): super(ThirdPartyUserServlet, self).__init__() @@ -89,7 +89,7 @@ class ThirdPartyUserServlet(RestServlet): class ThirdPartyLocationServlet(RestServlet): - PATTERNS = client_v2_patterns("/thirdparty/location(/(?P[^/]+))?$") + PATTERNS = client_patterns("/thirdparty/location(/(?P[^/]+))?$") def __init__(self, hs): super(ThirdPartyLocationServlet, self).__init__() diff --git a/synapse/rest/client/v2_alpha/tokenrefresh.py 
b/synapse/rest/client/v2_alpha/tokenrefresh.py index 6e76b9e9c2..6c366142e1 100644 --- a/synapse/rest/client/v2_alpha/tokenrefresh.py +++ b/synapse/rest/client/v2_alpha/tokenrefresh.py @@ -18,7 +18,7 @@ from twisted.internet import defer from synapse.api.errors import AuthError from synapse.http.servlet import RestServlet -from ._base import client_v2_patterns +from ._base import client_patterns class TokenRefreshRestServlet(RestServlet): @@ -26,7 +26,7 @@ class TokenRefreshRestServlet(RestServlet): Exchanges refresh tokens for a pair of an access token and a new refresh token. """ - PATTERNS = client_v2_patterns("/tokenrefresh") + PATTERNS = client_patterns("/tokenrefresh") def __init__(self, hs): super(TokenRefreshRestServlet, self).__init__() diff --git a/synapse/rest/client/v2_alpha/user_directory.py b/synapse/rest/client/v2_alpha/user_directory.py index 36b02de37f..69e4efc47a 100644 --- a/synapse/rest/client/v2_alpha/user_directory.py +++ b/synapse/rest/client/v2_alpha/user_directory.py @@ -20,13 +20,13 @@ from twisted.internet import defer from synapse.api.errors import SynapseError from synapse.http.servlet import RestServlet, parse_json_object_from_request -from ._base import client_v2_patterns +from ._base import client_patterns logger = logging.getLogger(__name__) class UserDirectorySearchRestServlet(RestServlet): - PATTERNS = client_v2_patterns("/user_directory/search$") + PATTERNS = client_patterns("/user_directory/search$") def __init__(self, hs): """ diff --git a/tests/__init__.py b/tests/__init__.py index d3181f9403..f7fc502f01 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -21,4 +21,4 @@ import tests.patch_inline_callbacks # attempt to do the patch before we load any synapse code tests.patch_inline_callbacks.do_patch() -util.DEFAULT_TIMEOUT_DURATION = 10 +util.DEFAULT_TIMEOUT_DURATION = 20 diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py index ee5f09041f..e5fc2fcd15 100644 --- a/tests/rest/admin/test_admin.py +++ b/tests/rest/admin/test_admin.py @@ -408,7 +408,6 @@ class ShutdownRoomTestCase(unittest.HomeserverTestCase): users_in_room = self.get_success(self.store.get_users_in_room(room_id)) self.assertEqual([], users_in_room) - @unittest.DEBUG def test_shutdown_room_block_peek(self): """Test that a world_readable room can no longer be peeked into after it has been shut down. 
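[Editor's note on the client_v2_patterns -> client_patterns rename applied across the servlets above: the call sites in these diffs, including the releases=() keyword used by the relations servlets, suggest a single helper that expands one path regex into a compiled pattern per client API prefix. The sketch below is a minimal illustration of such a helper, assuming a CLIENT_API_PREFIX of "/_matrix/client"; it is not the exact implementation in synapse/rest/client/v2_alpha/_base.py, and the v1 keyword is an assumed knob for servlets that also need the legacy /api/v1 prefix.]

import re

CLIENT_API_PREFIX = "/_matrix/client"  # assumed prefix, for illustration only


def client_patterns(path_regex, releases=(0,), unstable=True, v1=False):
    """Expand a client API path regex into one compiled pattern per prefix.

    path_regex should begin with "/" and may contain named groups such as
    (?P<room_id>[^/]*).
    """
    patterns = []
    if unstable:
        # e.g. ^/_matrix/client/unstable/sync$
        patterns.append(re.compile("^" + CLIENT_API_PREFIX + "/unstable" + path_regex))
    if v1:
        # e.g. ^/_matrix/client/api/v1/sync$ (legacy prefix; assumed extra)
        patterns.append(re.compile("^" + CLIENT_API_PREFIX + "/api/v1" + path_regex))
    for release in releases:
        # e.g. ^/_matrix/client/r0/sync$
        patterns.append(re.compile("^" + CLIENT_API_PREFIX + "/r%d" % (release,) + path_regex))
    return patterns


# Example: an unstable-only endpoint, as the relations servlets request with releases=()
UNSTABLE_ONLY = client_patterns("/rooms/(?P<room_id>[^/]*)/relations$", releases=())

[Under a helper along these lines, a servlet registered with client_patterns("/sync$") is reachable at /_matrix/client/r0/sync as well as /_matrix/client/unstable/sync, which is consistent with the test prefix change to "/_matrix/client/r0" later in this series.]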
diff --git a/tests/rest/client/v1/test_profile.py b/tests/rest/client/v1/test_profile.py index f4d0d48dad..72c7ed93cb 100644 --- a/tests/rest/client/v1/test_profile.py +++ b/tests/rest/client/v1/test_profile.py @@ -30,7 +30,7 @@ from tests import unittest from ....utils import MockHttpResource, setup_test_homeserver myid = "@1234ABCD:test" -PATH_PREFIX = "/_matrix/client/api/v1" +PATH_PREFIX = "/_matrix/client/r0" class MockHandlerProfileTestCase(unittest.TestCase): From 37057d5d6047a7f984fc9f1db094b9169a4e4c73 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 3 Jun 2019 22:02:47 +1000 Subject: [PATCH 288/429] prepare --- synapse/storage/prepare_database.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index c1711bc8bd..07478b6672 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -20,6 +20,8 @@ import logging import os import re +from synapse.storage.engines.postgres import PostgresEngine + logger = logging.getLogger(__name__) @@ -115,8 +117,16 @@ def _setup_new_database(cur, database_engine): valid_dirs = [] pattern = re.compile(r"^\d+(\.sql)?$") + + if isinstance(database_engine, PostgresEngine): + specific = "postgres" + else: + specific = "sqlite" + + specific_pattern = re.compile(r"^\d+(\.sql." + specific + r")?$") + for filename in directory_entries: - match = pattern.match(filename) + match = pattern.match(filename) or specific_pattern.match(filename) abs_path = os.path.join(current_dir, filename) if match and os.path.isdir(abs_path): ver = int(match.group(0)) @@ -136,7 +146,9 @@ def _setup_new_database(cur, database_engine): directory_entries = os.listdir(sql_dir) - for filename in fnmatch.filter(directory_entries, "*.sql"): + for filename in fnmatch.filter(directory_entries, "*.sql") + fnmatch.filter( + directory_entries, "*.sql." 
+ specific + ): sql_loc = os.path.join(sql_dir, filename) logger.debug("Applying schema %s", sql_loc) executescript(cur, sql_loc) From dc72b90cd674f69fea1d27a1c1dab60a60d5ab9d Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 3 Jun 2019 22:03:28 +1000 Subject: [PATCH 289/429] full schema --- .../schema/full_schemas/54/full.sql.postgres | 2040 +++++++++++++++++ .../schema/full_schemas/54/full.sql.sqlite | 261 +++ .../storage/schema/full_schemas/README.txt | 14 + 3 files changed, 2315 insertions(+) create mode 100644 synapse/storage/schema/full_schemas/54/full.sql.postgres create mode 100644 synapse/storage/schema/full_schemas/54/full.sql.sqlite create mode 100644 synapse/storage/schema/full_schemas/README.txt diff --git a/synapse/storage/schema/full_schemas/54/full.sql.postgres b/synapse/storage/schema/full_schemas/54/full.sql.postgres new file mode 100644 index 0000000000..5fb54cfe77 --- /dev/null +++ b/synapse/storage/schema/full_schemas/54/full.sql.postgres @@ -0,0 +1,2040 @@ + + + + + +CREATE TABLE _extremities_to_check ( + event_id text +); + + + +CREATE TABLE access_tokens ( + id bigint NOT NULL, + user_id text NOT NULL, + device_id text, + token text NOT NULL, + last_used bigint +); + + + +CREATE TABLE account_data ( + user_id text NOT NULL, + account_data_type text NOT NULL, + stream_id bigint NOT NULL, + content text NOT NULL +); + + + +CREATE TABLE account_data_max_stream_id ( + lock character(1) DEFAULT 'X'::bpchar NOT NULL, + stream_id bigint NOT NULL, + CONSTRAINT private_user_data_max_stream_id_lock_check CHECK ((lock = 'X'::bpchar)) +); + + + +CREATE TABLE account_validity ( + user_id text NOT NULL, + expiration_ts_ms bigint NOT NULL, + email_sent boolean NOT NULL, + renewal_token text +); + + + +CREATE TABLE application_services_state ( + as_id text NOT NULL, + state character varying(5), + last_txn integer +); + + + +CREATE TABLE application_services_txns ( + as_id text NOT NULL, + txn_id integer NOT NULL, + event_ids text NOT NULL +); + + + +CREATE TABLE applied_module_schemas ( + module_name text NOT NULL, + file text NOT NULL +); + + + +CREATE TABLE applied_schema_deltas ( + version integer NOT NULL, + file text NOT NULL +); + + + +CREATE TABLE appservice_room_list ( + appservice_id text NOT NULL, + network_id text NOT NULL, + room_id text NOT NULL +); + + + +CREATE TABLE appservice_stream_position ( + lock character(1) DEFAULT 'X'::bpchar NOT NULL, + stream_ordering bigint, + CONSTRAINT appservice_stream_position_lock_check CHECK ((lock = 'X'::bpchar)) +); + + + +CREATE TABLE background_updates ( + update_name text NOT NULL, + progress_json text NOT NULL, + depends_on text +); + + + +CREATE TABLE blocked_rooms ( + room_id text NOT NULL, + user_id text NOT NULL +); + + + +CREATE TABLE cache_invalidation_stream ( + stream_id bigint, + cache_func text, + keys text[], + invalidation_ts bigint +); + + + +CREATE TABLE current_state_delta_stream ( + stream_id bigint NOT NULL, + room_id text NOT NULL, + type text NOT NULL, + state_key text NOT NULL, + event_id text, + prev_event_id text +); + + + +CREATE TABLE current_state_events ( + event_id text NOT NULL, + room_id text NOT NULL, + type text NOT NULL, + state_key text NOT NULL +); + + + +CREATE TABLE deleted_pushers ( + stream_id bigint NOT NULL, + app_id text NOT NULL, + pushkey text NOT NULL, + user_id text NOT NULL +); + + + +CREATE TABLE destinations ( + destination text NOT NULL, + retry_last_ts bigint, + retry_interval integer +); + + + +CREATE TABLE device_federation_inbox ( + origin text NOT NULL, + message_id 
text NOT NULL, + received_ts bigint NOT NULL +); + + + +CREATE TABLE device_federation_outbox ( + destination text NOT NULL, + stream_id bigint NOT NULL, + queued_ts bigint NOT NULL, + messages_json text NOT NULL +); + + + +CREATE TABLE device_inbox ( + user_id text NOT NULL, + device_id text NOT NULL, + stream_id bigint NOT NULL, + message_json text NOT NULL +); + + + +CREATE TABLE device_lists_outbound_last_success ( + destination text NOT NULL, + user_id text NOT NULL, + stream_id bigint NOT NULL +); + + + +CREATE TABLE device_lists_outbound_pokes ( + destination text NOT NULL, + stream_id bigint NOT NULL, + user_id text NOT NULL, + device_id text NOT NULL, + sent boolean NOT NULL, + ts bigint NOT NULL +); + + + +CREATE TABLE device_lists_remote_cache ( + user_id text NOT NULL, + device_id text NOT NULL, + content text NOT NULL +); + + + +CREATE TABLE device_lists_remote_extremeties ( + user_id text NOT NULL, + stream_id text NOT NULL +); + + + +CREATE TABLE device_lists_stream ( + stream_id bigint NOT NULL, + user_id text NOT NULL, + device_id text NOT NULL +); + + + +CREATE TABLE device_max_stream_id ( + stream_id bigint NOT NULL +); + + + +CREATE TABLE devices ( + user_id text NOT NULL, + device_id text NOT NULL, + display_name text +); + + + +CREATE TABLE e2e_device_keys_json ( + user_id text NOT NULL, + device_id text NOT NULL, + ts_added_ms bigint NOT NULL, + key_json text NOT NULL +); + + + +CREATE TABLE e2e_one_time_keys_json ( + user_id text NOT NULL, + device_id text NOT NULL, + algorithm text NOT NULL, + key_id text NOT NULL, + ts_added_ms bigint NOT NULL, + key_json text NOT NULL +); + + + +CREATE TABLE e2e_room_keys ( + user_id text NOT NULL, + room_id text NOT NULL, + session_id text NOT NULL, + version bigint NOT NULL, + first_message_index integer, + forwarded_count integer, + is_verified boolean, + session_data text NOT NULL +); + + + +CREATE TABLE e2e_room_keys_versions ( + user_id text NOT NULL, + version bigint NOT NULL, + algorithm text NOT NULL, + auth_data text NOT NULL, + deleted smallint DEFAULT 0 NOT NULL +); + + + +CREATE TABLE erased_users ( + user_id text NOT NULL +); + + + +CREATE TABLE event_auth ( + event_id text NOT NULL, + auth_id text NOT NULL, + room_id text NOT NULL +); + + + +CREATE TABLE event_backward_extremities ( + event_id text NOT NULL, + room_id text NOT NULL +); + + + +CREATE TABLE event_edges ( + event_id text NOT NULL, + prev_event_id text NOT NULL, + room_id text NOT NULL, + is_state boolean NOT NULL +); + + + +CREATE TABLE event_forward_extremities ( + event_id text NOT NULL, + room_id text NOT NULL +); + + + +CREATE TABLE event_json ( + event_id text NOT NULL, + room_id text NOT NULL, + internal_metadata text NOT NULL, + json text NOT NULL, + format_version integer +); + + + +CREATE TABLE event_push_actions ( + room_id text NOT NULL, + event_id text NOT NULL, + user_id text NOT NULL, + profile_tag character varying(32), + actions text NOT NULL, + topological_ordering bigint, + stream_ordering bigint, + notif smallint, + highlight smallint +); + + + +CREATE TABLE event_push_actions_staging ( + event_id text NOT NULL, + user_id text NOT NULL, + actions text NOT NULL, + notif smallint NOT NULL, + highlight smallint NOT NULL +); + + + +CREATE TABLE event_push_summary ( + user_id text NOT NULL, + room_id text NOT NULL, + notif_count bigint NOT NULL, + stream_ordering bigint NOT NULL +); + + + +CREATE TABLE event_push_summary_stream_ordering ( + lock character(1) DEFAULT 'X'::bpchar NOT NULL, + stream_ordering bigint NOT NULL, + CONSTRAINT 
event_push_summary_stream_ordering_lock_check CHECK ((lock = 'X'::bpchar)) +); + + + +CREATE TABLE event_reference_hashes ( + event_id text, + algorithm text, + hash bytea +); + + + +CREATE TABLE event_relations ( + event_id text NOT NULL, + relates_to_id text NOT NULL, + relation_type text NOT NULL, + aggregation_key text +); + + + +CREATE TABLE event_reports ( + id bigint NOT NULL, + received_ts bigint NOT NULL, + room_id text NOT NULL, + event_id text NOT NULL, + user_id text NOT NULL, + reason text, + content text +); + + + +CREATE TABLE event_search ( + event_id text, + room_id text, + sender text, + key text, + vector tsvector, + origin_server_ts bigint, + stream_ordering bigint +); + + + +CREATE TABLE event_to_state_groups ( + event_id text NOT NULL, + state_group bigint NOT NULL +); + + + +CREATE TABLE events ( + stream_ordering integer NOT NULL, + topological_ordering bigint NOT NULL, + event_id text NOT NULL, + type text NOT NULL, + room_id text NOT NULL, + content text, + unrecognized_keys text, + processed boolean NOT NULL, + outlier boolean NOT NULL, + depth bigint DEFAULT 0 NOT NULL, + origin_server_ts bigint, + received_ts bigint, + sender text, + contains_url boolean +); + + + +CREATE TABLE ex_outlier_stream ( + event_stream_ordering bigint NOT NULL, + event_id text NOT NULL, + state_group bigint NOT NULL +); + + + +CREATE TABLE federation_stream_position ( + type text NOT NULL, + stream_id integer NOT NULL +); + + + +CREATE TABLE group_attestations_remote ( + group_id text NOT NULL, + user_id text NOT NULL, + valid_until_ms bigint NOT NULL, + attestation_json text NOT NULL +); + + + +CREATE TABLE group_attestations_renewals ( + group_id text NOT NULL, + user_id text NOT NULL, + valid_until_ms bigint NOT NULL +); + + + +CREATE TABLE group_invites ( + group_id text NOT NULL, + user_id text NOT NULL +); + + + +CREATE TABLE group_roles ( + group_id text NOT NULL, + role_id text NOT NULL, + profile text NOT NULL, + is_boolean NOT NULL +); + + + +CREATE TABLE group_room_categories ( + group_id text NOT NULL, + category_id text NOT NULL, + profile text NOT NULL, + is_boolean NOT NULL +); + + + +CREATE TABLE group_rooms ( + group_id text NOT NULL, + room_id text NOT NULL, + is_boolean NOT NULL +); + + + +CREATE TABLE group_summary_roles ( + group_id text NOT NULL, + role_id text NOT NULL, + role_order bigint NOT NULL, + CONSTRAINT group_summary_roles_role_order_check CHECK ((role_order > 0)) +); + + + +CREATE TABLE group_summary_room_categories ( + group_id text NOT NULL, + category_id text NOT NULL, + cat_order bigint NOT NULL, + CONSTRAINT group_summary_room_categories_cat_order_check CHECK ((cat_order > 0)) +); + + + +CREATE TABLE group_summary_rooms ( + group_id text NOT NULL, + room_id text NOT NULL, + category_id text NOT NULL, + room_order bigint NOT NULL, + is_boolean NOT NULL, + CONSTRAINT group_summary_rooms_room_order_check CHECK ((room_order > 0)) +); + + + +CREATE TABLE group_summary_users ( + group_id text NOT NULL, + user_id text NOT NULL, + role_id text NOT NULL, + user_order bigint NOT NULL, + is_boolean NOT NULL +); + + + +CREATE TABLE group_users ( + group_id text NOT NULL, + user_id text NOT NULL, + is_admin boolean NOT NULL, + is_boolean NOT NULL +); + + + +CREATE TABLE groups ( + group_id text NOT NULL, + name text, + avatar_url text, + short_description text, + long_description text, + is_boolean NOT NULL, + join_policy text DEFAULT 'invite'::text NOT NULL +); + + + +CREATE TABLE guest_access ( + event_id text NOT NULL, + room_id text NOT NULL, + 
guest_access text NOT NULL +); + + + +CREATE TABLE history_visibility ( + event_id text NOT NULL, + room_id text NOT NULL, + history_visibility text NOT NULL +); + + + +CREATE TABLE local_group_membership ( + group_id text NOT NULL, + user_id text NOT NULL, + is_admin boolean NOT NULL, + membership text NOT NULL, + is_sed boolean NOT NULL, + content text NOT NULL +); + + + +CREATE TABLE local_group_updates ( + stream_id bigint NOT NULL, + group_id text NOT NULL, + user_id text NOT NULL, + type text NOT NULL, + content text NOT NULL +); + + + +CREATE TABLE local_invites ( + stream_id bigint NOT NULL, + inviter text NOT NULL, + invitee text NOT NULL, + event_id text NOT NULL, + room_id text NOT NULL, + locally_rejected text, + replaced_by text +); + + + +CREATE TABLE local_media_repository ( + media_id text, + media_type text, + media_length integer, + created_ts bigint, + upload_name text, + user_id text, + quarantined_by text, + url_cache text, + last_access_ts bigint +); + + + +CREATE TABLE local_media_repository_thumbnails ( + media_id text, + thumbnail_width integer, + thumbnail_height integer, + thumbnail_type text, + thumbnail_method text, + thumbnail_length integer +); + + + +CREATE TABLE local_media_repository_url_cache ( + url text, + response_code integer, + etag text, + expires_ts bigint, + og text, + media_id text, + download_ts bigint +); + + + +CREATE TABLE monthly_active_users ( + user_id text NOT NULL, + "timestamp" bigint NOT NULL +); + + + +CREATE TABLE open_id_tokens ( + token text NOT NULL, + ts_valid_until_ms bigint NOT NULL, + user_id text NOT NULL +); + + + +CREATE TABLE presence ( + user_id text NOT NULL, + state character varying(20), + status_msg text, + mtime bigint +); + + + +CREATE TABLE presence_allow_inbound ( + observed_user_id text NOT NULL, + observer_user_id text NOT NULL +); + + + +CREATE TABLE presence_stream ( + stream_id bigint, + user_id text, + state text, + last_active_ts bigint, + last_federation_update_ts bigint, + last_user_sync_ts bigint, + status_msg text, + currently_active boolean +); + + + +CREATE TABLE profiles ( + user_id text NOT NULL, + displayname text, + avatar_url text +); + + + +CREATE TABLE room_list_stream ( + stream_id bigint NOT NULL, + room_id text NOT NULL, + visibility boolean NOT NULL, + appservice_id text, + network_id text +); + + + +CREATE TABLE push_rules ( + id bigint NOT NULL, + user_name text NOT NULL, + rule_id text NOT NULL, + priority_class smallint NOT NULL, + priority integer DEFAULT 0 NOT NULL, + conditions text NOT NULL, + actions text NOT NULL +); + + + +CREATE TABLE push_rules_enable ( + id bigint NOT NULL, + user_name text NOT NULL, + rule_id text NOT NULL, + enabled smallint +); + + + +CREATE TABLE push_rules_stream ( + stream_id bigint NOT NULL, + event_stream_ordering bigint NOT NULL, + user_id text NOT NULL, + rule_id text NOT NULL, + op text NOT NULL, + priority_class smallint, + priority integer, + conditions text, + actions text +); + + + +CREATE TABLE pusher_throttle ( + pusher bigint NOT NULL, + room_id text NOT NULL, + last_sent_ts bigint, + throttle_ms bigint +); + + + +CREATE TABLE pushers ( + id bigint NOT NULL, + user_name text NOT NULL, + access_token bigint, + profile_tag text NOT NULL, + kind text NOT NULL, + app_id text NOT NULL, + app_display_name text NOT NULL, + device_display_name text NOT NULL, + pushkey text NOT NULL, + ts bigint NOT NULL, + lang text, + data text, + last_stream_ordering integer, + last_success bigint, + failing_since bigint +); + + + +CREATE TABLE ratelimit_override ( 
+ user_id text NOT NULL, + messages_per_second bigint, + burst_count bigint +); + + + +CREATE TABLE receipts_graph ( + room_id text NOT NULL, + receipt_type text NOT NULL, + user_id text NOT NULL, + event_ids text NOT NULL, + data text NOT NULL +); + + + +CREATE TABLE receipts_linearized ( + stream_id bigint NOT NULL, + room_id text NOT NULL, + receipt_type text NOT NULL, + user_id text NOT NULL, + event_id text NOT NULL, + data text NOT NULL +); + + + +CREATE TABLE received_transactions ( + transaction_id text, + origin text, + ts bigint, + response_code integer, + response_json bytea, + has_been_referenced smallint DEFAULT 0 +); + + + +CREATE TABLE redactions ( + event_id text NOT NULL, + redacts text NOT NULL +); + + + +CREATE TABLE rejections ( + event_id text NOT NULL, + reason text NOT NULL, + last_check text NOT NULL +); + + + +CREATE TABLE remote_media_cache ( + media_origin text, + media_id text, + media_type text, + created_ts bigint, + upload_name text, + media_length integer, + filesystem_id text, + last_access_ts bigint, + quarantined_by text +); + + + +CREATE TABLE remote_media_cache_thumbnails ( + media_origin text, + media_id text, + thumbnail_width integer, + thumbnail_height integer, + thumbnail_method text, + thumbnail_type text, + thumbnail_length integer, + filesystem_id text +); + + + +CREATE TABLE remote_profile_cache ( + user_id text NOT NULL, + displayname text, + avatar_url text, + last_check bigint NOT NULL +); + + + +CREATE TABLE room_account_data ( + user_id text NOT NULL, + room_id text NOT NULL, + account_data_type text NOT NULL, + stream_id bigint NOT NULL, + content text NOT NULL +); + + + +CREATE TABLE room_alias_servers ( + room_alias text NOT NULL, + server text NOT NULL +); + + + +CREATE TABLE room_aliases ( + room_alias text NOT NULL, + room_id text NOT NULL, + creator text +); + + + +CREATE TABLE room_depth ( + room_id text NOT NULL, + min_depth integer NOT NULL +); + + + +CREATE TABLE room_memberships ( + event_id text NOT NULL, + user_id text NOT NULL, + sender text NOT NULL, + room_id text NOT NULL, + membership text NOT NULL, + forgotten integer DEFAULT 0, + display_name text, + avatar_url text +); + + + +CREATE TABLE room_names ( + event_id text NOT NULL, + room_id text NOT NULL, + name text NOT NULL +); + + + +CREATE TABLE room_state ( + room_id text NOT NULL, + join_rules text, + history_visibility text, + encryption text, + name text, + topic text, + avatar text, + canonical_alias text +); + + + +CREATE TABLE room_stats ( + room_id text NOT NULL, + ts bigint NOT NULL, + bucket_size integer NOT NULL, + current_state_events integer NOT NULL, + joined_members integer NOT NULL, + invited_members integer NOT NULL, + left_members integer NOT NULL, + banned_members integer NOT NULL, + state_events integer NOT NULL +); + + + +CREATE TABLE room_stats_earliest_token ( + room_id text NOT NULL, + token bigint NOT NULL +); + + + +CREATE TABLE room_tags ( + user_id text NOT NULL, + room_id text NOT NULL, + tag text NOT NULL, + content text NOT NULL +); + + + +CREATE TABLE room_tags_revisions ( + user_id text NOT NULL, + room_id text NOT NULL, + stream_id bigint NOT NULL +); + + + +CREATE TABLE rooms ( + room_id text NOT NULL, + is_boolean, + creator text +); + + + +CREATE TABLE schema_version ( + lock character(1) DEFAULT 'X'::bpchar NOT NULL, + version integer NOT NULL, + upgraded boolean NOT NULL, + CONSTRAINT schema_version_lock_check CHECK ((lock = 'X'::bpchar)) +); + + + +CREATE TABLE server_keys_json ( + server_name text NOT NULL, + key_id text NOT 
NULL, + from_server text NOT NULL, + ts_added_ms bigint NOT NULL, + ts_valid_until_ms bigint NOT NULL, + key_json bytea NOT NULL +); + + + +CREATE TABLE server_signature_keys ( + server_name text, + key_id text, + from_server text, + ts_added_ms bigint, + verify_key bytea, + ts_valid_until_ms bigint +); + + + +CREATE TABLE state_events ( + event_id text NOT NULL, + room_id text NOT NULL, + type text NOT NULL, + state_key text NOT NULL, + prev_state text +); + + + +CREATE TABLE state_group_edges ( + state_group bigint NOT NULL, + prev_state_group bigint NOT NULL +); + + + +CREATE SEQUENCE state_group_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + + +CREATE TABLE state_groups ( + id bigint NOT NULL, + room_id text NOT NULL, + event_id text NOT NULL +); + + + +CREATE TABLE state_groups_state ( + state_group bigint NOT NULL, + room_id text NOT NULL, + type text NOT NULL, + state_key text NOT NULL, + event_id text NOT NULL +); + + + +CREATE TABLE stats_stream_pos ( + lock character(1) DEFAULT 'X'::bpchar NOT NULL, + stream_id bigint, + CONSTRAINT stats_stream_pos_lock_check CHECK ((lock = 'X'::bpchar)) +); + + + +CREATE TABLE stream_ordering_to_exterm ( + stream_ordering bigint NOT NULL, + room_id text NOT NULL, + event_id text NOT NULL +); + + + +CREATE TABLE threepid_guest_access_tokens ( + medium text, + address text, + guest_access_token text, + first_inviter text +); + + + +CREATE TABLE topics ( + event_id text NOT NULL, + room_id text NOT NULL, + topic text NOT NULL +); + + + +CREATE TABLE user_daily_visits ( + user_id text NOT NULL, + device_id text, + "timestamp" bigint NOT NULL +); + + + +CREATE TABLE user_directory ( + user_id text NOT NULL, + room_id text, + display_name text, + avatar_url text +); + + + +CREATE TABLE user_directory_search ( + user_id text NOT NULL, + vector tsvector +); + + + +CREATE TABLE user_directory_stream_pos ( + lock character(1) DEFAULT 'X'::bpchar NOT NULL, + stream_id bigint, + CONSTRAINT user_directory_stream_pos_lock_check CHECK ((lock = 'X'::bpchar)) +); + + + +CREATE TABLE user_filters ( + user_id text, + filter_id bigint, + filter_json bytea +); + + + +CREATE TABLE user_ips ( + user_id text NOT NULL, + access_token text NOT NULL, + device_id text, + ip text NOT NULL, + user_agent text NOT NULL, + last_seen bigint NOT NULL +); + + + +CREATE TABLE user_stats ( + user_id text NOT NULL, + ts bigint NOT NULL, + bucket_size integer NOT NULL, + rooms integer NOT NULL, + private_rooms integer NOT NULL +); + + + +CREATE TABLE user_threepid_id_server ( + user_id text NOT NULL, + medium text NOT NULL, + address text NOT NULL, + id_server text NOT NULL +); + + + +CREATE TABLE user_threepids ( + user_id text NOT NULL, + medium text NOT NULL, + address text NOT NULL, + validated_at bigint NOT NULL, + added_at bigint NOT NULL +); + + + +CREATE TABLE users ( + name text, + password_hash text, + creation_ts bigint, + admin smallint DEFAULT 0 NOT NULL, + upgrade_ts bigint, + is_guest smallint DEFAULT 0 NOT NULL, + appservice_id text, + consent_version text, + consent_server_notice_sent text, + user_type text +); + + + +CREATE TABLE users_in_rooms ( + user_id text NOT NULL, + room_id text NOT NULL +); + + + +CREATE TABLE users_pending_deactivation ( + user_id text NOT NULL +); + + + +CREATE TABLE users_who_share_private_rooms ( + user_id text NOT NULL, + other_user_id text NOT NULL, + room_id text NOT NULL +); + + + +ALTER TABLE ONLY access_tokens + ADD CONSTRAINT access_tokens_pkey PRIMARY KEY (id); + + + +ALTER TABLE ONLY 
access_tokens + ADD CONSTRAINT access_tokens_token_key UNIQUE (token); + + + +ALTER TABLE ONLY account_data + ADD CONSTRAINT account_data_uniqueness UNIQUE (user_id, account_data_type); + + + +ALTER TABLE ONLY account_validity + ADD CONSTRAINT account_validity_pkey PRIMARY KEY (user_id); + + + +ALTER TABLE ONLY application_services_state + ADD CONSTRAINT application_services_state_pkey PRIMARY KEY (as_id); + + + +ALTER TABLE ONLY application_services_txns + ADD CONSTRAINT application_services_txns_as_id_txn_id_key UNIQUE (as_id, txn_id); + + + +ALTER TABLE ONLY applied_module_schemas + ADD CONSTRAINT applied_module_schemas_module_name_file_key UNIQUE (module_name, file); + + + +ALTER TABLE ONLY applied_schema_deltas + ADD CONSTRAINT applied_schema_deltas_version_file_key UNIQUE (version, file); + + + +ALTER TABLE ONLY appservice_stream_position + ADD CONSTRAINT appservice_stream_position_lock_key UNIQUE (lock); + + + +ALTER TABLE ONLY background_updates + ADD CONSTRAINT background_updates_uniqueness UNIQUE (update_name); + + + +ALTER TABLE ONLY current_state_events + ADD CONSTRAINT current_state_events_event_id_key UNIQUE (event_id); + + + +ALTER TABLE ONLY current_state_events + ADD CONSTRAINT current_state_events_room_id_type_state_key_key UNIQUE (room_id, type, state_key); + + + +ALTER TABLE ONLY destinations + ADD CONSTRAINT destinations_pkey PRIMARY KEY (destination); + + + +ALTER TABLE ONLY devices + ADD CONSTRAINT device_uniqueness UNIQUE (user_id, device_id); + + + +ALTER TABLE ONLY e2e_device_keys_json + ADD CONSTRAINT e2e_device_keys_json_uniqueness UNIQUE (user_id, device_id); + + + +ALTER TABLE ONLY e2e_one_time_keys_json + ADD CONSTRAINT e2e_one_time_keys_json_uniqueness UNIQUE (user_id, device_id, algorithm, key_id); + + + +ALTER TABLE ONLY event_backward_extremities + ADD CONSTRAINT event_backward_extremities_event_id_room_id_key UNIQUE (event_id, room_id); + + + +ALTER TABLE ONLY event_edges + ADD CONSTRAINT event_edges_event_id_prev_event_id_room_id_is_state_key UNIQUE (event_id, prev_event_id, room_id, is_state); + + + +ALTER TABLE ONLY event_forward_extremities + ADD CONSTRAINT event_forward_extremities_event_id_room_id_key UNIQUE (event_id, room_id); + + + +ALTER TABLE ONLY event_push_actions + ADD CONSTRAINT event_id_user_id_profile_tag_uniqueness UNIQUE (room_id, event_id, user_id, profile_tag); + + + +ALTER TABLE ONLY event_json + ADD CONSTRAINT event_json_event_id_key UNIQUE (event_id); + + + +ALTER TABLE ONLY event_push_summary_stream_ordering + ADD CONSTRAINT event_push_summary_stream_ordering_lock_key UNIQUE (lock); + + + +ALTER TABLE ONLY event_reference_hashes + ADD CONSTRAINT event_reference_hashes_event_id_algorithm_key UNIQUE (event_id, algorithm); + + + +ALTER TABLE ONLY event_reports + ADD CONSTRAINT event_reports_pkey PRIMARY KEY (id); + + + +ALTER TABLE ONLY event_to_state_groups + ADD CONSTRAINT event_to_state_groups_event_id_key UNIQUE (event_id); + + + +ALTER TABLE ONLY events + ADD CONSTRAINT events_event_id_key UNIQUE (event_id); + + + +ALTER TABLE ONLY events + ADD CONSTRAINT events_pkey PRIMARY KEY (stream_ordering); + + + +ALTER TABLE ONLY ex_outlier_stream + ADD CONSTRAINT ex_outlier_stream_pkey PRIMARY KEY (event_stream_ordering); + + + +ALTER TABLE ONLY group_roles + ADD CONSTRAINT group_roles_group_id_role_id_key UNIQUE (group_id, role_id); + + + +ALTER TABLE ONLY group_room_categories + ADD CONSTRAINT group_room_categories_group_id_category_id_key UNIQUE (group_id, category_id); + + + +ALTER TABLE ONLY group_summary_roles + ADD CONSTRAINT 
group_summary_roles_group_id_role_id_role_order_key UNIQUE (group_id, role_id, role_order); + + + +ALTER TABLE ONLY group_summary_room_categories + ADD CONSTRAINT group_summary_room_categories_group_id_category_id_cat_orde_key UNIQUE (group_id, category_id, cat_order); + + + +ALTER TABLE ONLY group_summary_rooms + ADD CONSTRAINT group_summary_rooms_group_id_category_id_room_id_room_order_key UNIQUE (group_id, category_id, room_id, room_order); + + + +ALTER TABLE ONLY guest_access + ADD CONSTRAINT guest_access_event_id_key UNIQUE (event_id); + + + +ALTER TABLE ONLY history_visibility + ADD CONSTRAINT history_visibility_event_id_key UNIQUE (event_id); + + + +ALTER TABLE ONLY local_media_repository + ADD CONSTRAINT local_media_repository_media_id_key UNIQUE (media_id); + + + +ALTER TABLE ONLY local_media_repository_thumbnails + ADD CONSTRAINT local_media_repository_thumbn_media_id_thumbnail_width_thum_key UNIQUE (media_id, thumbnail_width, thumbnail_height, thumbnail_type); + + + +ALTER TABLE ONLY user_threepids + ADD CONSTRAINT medium_address UNIQUE (medium, address); + + + +ALTER TABLE ONLY open_id_tokens + ADD CONSTRAINT open_id_tokens_pkey PRIMARY KEY (token); + + + +ALTER TABLE ONLY presence_allow_inbound + ADD CONSTRAINT presence_allow_inbound_observed_user_id_observer_user_id_key UNIQUE (observed_user_id, observer_user_id); + + + +ALTER TABLE ONLY presence + ADD CONSTRAINT presence_user_id_key UNIQUE (user_id); + + + +ALTER TABLE ONLY account_data_max_stream_id + ADD CONSTRAINT private_user_data_max_stream_id_lock_key UNIQUE (lock); + + + +ALTER TABLE ONLY profiles + ADD CONSTRAINT profiles_user_id_key UNIQUE (user_id); + + + +ALTER TABLE ONLY push_rules_enable + ADD CONSTRAINT push_rules_enable_pkey PRIMARY KEY (id); + + + +ALTER TABLE ONLY push_rules_enable + ADD CONSTRAINT push_rules_enable_user_name_rule_id_key UNIQUE (user_name, rule_id); + + + +ALTER TABLE ONLY push_rules + ADD CONSTRAINT push_rules_pkey PRIMARY KEY (id); + + + +ALTER TABLE ONLY push_rules + ADD CONSTRAINT push_rules_user_name_rule_id_key UNIQUE (user_name, rule_id); + + + +ALTER TABLE ONLY pusher_throttle + ADD CONSTRAINT pusher_throttle_pkey PRIMARY KEY (pusher, room_id); + + + +ALTER TABLE ONLY pushers + ADD CONSTRAINT pushers2_app_id_pushkey_user_name_key UNIQUE (app_id, pushkey, user_name); + + + +ALTER TABLE ONLY pushers + ADD CONSTRAINT pushers2_pkey PRIMARY KEY (id); + + + +ALTER TABLE ONLY receipts_graph + ADD CONSTRAINT receipts_graph_uniqueness UNIQUE (room_id, receipt_type, user_id); + + + +ALTER TABLE ONLY receipts_linearized + ADD CONSTRAINT receipts_linearized_uniqueness UNIQUE (room_id, receipt_type, user_id); + + + +ALTER TABLE ONLY received_transactions + ADD CONSTRAINT received_transactions_transaction_id_origin_key UNIQUE (transaction_id, origin); + + + +ALTER TABLE ONLY redactions + ADD CONSTRAINT redactions_event_id_key UNIQUE (event_id); + + + +ALTER TABLE ONLY rejections + ADD CONSTRAINT rejections_event_id_key UNIQUE (event_id); + + + +ALTER TABLE ONLY remote_media_cache + ADD CONSTRAINT remote_media_cache_media_origin_media_id_key UNIQUE (media_origin, media_id); + + + +ALTER TABLE ONLY remote_media_cache_thumbnails + ADD CONSTRAINT remote_media_cache_thumbnails_media_origin_media_id_thumbna_key UNIQUE (media_origin, media_id, thumbnail_width, thumbnail_height, thumbnail_type); + + + +ALTER TABLE ONLY room_account_data + ADD CONSTRAINT room_account_data_uniqueness UNIQUE (user_id, room_id, account_data_type); + + + +ALTER TABLE ONLY room_aliases + ADD CONSTRAINT 
room_aliases_room_alias_key UNIQUE (room_alias); + + + +ALTER TABLE ONLY room_depth + ADD CONSTRAINT room_depth_room_id_key UNIQUE (room_id); + + + +ALTER TABLE ONLY room_memberships + ADD CONSTRAINT room_memberships_event_id_key UNIQUE (event_id); + + + +ALTER TABLE ONLY room_names + ADD CONSTRAINT room_names_event_id_key UNIQUE (event_id); + + + +ALTER TABLE ONLY room_tags_revisions + ADD CONSTRAINT room_tag_revisions_uniqueness UNIQUE (user_id, room_id); + + + +ALTER TABLE ONLY room_tags + ADD CONSTRAINT room_tag_uniqueness UNIQUE (user_id, room_id, tag); + + + +ALTER TABLE ONLY rooms + ADD CONSTRAINT rooms_pkey PRIMARY KEY (room_id); + + + +ALTER TABLE ONLY schema_version + ADD CONSTRAINT schema_version_lock_key UNIQUE (lock); + + + +ALTER TABLE ONLY server_keys_json + ADD CONSTRAINT server_keys_json_uniqueness UNIQUE (server_name, key_id, from_server); + + + +ALTER TABLE ONLY server_signature_keys + ADD CONSTRAINT server_signature_keys_server_name_key_id_key UNIQUE (server_name, key_id); + + + +ALTER TABLE ONLY state_events + ADD CONSTRAINT state_events_event_id_key UNIQUE (event_id); + + + +ALTER TABLE ONLY state_groups + ADD CONSTRAINT state_groups_pkey PRIMARY KEY (id); + + + +ALTER TABLE ONLY stats_stream_pos + ADD CONSTRAINT stats_stream_pos_lock_key UNIQUE (lock); + + + +ALTER TABLE ONLY topics + ADD CONSTRAINT topics_event_id_key UNIQUE (event_id); + + + +ALTER TABLE ONLY user_directory_stream_pos + ADD CONSTRAINT user_directory_stream_pos_lock_key UNIQUE (lock); + + + +ALTER TABLE ONLY users + ADD CONSTRAINT users_name_key UNIQUE (name); + + + +CREATE INDEX _extremities_to_check_id ON _extremities_to_check USING btree (event_id); + + + +CREATE INDEX account_data_stream_id ON account_data USING btree (user_id, stream_id); + + + +CREATE INDEX application_services_txns_id ON application_services_txns USING btree (as_id); + + + +CREATE UNIQUE INDEX appservice_room_list_idx ON appservice_room_list USING btree (appservice_id, network_id, room_id); + + + +CREATE UNIQUE INDEX blocked_rooms_idx ON blocked_rooms USING btree (room_id); + + + +CREATE INDEX cache_invalidation_stream_id ON cache_invalidation_stream USING btree (stream_id); + + + +CREATE INDEX current_state_delta_stream_idx ON current_state_delta_stream USING btree (stream_id); + + + +CREATE INDEX deleted_pushers_stream_id ON deleted_pushers USING btree (stream_id); + + + +CREATE INDEX device_federation_inbox_sender_id ON device_federation_inbox USING btree (origin, message_id); + + + +CREATE INDEX device_federation_outbox_destination_id ON device_federation_outbox USING btree (destination, stream_id); + + + +CREATE INDEX device_federation_outbox_id ON device_federation_outbox USING btree (stream_id); + + + +CREATE INDEX device_inbox_stream_id ON device_inbox USING btree (stream_id); + + + +CREATE INDEX device_inbox_user_stream_id ON device_inbox USING btree (user_id, device_id, stream_id); + + + +CREATE INDEX device_lists_outbound_last_success_idx ON device_lists_outbound_last_success USING btree (destination, user_id, stream_id); + + + +CREATE INDEX device_lists_outbound_pokes_id ON device_lists_outbound_pokes USING btree (destination, stream_id); + + + +CREATE INDEX device_lists_outbound_pokes_stream ON device_lists_outbound_pokes USING btree (stream_id); + + + +CREATE INDEX device_lists_outbound_pokes_user ON device_lists_outbound_pokes USING btree (destination, user_id); + + + +CREATE INDEX device_lists_stream_id ON device_lists_stream USING btree (stream_id, user_id); + + + +CREATE UNIQUE INDEX e2e_room_keys_idx ON 
e2e_room_keys USING btree (user_id, room_id, session_id); + + + +CREATE UNIQUE INDEX e2e_room_keys_versions_idx ON e2e_room_keys_versions USING btree (user_id, version); + + + +CREATE UNIQUE INDEX erased_users_user ON erased_users USING btree (user_id); + + + +CREATE INDEX ev_b_extrem_id ON event_backward_extremities USING btree (event_id); + + + +CREATE INDEX ev_b_extrem_room ON event_backward_extremities USING btree (room_id); + + + +CREATE INDEX ev_edges_id ON event_edges USING btree (event_id); + + + +CREATE INDEX ev_edges_prev_id ON event_edges USING btree (prev_event_id); + + + +CREATE INDEX ev_extrem_id ON event_forward_extremities USING btree (event_id); + + + +CREATE INDEX ev_extrem_room ON event_forward_extremities USING btree (room_id); + + + +CREATE INDEX evauth_edges_id ON event_auth USING btree (event_id); + + + +CREATE INDEX event_json_room_id ON event_json USING btree (room_id); + + + +CREATE INDEX event_push_actions_rm_tokens ON event_push_actions USING btree (user_id, room_id, topological_ordering, stream_ordering); + + + +CREATE INDEX event_push_actions_room_id_user_id ON event_push_actions USING btree (room_id, user_id); + + + +CREATE INDEX event_push_actions_staging_id ON event_push_actions_staging USING btree (event_id); + + + +CREATE INDEX event_push_actions_stream_ordering ON event_push_actions USING btree (stream_ordering, user_id); + + + +CREATE INDEX event_push_summary_user_rm ON event_push_summary USING btree (user_id, room_id); + + + +CREATE INDEX event_reference_hashes_id ON event_reference_hashes USING btree (event_id); + + + +CREATE UNIQUE INDEX event_relations_id ON event_relations USING btree (event_id); + + + +CREATE INDEX event_relations_relates ON event_relations USING btree (relates_to_id, relation_type, aggregation_key); + + + +CREATE INDEX event_search_ev_ridx ON event_search USING btree (room_id); + + + +CREATE INDEX event_search_fts_idx ON event_search USING gin (vector); + + + +CREATE INDEX events_order_room ON events USING btree (room_id, topological_ordering, stream_ordering); + + + +CREATE INDEX events_room_stream ON events USING btree (room_id, stream_ordering); + + + +CREATE INDEX events_ts ON events USING btree (origin_server_ts, stream_ordering); + + + +CREATE INDEX group_attestations_remote_g_idx ON group_attestations_remote USING btree (group_id, user_id); + + + +CREATE INDEX group_attestations_remote_u_idx ON group_attestations_remote USING btree (user_id); + + + +CREATE INDEX group_attestations_remote_v_idx ON group_attestations_remote USING btree (valid_until_ms); + + + +CREATE INDEX group_attestations_renewals_g_idx ON group_attestations_renewals USING btree (group_id, user_id); + + + +CREATE INDEX group_attestations_renewals_u_idx ON group_attestations_renewals USING btree (user_id); + + + +CREATE INDEX group_attestations_renewals_v_idx ON group_attestations_renewals USING btree (valid_until_ms); + + + +CREATE UNIQUE INDEX group_invites_g_idx ON group_invites USING btree (group_id, user_id); + + + +CREATE INDEX group_invites_u_idx ON group_invites USING btree (user_id); + + + +CREATE UNIQUE INDEX group_rooms_g_idx ON group_rooms USING btree (group_id, room_id); + + + +CREATE INDEX group_rooms_r_idx ON group_rooms USING btree (room_id); + + + +CREATE UNIQUE INDEX group_summary_rooms_g_idx ON group_summary_rooms USING btree (group_id, room_id, category_id); + + + +CREATE INDEX group_summary_users_g_idx ON group_summary_users USING btree (group_id); + + + +CREATE UNIQUE INDEX group_users_g_idx ON group_users USING btree (group_id, 
user_id); + + + +CREATE INDEX group_users_u_idx ON group_users USING btree (user_id); + + + +CREATE UNIQUE INDEX groups_idx ON groups USING btree (group_id); + + + +CREATE INDEX local_group_membership_g_idx ON local_group_membership USING btree (group_id); + + + +CREATE INDEX local_group_membership_u_idx ON local_group_membership USING btree (user_id, group_id); + + + +CREATE INDEX local_invites_for_user_idx ON local_invites USING btree (invitee, locally_rejected, replaced_by, room_id); + + + +CREATE INDEX local_invites_id ON local_invites USING btree (stream_id); + + + +CREATE INDEX local_media_repository_thumbnails_media_id ON local_media_repository_thumbnails USING btree (media_id); + + + +CREATE INDEX local_media_repository_url_cache_by_url_download_ts ON local_media_repository_url_cache USING btree (url, download_ts); + + + +CREATE INDEX local_media_repository_url_cache_expires_idx ON local_media_repository_url_cache USING btree (expires_ts); + + + +CREATE INDEX local_media_repository_url_cache_media_idx ON local_media_repository_url_cache USING btree (media_id); + + + +CREATE INDEX monthly_active_users_time_stamp ON monthly_active_users USING btree ("timestamp"); + + + +CREATE UNIQUE INDEX monthly_active_users_users ON monthly_active_users USING btree (user_id); + + + +CREATE INDEX open_id_tokens_ts_valid_until_ms ON open_id_tokens USING btree (ts_valid_until_ms); + + + +CREATE INDEX presence_stream_id ON presence_stream USING btree (stream_id, user_id); + + + +CREATE INDEX presence_stream_user_id ON presence_stream USING btree (user_id); + + + +CREATE INDEX room_index ON rooms USING btree (is_; + + + +CREATE INDEX room_list_stream_idx ON room_list_stream USING btree (stream_id); + + + +CREATE INDEX room_list_stream_rm_idx ON room_list_stream USING btree (room_id, stream_id); + + + +CREATE INDEX push_rules_enable_user_name ON push_rules_enable USING btree (user_name); + + + +CREATE INDEX push_rules_stream_id ON push_rules_stream USING btree (stream_id); + + + +CREATE INDEX push_rules_stream_user_stream_id ON push_rules_stream USING btree (user_id, stream_id); + + + +CREATE INDEX push_rules_user_name ON push_rules USING btree (user_name); + + + +CREATE UNIQUE INDEX ratelimit_override_idx ON ratelimit_override USING btree (user_id); + + + +CREATE INDEX receipts_linearized_id ON receipts_linearized USING btree (stream_id); + + + +CREATE INDEX receipts_linearized_room_stream ON receipts_linearized USING btree (room_id, stream_id); + + + +CREATE INDEX receipts_linearized_user ON receipts_linearized USING btree (user_id); + + + +CREATE INDEX received_transactions_ts ON received_transactions USING btree (ts); + + + +CREATE INDEX redactions_redacts ON redactions USING btree (redacts); + + + +CREATE INDEX remote_profile_cache_time ON remote_profile_cache USING btree (last_check); + + + +CREATE UNIQUE INDEX remote_profile_cache_user_id ON remote_profile_cache USING btree (user_id); + + + +CREATE INDEX room_account_data_stream_id ON room_account_data USING btree (user_id, stream_id); + + + +CREATE INDEX room_alias_servers_alias ON room_alias_servers USING btree (room_alias); + + + +CREATE INDEX room_aliases_id ON room_aliases USING btree (room_id); + + + +CREATE INDEX room_depth_room ON room_depth USING btree (room_id); + + + +CREATE INDEX room_memberships_room_id ON room_memberships USING btree (room_id); + + + +CREATE INDEX room_memberships_user_id ON room_memberships USING btree (user_id); + + + +CREATE INDEX room_names_room_id ON room_names USING btree (room_id); + + + +CREATE UNIQUE INDEX 
room_state_room ON room_state USING btree (room_id); + + + +CREATE UNIQUE INDEX room_stats_earliest_token_idx ON room_stats_earliest_token USING btree (room_id); + + + +CREATE UNIQUE INDEX room_stats_room_ts ON room_stats USING btree (room_id, ts); + + + +CREATE INDEX state_group_edges_idx ON state_group_edges USING btree (state_group); + + + +CREATE INDEX state_group_edges_prev_idx ON state_group_edges USING btree (prev_state_group); + + + +CREATE INDEX state_groups_state_id ON state_groups_state USING btree (state_group); + + + +CREATE INDEX stream_ordering_to_exterm_idx ON stream_ordering_to_exterm USING btree (stream_ordering); + + + +CREATE INDEX stream_ordering_to_exterm_rm_idx ON stream_ordering_to_exterm USING btree (room_id, stream_ordering); + + + +CREATE UNIQUE INDEX threepid_guest_access_tokens_index ON threepid_guest_access_tokens USING btree (medium, address); + + + +CREATE INDEX topics_room_id ON topics USING btree (room_id); + + + +CREATE INDEX user_daily_visits_ts_idx ON user_daily_visits USING btree ("timestamp"); + + + +CREATE INDEX user_daily_visits_uts_idx ON user_daily_visits USING btree (user_id, "timestamp"); + + + +CREATE INDEX user_directory_room_idx ON user_directory USING btree (room_id); + + + +CREATE INDEX user_directory_search_fts_idx ON user_directory_search USING gin (vector); + + + +CREATE UNIQUE INDEX user_directory_search_user_idx ON user_directory_search USING btree (user_id); + + + +CREATE UNIQUE INDEX user_directory_user_idx ON user_directory USING btree (user_id); + + + +CREATE INDEX user_filters_by_user_id_filter_id ON user_filters USING btree (user_id, filter_id); + + + +CREATE INDEX user_ips_user_ip ON user_ips USING btree (user_id, access_token, ip); + + + +CREATE UNIQUE INDEX user_stats_user_ts ON user_stats USING btree (user_id, ts); + + + +CREATE UNIQUE INDEX user_threepid_id_server_idx ON user_threepid_id_server USING btree (user_id, medium, address, id_server); + + + +CREATE INDEX user_threepids_medium_address ON user_threepids USING btree (medium, address); + + + +CREATE INDEX user_threepids_user_id ON user_threepids USING btree (user_id); + + + +CREATE UNIQUE INDEX users_in_rooms_u_idx ON users_in_rooms USING btree (user_id, room_id); + + + +CREATE INDEX users_who_share_private_rooms_o_idx ON users_who_share_private_rooms USING btree (other_user_id); + + + +CREATE INDEX users_who_share_private_rooms_r_idx ON users_who_share_private_rooms USING btree (room_id); + + + +CREATE UNIQUE INDEX users_who_share_private_rooms_u_idx ON users_who_share_private_rooms USING btree (user_id, other_user_id, room_id); + + + diff --git a/synapse/storage/schema/full_schemas/54/full.sql.sqlite b/synapse/storage/schema/full_schemas/54/full.sql.sqlite new file mode 100644 index 0000000000..0b60a6c789 --- /dev/null +++ b/synapse/storage/schema/full_schemas/54/full.sql.sqlite @@ -0,0 +1,261 @@ +CREATE TABLE application_services_state( as_id TEXT PRIMARY KEY, state VARCHAR(5), last_txn INTEGER ); +CREATE TABLE application_services_txns( as_id TEXT NOT NULL, txn_id INTEGER NOT NULL, event_ids TEXT NOT NULL, UNIQUE(as_id, txn_id) ); +CREATE INDEX application_services_txns_id ON application_services_txns ( as_id ); +CREATE TABLE presence( user_id TEXT NOT NULL, state VARCHAR(20), status_msg TEXT, mtime BIGINT, UNIQUE (user_id) ); +CREATE TABLE presence_allow_inbound( observed_user_id TEXT NOT NULL, observer_user_id TEXT NOT NULL, UNIQUE (observed_user_id, observer_user_id) ); +CREATE TABLE users( name TEXT, password_hash TEXT, creation_ts BIGINT, admin SMALLINT 
DEFAULT 0 NOT NULL, upgrade_ts BIGINT, is_guest SMALLINT DEFAULT 0 NOT NULL, appservice_id TEXT, consent_version TEXT, consent_server_notice_sent TEXT, user_type TEXT DEFAULT NULL, UNIQUE(name) ); +CREATE TABLE access_tokens( id BIGINT PRIMARY KEY, user_id TEXT NOT NULL, device_id TEXT, token TEXT NOT NULL, last_used BIGINT, UNIQUE(token) ); +CREATE TABLE user_ips ( user_id TEXT NOT NULL, access_token TEXT NOT NULL, device_id TEXT, ip TEXT NOT NULL, user_agent TEXT NOT NULL, last_seen BIGINT NOT NULL ); +CREATE TABLE profiles( user_id TEXT NOT NULL, displayname TEXT, avatar_url TEXT, UNIQUE(user_id) ); +CREATE TABLE received_transactions( transaction_id TEXT, origin TEXT, ts BIGINT, response_code INTEGER, response_json bytea, has_been_referenced smallint default 0, UNIQUE (transaction_id, origin) ); +CREATE TABLE destinations( destination TEXT PRIMARY KEY, retry_last_ts BIGINT, retry_interval INTEGER ); +CREATE TABLE events( stream_ordering INTEGER PRIMARY KEY, topological_ordering BIGINT NOT NULL, event_id TEXT NOT NULL, type TEXT NOT NULL, room_id TEXT NOT NULL, content TEXT, unrecognized_keys TEXT, processed BOOL NOT NULL, outlier BOOL NOT NULL, depth BIGINT DEFAULT 0 NOT NULL, origin_server_ts BIGINT, received_ts BIGINT, sender TEXT, contains_url BOOLEAN, UNIQUE (event_id) ); +CREATE INDEX events_order_room ON events ( room_id, topological_ordering, stream_ordering ); +CREATE TABLE event_json( event_id TEXT NOT NULL, room_id TEXT NOT NULL, internal_metadata TEXT NOT NULL, json TEXT NOT NULL, format_version INTEGER, UNIQUE (event_id) ); +CREATE INDEX event_json_room_id ON event_json(room_id); +CREATE TABLE state_events( event_id TEXT NOT NULL, room_id TEXT NOT NULL, type TEXT NOT NULL, state_key TEXT NOT NULL, prev_state TEXT, UNIQUE (event_id) ); +CREATE TABLE current_state_events( event_id TEXT NOT NULL, room_id TEXT NOT NULL, type TEXT NOT NULL, state_key TEXT NOT NULL, UNIQUE (event_id), UNIQUE (room_id, type, state_key) ); +CREATE TABLE room_memberships( event_id TEXT NOT NULL, user_id TEXT NOT NULL, sender TEXT NOT NULL, room_id TEXT NOT NULL, membership TEXT NOT NULL, forgotten INTEGER DEFAULT 0, display_name TEXT, avatar_url TEXT, UNIQUE (event_id) ); +CREATE INDEX room_memberships_room_id ON room_memberships (room_id); +CREATE INDEX room_memberships_user_id ON room_memberships (user_id); +CREATE TABLE topics( event_id TEXT NOT NULL, room_id TEXT NOT NULL, topic TEXT NOT NULL, UNIQUE (event_id) ); +CREATE INDEX topics_room_id ON topics(room_id); +CREATE TABLE room_names( event_id TEXT NOT NULL, room_id TEXT NOT NULL, name TEXT NOT NULL, UNIQUE (event_id) ); +CREATE INDEX room_names_room_id ON room_names(room_id); +CREATE TABLE rooms( room_id TEXT PRIMARY KEY NOT NULL, is_public BOOL, creator TEXT ); +CREATE TABLE server_signature_keys( server_name TEXT, key_id TEXT, from_server TEXT, ts_added_ms BIGINT, verify_key bytea, ts_valid_until_ms BIGINT, UNIQUE (server_name, key_id) ); +CREATE TABLE rejections( event_id TEXT NOT NULL, reason TEXT NOT NULL, last_check TEXT NOT NULL, UNIQUE (event_id) ); +CREATE TABLE push_rules ( id BIGINT PRIMARY KEY, user_name TEXT NOT NULL, rule_id TEXT NOT NULL, priority_class SMALLINT NOT NULL, priority INTEGER NOT NULL DEFAULT 0, conditions TEXT NOT NULL, actions TEXT NOT NULL, UNIQUE(user_name, rule_id) ); +CREATE INDEX push_rules_user_name on push_rules (user_name); +CREATE TABLE user_filters( user_id TEXT, filter_id BIGINT, filter_json bytea ); +CREATE INDEX user_filters_by_user_id_filter_id ON user_filters( user_id, filter_id ); +CREATE TABLE 
push_rules_enable ( id BIGINT PRIMARY KEY, user_name TEXT NOT NULL, rule_id TEXT NOT NULL, enabled SMALLINT, UNIQUE(user_name, rule_id) ); +CREATE INDEX push_rules_enable_user_name on push_rules_enable (user_name); +CREATE TABLE event_forward_extremities( event_id TEXT NOT NULL, room_id TEXT NOT NULL, UNIQUE (event_id, room_id) ); +CREATE INDEX ev_extrem_room ON event_forward_extremities(room_id); +CREATE INDEX ev_extrem_id ON event_forward_extremities(event_id); +CREATE TABLE event_backward_extremities( event_id TEXT NOT NULL, room_id TEXT NOT NULL, UNIQUE (event_id, room_id) ); +CREATE INDEX ev_b_extrem_room ON event_backward_extremities(room_id); +CREATE INDEX ev_b_extrem_id ON event_backward_extremities(event_id); +CREATE TABLE event_edges( event_id TEXT NOT NULL, prev_event_id TEXT NOT NULL, room_id TEXT NOT NULL, is_state BOOL NOT NULL, UNIQUE (event_id, prev_event_id, room_id, is_state) ); +CREATE INDEX ev_edges_id ON event_edges(event_id); +CREATE INDEX ev_edges_prev_id ON event_edges(prev_event_id); +CREATE TABLE room_depth( room_id TEXT NOT NULL, min_depth INTEGER NOT NULL, UNIQUE (room_id) ); +CREATE INDEX room_depth_room ON room_depth(room_id); +CREATE TABLE state_groups( id BIGINT PRIMARY KEY, room_id TEXT NOT NULL, event_id TEXT NOT NULL ); +CREATE TABLE state_groups_state( state_group BIGINT NOT NULL, room_id TEXT NOT NULL, type TEXT NOT NULL, state_key TEXT NOT NULL, event_id TEXT NOT NULL ); +CREATE TABLE event_to_state_groups( event_id TEXT NOT NULL, state_group BIGINT NOT NULL, UNIQUE (event_id) ); +CREATE TABLE local_media_repository ( media_id TEXT, media_type TEXT, media_length INTEGER, created_ts BIGINT, upload_name TEXT, user_id TEXT, quarantined_by TEXT, url_cache TEXT, last_access_ts BIGINT, UNIQUE (media_id) ); +CREATE TABLE local_media_repository_thumbnails ( media_id TEXT, thumbnail_width INTEGER, thumbnail_height INTEGER, thumbnail_type TEXT, thumbnail_method TEXT, thumbnail_length INTEGER, UNIQUE ( media_id, thumbnail_width, thumbnail_height, thumbnail_type ) ); +CREATE INDEX local_media_repository_thumbnails_media_id ON local_media_repository_thumbnails (media_id); +CREATE TABLE remote_media_cache ( media_origin TEXT, media_id TEXT, media_type TEXT, created_ts BIGINT, upload_name TEXT, media_length INTEGER, filesystem_id TEXT, last_access_ts BIGINT, quarantined_by TEXT, UNIQUE (media_origin, media_id) ); +CREATE TABLE remote_media_cache_thumbnails ( media_origin TEXT, media_id TEXT, thumbnail_width INTEGER, thumbnail_height INTEGER, thumbnail_method TEXT, thumbnail_type TEXT, thumbnail_length INTEGER, filesystem_id TEXT, UNIQUE ( media_origin, media_id, thumbnail_width, thumbnail_height, thumbnail_type ) ); +CREATE TABLE redactions ( event_id TEXT NOT NULL, redacts TEXT NOT NULL, UNIQUE (event_id) ); +CREATE INDEX redactions_redacts ON redactions (redacts); +CREATE TABLE room_aliases( room_alias TEXT NOT NULL, room_id TEXT NOT NULL, creator TEXT, UNIQUE (room_alias) ); +CREATE INDEX room_aliases_id ON room_aliases(room_id); +CREATE TABLE room_alias_servers( room_alias TEXT NOT NULL, server TEXT NOT NULL ); +CREATE INDEX room_alias_servers_alias ON room_alias_servers(room_alias); +CREATE TABLE event_reference_hashes ( event_id TEXT, algorithm TEXT, hash bytea, UNIQUE (event_id, algorithm) ); +CREATE INDEX event_reference_hashes_id ON event_reference_hashes(event_id); +CREATE TABLE IF NOT EXISTS "server_keys_json" ( server_name TEXT NOT NULL, key_id TEXT NOT NULL, from_server TEXT NOT NULL, ts_added_ms BIGINT NOT NULL, ts_valid_until_ms BIGINT NOT NULL, 
key_json bytea NOT NULL, CONSTRAINT server_keys_json_uniqueness UNIQUE (server_name, key_id, from_server) ); +CREATE TABLE e2e_device_keys_json ( user_id TEXT NOT NULL, device_id TEXT NOT NULL, ts_added_ms BIGINT NOT NULL, key_json TEXT NOT NULL, CONSTRAINT e2e_device_keys_json_uniqueness UNIQUE (user_id, device_id) ); +CREATE TABLE e2e_one_time_keys_json ( user_id TEXT NOT NULL, device_id TEXT NOT NULL, algorithm TEXT NOT NULL, key_id TEXT NOT NULL, ts_added_ms BIGINT NOT NULL, key_json TEXT NOT NULL, CONSTRAINT e2e_one_time_keys_json_uniqueness UNIQUE (user_id, device_id, algorithm, key_id) ); +CREATE TABLE receipts_graph( room_id TEXT NOT NULL, receipt_type TEXT NOT NULL, user_id TEXT NOT NULL, event_ids TEXT NOT NULL, data TEXT NOT NULL, CONSTRAINT receipts_graph_uniqueness UNIQUE (room_id, receipt_type, user_id) ); +CREATE TABLE receipts_linearized ( stream_id BIGINT NOT NULL, room_id TEXT NOT NULL, receipt_type TEXT NOT NULL, user_id TEXT NOT NULL, event_id TEXT NOT NULL, data TEXT NOT NULL, CONSTRAINT receipts_linearized_uniqueness UNIQUE (room_id, receipt_type, user_id) ); +CREATE INDEX receipts_linearized_id ON receipts_linearized( stream_id ); +CREATE INDEX receipts_linearized_room_stream ON receipts_linearized( room_id, stream_id ); +CREATE TABLE IF NOT EXISTS "user_threepids" ( user_id TEXT NOT NULL, medium TEXT NOT NULL, address TEXT NOT NULL, validated_at BIGINT NOT NULL, added_at BIGINT NOT NULL, CONSTRAINT medium_address UNIQUE (medium, address) ); +CREATE INDEX user_threepids_user_id ON user_threepids(user_id); +CREATE TABLE background_updates( update_name TEXT NOT NULL, progress_json TEXT NOT NULL, depends_on TEXT, CONSTRAINT background_updates_uniqueness UNIQUE (update_name) ); +CREATE VIRTUAL TABLE event_search USING fts4 ( event_id, room_id, sender, key, value ) +/* event_search(event_id,room_id,sender,"key",value) */; +CREATE TABLE IF NOT EXISTS 'event_search_content'(docid INTEGER PRIMARY KEY, 'c0event_id', 'c1room_id', 'c2sender', 'c3key', 'c4value'); +CREATE TABLE IF NOT EXISTS 'event_search_segments'(blockid INTEGER PRIMARY KEY, block BLOB); +CREATE TABLE IF NOT EXISTS 'event_search_segdir'(level INTEGER,idx INTEGER,start_block INTEGER,leaves_end_block INTEGER,end_block INTEGER,root BLOB,PRIMARY KEY(level, idx)); +CREATE TABLE IF NOT EXISTS 'event_search_docsize'(docid INTEGER PRIMARY KEY, size BLOB); +CREATE TABLE IF NOT EXISTS 'event_search_stat'(id INTEGER PRIMARY KEY, value BLOB); +CREATE TABLE guest_access( event_id TEXT NOT NULL, room_id TEXT NOT NULL, guest_access TEXT NOT NULL, UNIQUE (event_id) ); +CREATE TABLE history_visibility( event_id TEXT NOT NULL, room_id TEXT NOT NULL, history_visibility TEXT NOT NULL, UNIQUE (event_id) ); +CREATE TABLE room_tags( user_id TEXT NOT NULL, room_id TEXT NOT NULL, tag TEXT NOT NULL, content TEXT NOT NULL, CONSTRAINT room_tag_uniqueness UNIQUE (user_id, room_id, tag) ); +CREATE TABLE room_tags_revisions ( user_id TEXT NOT NULL, room_id TEXT NOT NULL, stream_id BIGINT NOT NULL, CONSTRAINT room_tag_revisions_uniqueness UNIQUE (user_id, room_id) ); +CREATE TABLE IF NOT EXISTS "account_data_max_stream_id"( Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, stream_id BIGINT NOT NULL, CHECK (Lock='X') ); +CREATE TABLE account_data( user_id TEXT NOT NULL, account_data_type TEXT NOT NULL, stream_id BIGINT NOT NULL, content TEXT NOT NULL, CONSTRAINT account_data_uniqueness UNIQUE (user_id, account_data_type) ); +CREATE TABLE room_account_data( user_id TEXT NOT NULL, room_id TEXT NOT NULL, account_data_type TEXT NOT NULL, stream_id BIGINT 
NOT NULL, content TEXT NOT NULL, CONSTRAINT room_account_data_uniqueness UNIQUE (user_id, room_id, account_data_type) ); +CREATE INDEX account_data_stream_id on account_data(user_id, stream_id); +CREATE INDEX room_account_data_stream_id on room_account_data(user_id, stream_id); +CREATE INDEX events_ts ON events(origin_server_ts, stream_ordering); +CREATE TABLE event_push_actions( room_id TEXT NOT NULL, event_id TEXT NOT NULL, user_id TEXT NOT NULL, profile_tag VARCHAR(32), actions TEXT NOT NULL, topological_ordering BIGINT, stream_ordering BIGINT, notif SMALLINT, highlight SMALLINT, CONSTRAINT event_id_user_id_profile_tag_uniqueness UNIQUE (room_id, event_id, user_id, profile_tag) ); +CREATE INDEX event_push_actions_room_id_user_id on event_push_actions(room_id, user_id); +CREATE INDEX events_room_stream on events(room_id, stream_ordering); +CREATE INDEX public_room_index on rooms(is_public); +CREATE INDEX receipts_linearized_user ON receipts_linearized( user_id ); +CREATE INDEX event_push_actions_rm_tokens on event_push_actions( user_id, room_id, topological_ordering, stream_ordering ); +CREATE TABLE presence_stream( stream_id BIGINT, user_id TEXT, state TEXT, last_active_ts BIGINT, last_federation_update_ts BIGINT, last_user_sync_ts BIGINT, status_msg TEXT, currently_active BOOLEAN ); +CREATE INDEX presence_stream_id ON presence_stream(stream_id, user_id); +CREATE INDEX presence_stream_user_id ON presence_stream(user_id); +CREATE TABLE push_rules_stream( stream_id BIGINT NOT NULL, event_stream_ordering BIGINT NOT NULL, user_id TEXT NOT NULL, rule_id TEXT NOT NULL, op TEXT NOT NULL, priority_class SMALLINT, priority INTEGER, conditions TEXT, actions TEXT ); +CREATE INDEX push_rules_stream_id ON push_rules_stream(stream_id); +CREATE INDEX push_rules_stream_user_stream_id on push_rules_stream(user_id, stream_id); +CREATE TABLE ex_outlier_stream( event_stream_ordering BIGINT PRIMARY KEY NOT NULL, event_id TEXT NOT NULL, state_group BIGINT NOT NULL ); +CREATE TABLE threepid_guest_access_tokens( medium TEXT, address TEXT, guest_access_token TEXT, first_inviter TEXT ); +CREATE UNIQUE INDEX threepid_guest_access_tokens_index ON threepid_guest_access_tokens(medium, address); +CREATE TABLE local_invites( stream_id BIGINT NOT NULL, inviter TEXT NOT NULL, invitee TEXT NOT NULL, event_id TEXT NOT NULL, room_id TEXT NOT NULL, locally_rejected TEXT, replaced_by TEXT ); +CREATE INDEX local_invites_id ON local_invites(stream_id); +CREATE INDEX local_invites_for_user_idx ON local_invites(invitee, locally_rejected, replaced_by, room_id); +CREATE INDEX event_push_actions_stream_ordering on event_push_actions( stream_ordering, user_id ); +CREATE TABLE open_id_tokens ( token TEXT NOT NULL PRIMARY KEY, ts_valid_until_ms bigint NOT NULL, user_id TEXT NOT NULL, UNIQUE (token) ); +CREATE INDEX open_id_tokens_ts_valid_until_ms ON open_id_tokens(ts_valid_until_ms); +CREATE TABLE pusher_throttle( pusher BIGINT NOT NULL, room_id TEXT NOT NULL, last_sent_ts BIGINT, throttle_ms BIGINT, PRIMARY KEY (pusher, room_id) ); +CREATE TABLE event_reports( id BIGINT NOT NULL PRIMARY KEY, received_ts BIGINT NOT NULL, room_id TEXT NOT NULL, event_id TEXT NOT NULL, user_id TEXT NOT NULL, reason TEXT, content TEXT ); +CREATE TABLE devices ( user_id TEXT NOT NULL, device_id TEXT NOT NULL, display_name TEXT, CONSTRAINT device_uniqueness UNIQUE (user_id, device_id) ); +CREATE TABLE appservice_stream_position( Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, stream_ordering BIGINT, CHECK (Lock='X') ); +CREATE TABLE device_inbox ( user_id TEXT 
NOT NULL, device_id TEXT NOT NULL, stream_id BIGINT NOT NULL, message_json TEXT NOT NULL ); +CREATE INDEX device_inbox_user_stream_id ON device_inbox(user_id, device_id, stream_id); +CREATE INDEX received_transactions_ts ON received_transactions(ts); +CREATE TABLE device_federation_outbox ( destination TEXT NOT NULL, stream_id BIGINT NOT NULL, queued_ts BIGINT NOT NULL, messages_json TEXT NOT NULL ); +CREATE INDEX device_federation_outbox_destination_id ON device_federation_outbox(destination, stream_id); +CREATE TABLE device_federation_inbox ( origin TEXT NOT NULL, message_id TEXT NOT NULL, received_ts BIGINT NOT NULL ); +CREATE INDEX device_federation_inbox_sender_id ON device_federation_inbox(origin, message_id); +CREATE TABLE device_max_stream_id ( stream_id BIGINT NOT NULL ); +CREATE TABLE public_room_list_stream ( stream_id BIGINT NOT NULL, room_id TEXT NOT NULL, visibility BOOLEAN NOT NULL , appservice_id TEXT, network_id TEXT); +CREATE INDEX public_room_list_stream_idx on public_room_list_stream( stream_id ); +CREATE INDEX public_room_list_stream_rm_idx on public_room_list_stream( room_id, stream_id ); +CREATE TABLE state_group_edges( state_group BIGINT NOT NULL, prev_state_group BIGINT NOT NULL ); +CREATE INDEX state_group_edges_idx ON state_group_edges(state_group); +CREATE INDEX state_group_edges_prev_idx ON state_group_edges(prev_state_group); +CREATE TABLE stream_ordering_to_exterm ( stream_ordering BIGINT NOT NULL, room_id TEXT NOT NULL, event_id TEXT NOT NULL ); +CREATE INDEX stream_ordering_to_exterm_idx on stream_ordering_to_exterm( stream_ordering ); +CREATE INDEX stream_ordering_to_exterm_rm_idx on stream_ordering_to_exterm( room_id, stream_ordering ); +CREATE TABLE IF NOT EXISTS "event_auth"( event_id TEXT NOT NULL, auth_id TEXT NOT NULL, room_id TEXT NOT NULL ); +CREATE INDEX evauth_edges_id ON event_auth(event_id); +CREATE INDEX user_threepids_medium_address on user_threepids (medium, address); +CREATE TABLE appservice_room_list( appservice_id TEXT NOT NULL, network_id TEXT NOT NULL, room_id TEXT NOT NULL ); +CREATE UNIQUE INDEX appservice_room_list_idx ON appservice_room_list( appservice_id, network_id, room_id ); +CREATE INDEX device_federation_outbox_id ON device_federation_outbox(stream_id); +CREATE TABLE federation_stream_position( type TEXT NOT NULL, stream_id INTEGER NOT NULL ); +CREATE TABLE device_lists_remote_cache ( user_id TEXT NOT NULL, device_id TEXT NOT NULL, content TEXT NOT NULL ); +CREATE TABLE device_lists_remote_extremeties ( user_id TEXT NOT NULL, stream_id TEXT NOT NULL ); +CREATE TABLE device_lists_stream ( stream_id BIGINT NOT NULL, user_id TEXT NOT NULL, device_id TEXT NOT NULL ); +CREATE INDEX device_lists_stream_id ON device_lists_stream(stream_id, user_id); +CREATE TABLE device_lists_outbound_pokes ( destination TEXT NOT NULL, stream_id BIGINT NOT NULL, user_id TEXT NOT NULL, device_id TEXT NOT NULL, sent BOOLEAN NOT NULL, ts BIGINT NOT NULL ); +CREATE INDEX device_lists_outbound_pokes_id ON device_lists_outbound_pokes(destination, stream_id); +CREATE INDEX device_lists_outbound_pokes_user ON device_lists_outbound_pokes(destination, user_id); +CREATE TABLE event_push_summary ( user_id TEXT NOT NULL, room_id TEXT NOT NULL, notif_count BIGINT NOT NULL, stream_ordering BIGINT NOT NULL ); +CREATE INDEX event_push_summary_user_rm ON event_push_summary(user_id, room_id); +CREATE TABLE event_push_summary_stream_ordering ( Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, stream_ordering BIGINT NOT NULL, CHECK (Lock='X') ); +CREATE TABLE IF NOT EXISTS 
"pushers" ( id BIGINT PRIMARY KEY, user_name TEXT NOT NULL, access_token BIGINT DEFAULT NULL, profile_tag TEXT NOT NULL, kind TEXT NOT NULL, app_id TEXT NOT NULL, app_display_name TEXT NOT NULL, device_display_name TEXT NOT NULL, pushkey TEXT NOT NULL, ts BIGINT NOT NULL, lang TEXT, data TEXT, last_stream_ordering INTEGER, last_success BIGINT, failing_since BIGINT, UNIQUE (app_id, pushkey, user_name) ); +CREATE INDEX device_lists_outbound_pokes_stream ON device_lists_outbound_pokes(stream_id); +CREATE TABLE ratelimit_override ( user_id TEXT NOT NULL, messages_per_second BIGINT, burst_count BIGINT ); +CREATE UNIQUE INDEX ratelimit_override_idx ON ratelimit_override(user_id); +CREATE TABLE current_state_delta_stream ( stream_id BIGINT NOT NULL, room_id TEXT NOT NULL, type TEXT NOT NULL, state_key TEXT NOT NULL, event_id TEXT, prev_event_id TEXT ); +CREATE INDEX current_state_delta_stream_idx ON current_state_delta_stream(stream_id); +CREATE TABLE device_lists_outbound_last_success ( destination TEXT NOT NULL, user_id TEXT NOT NULL, stream_id BIGINT NOT NULL ); +CREATE INDEX device_lists_outbound_last_success_idx ON device_lists_outbound_last_success( destination, user_id, stream_id ); +CREATE TABLE user_directory_stream_pos ( Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, stream_id BIGINT, CHECK (Lock='X') ); +CREATE VIRTUAL TABLE user_directory_search USING fts4 ( user_id, value ) +/* user_directory_search(user_id,value) */; +CREATE TABLE IF NOT EXISTS 'user_directory_search_content'(docid INTEGER PRIMARY KEY, 'c0user_id', 'c1value'); +CREATE TABLE IF NOT EXISTS 'user_directory_search_segments'(blockid INTEGER PRIMARY KEY, block BLOB); +CREATE TABLE IF NOT EXISTS 'user_directory_search_segdir'(level INTEGER,idx INTEGER,start_block INTEGER,leaves_end_block INTEGER,end_block INTEGER,root BLOB,PRIMARY KEY(level, idx)); +CREATE TABLE IF NOT EXISTS 'user_directory_search_docsize'(docid INTEGER PRIMARY KEY, size BLOB); +CREATE TABLE IF NOT EXISTS 'user_directory_search_stat'(id INTEGER PRIMARY KEY, value BLOB); +CREATE TABLE blocked_rooms ( room_id TEXT NOT NULL, user_id TEXT NOT NULL ); +CREATE UNIQUE INDEX blocked_rooms_idx ON blocked_rooms(room_id); +CREATE TABLE IF NOT EXISTS "local_media_repository_url_cache"( url TEXT, response_code INTEGER, etag TEXT, expires_ts BIGINT, og TEXT, media_id TEXT, download_ts BIGINT ); +CREATE INDEX local_media_repository_url_cache_expires_idx ON local_media_repository_url_cache(expires_ts); +CREATE INDEX local_media_repository_url_cache_by_url_download_ts ON local_media_repository_url_cache(url, download_ts); +CREATE INDEX local_media_repository_url_cache_media_idx ON local_media_repository_url_cache(media_id); +CREATE TABLE group_users ( group_id TEXT NOT NULL, user_id TEXT NOT NULL, is_admin BOOLEAN NOT NULL, is_public BOOLEAN NOT NULL ); +CREATE TABLE group_invites ( group_id TEXT NOT NULL, user_id TEXT NOT NULL ); +CREATE TABLE group_rooms ( group_id TEXT NOT NULL, room_id TEXT NOT NULL, is_public BOOLEAN NOT NULL ); +CREATE TABLE group_summary_rooms ( group_id TEXT NOT NULL, room_id TEXT NOT NULL, category_id TEXT NOT NULL, room_order BIGINT NOT NULL, is_public BOOLEAN NOT NULL, UNIQUE (group_id, category_id, room_id, room_order), CHECK (room_order > 0) ); +CREATE UNIQUE INDEX group_summary_rooms_g_idx ON group_summary_rooms(group_id, room_id, category_id); +CREATE TABLE group_summary_room_categories ( group_id TEXT NOT NULL, category_id TEXT NOT NULL, cat_order BIGINT NOT NULL, UNIQUE (group_id, category_id, cat_order), CHECK (cat_order > 0) ); +CREATE TABLE 
group_room_categories ( group_id TEXT NOT NULL, category_id TEXT NOT NULL, profile TEXT NOT NULL, is_public BOOLEAN NOT NULL, UNIQUE (group_id, category_id) ); +CREATE TABLE group_summary_users ( group_id TEXT NOT NULL, user_id TEXT NOT NULL, role_id TEXT NOT NULL, user_order BIGINT NOT NULL, is_public BOOLEAN NOT NULL ); +CREATE INDEX group_summary_users_g_idx ON group_summary_users(group_id); +CREATE TABLE group_summary_roles ( group_id TEXT NOT NULL, role_id TEXT NOT NULL, role_order BIGINT NOT NULL, UNIQUE (group_id, role_id, role_order), CHECK (role_order > 0) ); +CREATE TABLE group_roles ( group_id TEXT NOT NULL, role_id TEXT NOT NULL, profile TEXT NOT NULL, is_public BOOLEAN NOT NULL, UNIQUE (group_id, role_id) ); +CREATE TABLE group_attestations_renewals ( group_id TEXT NOT NULL, user_id TEXT NOT NULL, valid_until_ms BIGINT NOT NULL ); +CREATE INDEX group_attestations_renewals_g_idx ON group_attestations_renewals(group_id, user_id); +CREATE INDEX group_attestations_renewals_u_idx ON group_attestations_renewals(user_id); +CREATE INDEX group_attestations_renewals_v_idx ON group_attestations_renewals(valid_until_ms); +CREATE TABLE group_attestations_remote ( group_id TEXT NOT NULL, user_id TEXT NOT NULL, valid_until_ms BIGINT NOT NULL, attestation_json TEXT NOT NULL ); +CREATE INDEX group_attestations_remote_g_idx ON group_attestations_remote(group_id, user_id); +CREATE INDEX group_attestations_remote_u_idx ON group_attestations_remote(user_id); +CREATE INDEX group_attestations_remote_v_idx ON group_attestations_remote(valid_until_ms); +CREATE TABLE local_group_membership ( group_id TEXT NOT NULL, user_id TEXT NOT NULL, is_admin BOOLEAN NOT NULL, membership TEXT NOT NULL, is_publicised BOOLEAN NOT NULL, content TEXT NOT NULL ); +CREATE INDEX local_group_membership_u_idx ON local_group_membership(user_id, group_id); +CREATE INDEX local_group_membership_g_idx ON local_group_membership(group_id); +CREATE TABLE local_group_updates ( stream_id BIGINT NOT NULL, group_id TEXT NOT NULL, user_id TEXT NOT NULL, type TEXT NOT NULL, content TEXT NOT NULL ); +CREATE TABLE remote_profile_cache ( user_id TEXT NOT NULL, displayname TEXT, avatar_url TEXT, last_check BIGINT NOT NULL ); +CREATE UNIQUE INDEX remote_profile_cache_user_id ON remote_profile_cache(user_id); +CREATE INDEX remote_profile_cache_time ON remote_profile_cache(last_check); +CREATE TABLE IF NOT EXISTS "deleted_pushers" ( stream_id BIGINT NOT NULL, app_id TEXT NOT NULL, pushkey TEXT NOT NULL, user_id TEXT NOT NULL ); +CREATE INDEX deleted_pushers_stream_id ON deleted_pushers (stream_id); +CREATE TABLE IF NOT EXISTS "groups" ( group_id TEXT NOT NULL, name TEXT, avatar_url TEXT, short_description TEXT, long_description TEXT, is_public BOOL NOT NULL , join_policy TEXT NOT NULL DEFAULT 'invite'); +CREATE UNIQUE INDEX groups_idx ON groups(group_id); +CREATE TABLE IF NOT EXISTS "user_directory" ( user_id TEXT NOT NULL, room_id TEXT, display_name TEXT, avatar_url TEXT ); +CREATE INDEX user_directory_room_idx ON user_directory(room_id); +CREATE UNIQUE INDEX user_directory_user_idx ON user_directory(user_id); +CREATE TABLE event_push_actions_staging ( event_id TEXT NOT NULL, user_id TEXT NOT NULL, actions TEXT NOT NULL, notif SMALLINT NOT NULL, highlight SMALLINT NOT NULL ); +CREATE INDEX event_push_actions_staging_id ON event_push_actions_staging(event_id); +CREATE TABLE users_pending_deactivation ( user_id TEXT NOT NULL ); +CREATE UNIQUE INDEX group_invites_g_idx ON group_invites(group_id, user_id); +CREATE UNIQUE INDEX group_users_g_idx ON 
group_users(group_id, user_id); +CREATE INDEX group_users_u_idx ON group_users(user_id); +CREATE INDEX group_invites_u_idx ON group_invites(user_id); +CREATE UNIQUE INDEX group_rooms_g_idx ON group_rooms(group_id, room_id); +CREATE INDEX group_rooms_r_idx ON group_rooms(room_id); +CREATE TABLE user_daily_visits ( user_id TEXT NOT NULL, device_id TEXT, timestamp BIGINT NOT NULL ); +CREATE INDEX user_daily_visits_uts_idx ON user_daily_visits(user_id, timestamp); +CREATE INDEX user_daily_visits_ts_idx ON user_daily_visits(timestamp); +CREATE TABLE erased_users ( user_id TEXT NOT NULL ); +CREATE UNIQUE INDEX erased_users_user ON erased_users(user_id); +CREATE TABLE monthly_active_users ( user_id TEXT NOT NULL, timestamp BIGINT NOT NULL ); +CREATE UNIQUE INDEX monthly_active_users_users ON monthly_active_users(user_id); +CREATE INDEX monthly_active_users_time_stamp ON monthly_active_users(timestamp); +CREATE TABLE IF NOT EXISTS "e2e_room_keys_versions" ( user_id TEXT NOT NULL, version BIGINT NOT NULL, algorithm TEXT NOT NULL, auth_data TEXT NOT NULL, deleted SMALLINT DEFAULT 0 NOT NULL ); +CREATE UNIQUE INDEX e2e_room_keys_versions_idx ON e2e_room_keys_versions(user_id, version); +CREATE TABLE IF NOT EXISTS "e2e_room_keys" ( user_id TEXT NOT NULL, room_id TEXT NOT NULL, session_id TEXT NOT NULL, version BIGINT NOT NULL, first_message_index INT, forwarded_count INT, is_verified BOOLEAN, session_data TEXT NOT NULL ); +CREATE UNIQUE INDEX e2e_room_keys_idx ON e2e_room_keys(user_id, room_id, session_id); +CREATE TABLE users_who_share_private_rooms ( user_id TEXT NOT NULL, other_user_id TEXT NOT NULL, room_id TEXT NOT NULL ); +CREATE UNIQUE INDEX users_who_share_private_rooms_u_idx ON users_who_share_private_rooms(user_id, other_user_id, room_id); +CREATE INDEX users_who_share_private_rooms_r_idx ON users_who_share_private_rooms(room_id); +CREATE INDEX users_who_share_private_rooms_o_idx ON users_who_share_private_rooms(other_user_id); +CREATE TABLE user_threepid_id_server ( user_id TEXT NOT NULL, medium TEXT NOT NULL, address TEXT NOT NULL, id_server TEXT NOT NULL ); +CREATE UNIQUE INDEX user_threepid_id_server_idx ON user_threepid_id_server( user_id, medium, address, id_server ); +CREATE TABLE users_in_public_rooms ( user_id TEXT NOT NULL, room_id TEXT NOT NULL ); +CREATE UNIQUE INDEX users_in_public_rooms_u_idx ON users_in_public_rooms(user_id, room_id); +CREATE TABLE account_validity ( user_id TEXT PRIMARY KEY, expiration_ts_ms BIGINT NOT NULL, email_sent BOOLEAN NOT NULL, renewal_token TEXT ); +CREATE TABLE event_relations ( event_id TEXT NOT NULL, relates_to_id TEXT NOT NULL, relation_type TEXT NOT NULL, aggregation_key TEXT ); +CREATE UNIQUE INDEX event_relations_id ON event_relations(event_id); +CREATE INDEX event_relations_relates ON event_relations(relates_to_id, relation_type, aggregation_key); +CREATE TABLE stats_stream_pos ( Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, stream_id BIGINT, CHECK (Lock='X') ); +CREATE TABLE user_stats ( user_id TEXT NOT NULL, ts BIGINT NOT NULL, bucket_size INT NOT NULL, public_rooms INT NOT NULL, private_rooms INT NOT NULL ); +CREATE UNIQUE INDEX user_stats_user_ts ON user_stats(user_id, ts); +CREATE TABLE room_stats ( room_id TEXT NOT NULL, ts BIGINT NOT NULL, bucket_size INT NOT NULL, current_state_events INT NOT NULL, joined_members INT NOT NULL, invited_members INT NOT NULL, left_members INT NOT NULL, banned_members INT NOT NULL, state_events INT NOT NULL ); +CREATE UNIQUE INDEX room_stats_room_ts ON room_stats(room_id, ts); +CREATE TABLE room_state ( 
room_id TEXT NOT NULL, join_rules TEXT, history_visibility TEXT, encryption TEXT, name TEXT, topic TEXT, avatar TEXT, canonical_alias TEXT ); +CREATE UNIQUE INDEX room_state_room ON room_state(room_id); +CREATE TABLE room_stats_earliest_token ( room_id TEXT NOT NULL, token BIGINT NOT NULL ); +CREATE UNIQUE INDEX room_stats_earliest_token_idx ON room_stats_earliest_token(room_id); +CREATE INDEX access_tokens_device_id ON access_tokens (user_id, device_id); +CREATE INDEX user_ips_device_id ON user_ips (user_id, device_id, last_seen); +CREATE INDEX event_contains_url_index ON events (room_id, topological_ordering, stream_ordering); +CREATE INDEX event_push_actions_u_highlight ON event_push_actions (user_id, stream_ordering); +CREATE INDEX event_push_actions_highlights_index ON event_push_actions (user_id, room_id, topological_ordering, stream_ordering); +CREATE INDEX current_state_events_member_index ON current_state_events (state_key); +CREATE INDEX device_inbox_stream_id_user_id ON device_inbox (stream_id, user_id); +CREATE INDEX device_lists_stream_user_id ON device_lists_stream (user_id, device_id); +CREATE INDEX local_media_repository_url_idx ON local_media_repository (created_ts); +CREATE INDEX user_ips_last_seen ON user_ips (user_id, last_seen); +CREATE INDEX user_ips_last_seen_only ON user_ips (last_seen); +CREATE INDEX users_creation_ts ON users (creation_ts); +CREATE INDEX event_to_state_groups_sg_index ON event_to_state_groups (state_group); +CREATE UNIQUE INDEX device_lists_remote_cache_unique_id ON device_lists_remote_cache (user_id, device_id); +CREATE TABLE sqlite_stat1(tbl,idx,stat); +CREATE INDEX state_groups_state_type_idx ON state_groups_state(state_group, type, state_key); +CREATE UNIQUE INDEX device_lists_remote_extremeties_unique_idx ON device_lists_remote_extremeties (user_id); +CREATE UNIQUE INDEX user_ips_user_token_ip_unique_index ON user_ips (user_id, access_token, ip); diff --git a/synapse/storage/schema/full_schemas/README.txt b/synapse/storage/schema/full_schemas/README.txt new file mode 100644 index 0000000000..12d4eb0746 --- /dev/null +++ b/synapse/storage/schema/full_schemas/README.txt @@ -0,0 +1,14 @@ +Building full schema dumps +========================== + +Postgres +-------- + +$ pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner $DATABASE_NAME| sed -e '/^--/d' -e 's/public.//g' -e '/^SET /d' -e '/^SELECT /d' > full.sql.postgres + +SQLite +------ + +$ sqlite3 $DATABASE_FILE ".schema" > full.sql.sqlite + +Delete the CREATE statements for "schema_version", "applied_schema_deltas", and "applied_module_schemas". 
\ No newline at end of file From 7f81b967ca1d4004832e96513ad33e802f9a6f78 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 3 Jun 2019 22:23:40 +1000 Subject: [PATCH 290/429] fix schemas --- synapse/storage/prepare_database.py | 4 +- .../schema/full_schemas/54/full.sql.postgres | 72 +++++-------------- .../schema/full_schemas/54/full.sql.sqlite | 1 - .../storage/schema/full_schemas/README.txt | 4 +- tests/handlers/test_user_directory.py | 2 + 5 files changed, 22 insertions(+), 61 deletions(-) diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index 07478b6672..b81c05369f 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -146,9 +146,9 @@ def _setup_new_database(cur, database_engine): directory_entries = os.listdir(sql_dir) - for filename in fnmatch.filter(directory_entries, "*.sql") + fnmatch.filter( + for filename in sorted(fnmatch.filter(directory_entries, "*.sql") + fnmatch.filter( directory_entries, "*.sql." + specific - ): + )): sql_loc = os.path.join(sql_dir, filename) logger.debug("Applying schema %s", sql_loc) executescript(cur, sql_loc) diff --git a/synapse/storage/schema/full_schemas/54/full.sql.postgres b/synapse/storage/schema/full_schemas/54/full.sql.postgres index 5fb54cfe77..ea3859fd24 100644 --- a/synapse/storage/schema/full_schemas/54/full.sql.postgres +++ b/synapse/storage/schema/full_schemas/54/full.sql.postgres @@ -60,21 +60,6 @@ CREATE TABLE application_services_txns ( ); - -CREATE TABLE applied_module_schemas ( - module_name text NOT NULL, - file text NOT NULL -); - - - -CREATE TABLE applied_schema_deltas ( - version integer NOT NULL, - file text NOT NULL -); - - - CREATE TABLE appservice_room_list ( appservice_id text NOT NULL, network_id text NOT NULL, @@ -475,7 +460,7 @@ CREATE TABLE group_roles ( group_id text NOT NULL, role_id text NOT NULL, profile text NOT NULL, - is_boolean NOT NULL + is_public boolean NOT NULL ); @@ -484,7 +469,7 @@ CREATE TABLE group_room_categories ( group_id text NOT NULL, category_id text NOT NULL, profile text NOT NULL, - is_boolean NOT NULL + is_public boolean NOT NULL ); @@ -492,7 +477,7 @@ CREATE TABLE group_room_categories ( CREATE TABLE group_rooms ( group_id text NOT NULL, room_id text NOT NULL, - is_boolean NOT NULL + is_public boolean NOT NULL ); @@ -520,7 +505,7 @@ CREATE TABLE group_summary_rooms ( room_id text NOT NULL, category_id text NOT NULL, room_order bigint NOT NULL, - is_boolean NOT NULL, + is_public boolean NOT NULL, CONSTRAINT group_summary_rooms_room_order_check CHECK ((room_order > 0)) ); @@ -531,7 +516,7 @@ CREATE TABLE group_summary_users ( user_id text NOT NULL, role_id text NOT NULL, user_order bigint NOT NULL, - is_boolean NOT NULL + is_public boolean NOT NULL ); @@ -540,7 +525,7 @@ CREATE TABLE group_users ( group_id text NOT NULL, user_id text NOT NULL, is_admin boolean NOT NULL, - is_boolean NOT NULL + is_public boolean NOT NULL ); @@ -551,7 +536,7 @@ CREATE TABLE groups ( avatar_url text, short_description text, long_description text, - is_boolean NOT NULL, + is_public boolean NOT NULL, join_policy text DEFAULT 'invite'::text NOT NULL ); @@ -578,7 +563,7 @@ CREATE TABLE local_group_membership ( user_id text NOT NULL, is_admin boolean NOT NULL, membership text NOT NULL, - is_sed boolean NOT NULL, + is_publicised boolean NOT NULL, content text NOT NULL ); @@ -695,7 +680,7 @@ CREATE TABLE profiles ( -CREATE TABLE room_list_stream ( +CREATE TABLE public_room_list_stream ( stream_id bigint NOT NULL, room_id text NOT NULL, 
visibility boolean NOT NULL, @@ -966,21 +951,12 @@ CREATE TABLE room_tags_revisions ( CREATE TABLE rooms ( room_id text NOT NULL, - is_boolean, + is_public boolean, creator text ); -CREATE TABLE schema_version ( - lock character(1) DEFAULT 'X'::bpchar NOT NULL, - version integer NOT NULL, - upgraded boolean NOT NULL, - CONSTRAINT schema_version_lock_check CHECK ((lock = 'X'::bpchar)) -); - - - CREATE TABLE server_keys_json ( server_name text NOT NULL, key_id text NOT NULL, @@ -1135,7 +1111,7 @@ CREATE TABLE user_stats ( user_id text NOT NULL, ts bigint NOT NULL, bucket_size integer NOT NULL, - rooms integer NOT NULL, + public_rooms integer NOT NULL, private_rooms integer NOT NULL ); @@ -1175,7 +1151,7 @@ CREATE TABLE users ( -CREATE TABLE users_in_rooms ( +CREATE TABLE users_in_public_rooms ( user_id text NOT NULL, room_id text NOT NULL ); @@ -1225,17 +1201,6 @@ ALTER TABLE ONLY application_services_txns ADD CONSTRAINT application_services_txns_as_id_txn_id_key UNIQUE (as_id, txn_id); - -ALTER TABLE ONLY applied_module_schemas - ADD CONSTRAINT applied_module_schemas_module_name_file_key UNIQUE (module_name, file); - - - -ALTER TABLE ONLY applied_schema_deltas - ADD CONSTRAINT applied_schema_deltas_version_file_key UNIQUE (version, file); - - - ALTER TABLE ONLY appservice_stream_position ADD CONSTRAINT appservice_stream_position_lock_key UNIQUE (lock); @@ -1521,11 +1486,6 @@ ALTER TABLE ONLY rooms -ALTER TABLE ONLY schema_version - ADD CONSTRAINT schema_version_lock_key UNIQUE (lock); - - - ALTER TABLE ONLY server_keys_json ADD CONSTRAINT server_keys_json_uniqueness UNIQUE (server_name, key_id, from_server); @@ -1846,15 +1806,15 @@ CREATE INDEX presence_stream_user_id ON presence_stream USING btree (user_id); -CREATE INDEX room_index ON rooms USING btree (is_; +CREATE INDEX public_room_index ON rooms USING btree (is_public); -CREATE INDEX room_list_stream_idx ON room_list_stream USING btree (stream_id); +CREATE INDEX public_room_list_stream_idx ON public_room_list_stream USING btree (stream_id); -CREATE INDEX room_list_stream_rm_idx ON room_list_stream USING btree (room_id, stream_id); +CREATE INDEX public_room_list_stream_rm_idx ON public_room_list_stream USING btree (room_id, stream_id); @@ -2022,7 +1982,7 @@ CREATE INDEX user_threepids_user_id ON user_threepids USING btree (user_id); -CREATE UNIQUE INDEX users_in_rooms_u_idx ON users_in_rooms USING btree (user_id, room_id); +CREATE UNIQUE INDEX users_in_public_rooms_u_idx ON users_in_public_rooms USING btree (user_id, room_id); diff --git a/synapse/storage/schema/full_schemas/54/full.sql.sqlite b/synapse/storage/schema/full_schemas/54/full.sql.sqlite index 0b60a6c789..be9295e4c9 100644 --- a/synapse/storage/schema/full_schemas/54/full.sql.sqlite +++ b/synapse/storage/schema/full_schemas/54/full.sql.sqlite @@ -255,7 +255,6 @@ CREATE INDEX user_ips_last_seen_only ON user_ips (last_seen); CREATE INDEX users_creation_ts ON users (creation_ts); CREATE INDEX event_to_state_groups_sg_index ON event_to_state_groups (state_group); CREATE UNIQUE INDEX device_lists_remote_cache_unique_id ON device_lists_remote_cache (user_id, device_id); -CREATE TABLE sqlite_stat1(tbl,idx,stat); CREATE INDEX state_groups_state_type_idx ON state_groups_state(state_group, type, state_key); CREATE UNIQUE INDEX device_lists_remote_extremeties_unique_idx ON device_lists_remote_extremeties (user_id); CREATE UNIQUE INDEX user_ips_user_token_ip_unique_index ON user_ips (user_id, access_token, ip); diff --git a/synapse/storage/schema/full_schemas/README.txt 
b/synapse/storage/schema/full_schemas/README.txt index 12d4eb0746..df49f9b39e 100644 --- a/synapse/storage/schema/full_schemas/README.txt +++ b/synapse/storage/schema/full_schemas/README.txt @@ -4,11 +4,11 @@ Building full schema dumps Postgres -------- -$ pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner $DATABASE_NAME| sed -e '/^--/d' -e 's/public.//g' -e '/^SET /d' -e '/^SELECT /d' > full.sql.postgres +$ pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner $DATABASE_NAME| sed -e '/^--/d' -e 's/public\.//g' -e '/^SET /d' -e '/^SELECT /d' > full.sql.postgres SQLite ------ $ sqlite3 $DATABASE_FILE ".schema" > full.sql.sqlite -Delete the CREATE statements for "schema_version", "applied_schema_deltas", and "applied_module_schemas". \ No newline at end of file +Delete the CREATE statements for "sqlite_stat1", "schema_version", "applied_schema_deltas", and "applied_module_schemas". \ No newline at end of file diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index 9021e647fe..b919694f54 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -96,6 +96,7 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): self.get_success(self.handler.handle_user_deactivated(r_user_id)) self.store.remove_from_user_dir.called_once_with(r_user_id) + @unittest.DEBUG def test_private_room(self): """ A user can be searched for only by people that are either in a public @@ -340,6 +341,7 @@ class TestUserDirSearchDisabled(unittest.HomeserverTestCase): return hs + @unittest.DEBUG def test_disabling_room_list(self): self.config.user_directory_search_enabled = True From be452fc9ace4f501398be769766e3f8bbd798571 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 3 Jun 2019 22:24:23 +1000 Subject: [PATCH 291/429] more fix --- .../full_schemas/54/stream_positions.sql | 38 +++++++++++++++++++ tests/handlers/test_user_directory.py | 2 - 2 files changed, 38 insertions(+), 2 deletions(-) create mode 100644 synapse/storage/schema/full_schemas/54/stream_positions.sql diff --git a/synapse/storage/schema/full_schemas/54/stream_positions.sql b/synapse/storage/schema/full_schemas/54/stream_positions.sql new file mode 100644 index 0000000000..d6433a5af2 --- /dev/null +++ b/synapse/storage/schema/full_schemas/54/stream_positions.sql @@ -0,0 +1,38 @@ + +INSERT INTO appservice_stream_position (stream_ordering) SELECT COALESCE(MAX(stream_ordering), 0) FROM events; +INSERT INTO federation_stream_position (type, stream_id) VALUES ('federation', -1); +INSERT INTO federation_stream_position (type, stream_id) SELECT 'events', coalesce(max(stream_ordering), -1) FROM events; +INSERT INTO user_directory_stream_pos (stream_id) VALUES (null); +INSERT INTO stats_stream_pos (stream_id) VALUES (null); + +--- User dir population + +-- Set up staging tables +INSERT INTO background_updates (update_name, progress_json) VALUES + ('populate_user_directory_createtables', '{}'); + +-- Run through each room and update the user directory according to who is in it +INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES + ('populate_user_directory_process_rooms', '{}', 'populate_user_directory_createtables'); + +-- Insert all users, if search_all_users is on +INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES + ('populate_user_directory_process_users', '{}', 'populate_user_directory_process_rooms'); + +-- Clean up staging tables +INSERT INTO background_updates (update_name, 
progress_json, depends_on) VALUES + ('populate_user_directory_cleanup', '{}', 'populate_user_directory_process_users'); + +--- Stats population + +-- Set up staging tables +INSERT INTO background_updates (update_name, progress_json) VALUES + ('populate_stats_createtables', '{}'); + +-- Run through each room and update stats +INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES + ('populate_stats_process_rooms', '{}', 'populate_stats_createtables'); + +-- Clean up staging tables +INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES + ('populate_stats_cleanup', '{}', 'populate_stats_process_rooms'); diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index b919694f54..9021e647fe 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -96,7 +96,6 @@ class UserDirectoryTestCase(unittest.HomeserverTestCase): self.get_success(self.handler.handle_user_deactivated(r_user_id)) self.store.remove_from_user_dir.called_once_with(r_user_id) - @unittest.DEBUG def test_private_room(self): """ A user can be searched for only by people that are either in a public @@ -341,7 +340,6 @@ class TestUserDirSearchDisabled(unittest.HomeserverTestCase): return hs - @unittest.DEBUG def test_disabling_room_list(self): self.config.user_directory_search_enabled = True From ed6138461b86ee4d085bf2e41e00e01cb9547f7b Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 3 Jun 2019 22:29:19 +1000 Subject: [PATCH 292/429] more fix --- MANIFEST.in | 3 +++ changelog.d/5320.misc | 1 + 2 files changed, 4 insertions(+) create mode 100644 changelog.d/5320.misc diff --git a/MANIFEST.in b/MANIFEST.in index 0500dd6b87..ad1523e387 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -9,7 +9,10 @@ include demo/*.py include demo/*.sh recursive-include synapse/storage/schema *.sql +recursive-include synapse/storage/schema *.sql.postgres +recursive-include synapse/storage/schema *.sql.sqlite recursive-include synapse/storage/schema *.py +recursive-include synapse/storage/schema *.txt recursive-include docs * recursive-include scripts * diff --git a/changelog.d/5320.misc b/changelog.d/5320.misc new file mode 100644 index 0000000000..5b4bf05303 --- /dev/null +++ b/changelog.d/5320.misc @@ -0,0 +1 @@ +New installs will now use the v54 full schema, rather than the full schema v14 and applying incremental updates to v54. 
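A note on the prepare_database.py change in the "fix schemas" patch above: wrapping the fnmatch results in sorted() makes the full-schema files apply in a deterministic order, so the shared *.sql files and the engine-specific *.sql.postgres / *.sql.sqlite dumps are no longer run in whatever order the filesystem happens to list them. The snippet below is a minimal, standalone sketch of that selection logic, not Synapse's actual code path; the directory listing and the value of `specific` are assumed purely for illustration.

import fnmatch

# Hypothetical listing of a full_schemas/54 directory (the real code uses
# os.listdir() on the schema directory).
directory_entries = [
    "stream_positions.sql",
    "full.sql.postgres",
    "full.sql.sqlite",
    "README.txt",
]

# Assumed engine-specific suffix; in Synapse the value depends on the
# configured database engine.
specific = "postgres"

# Collect the shared .sql files plus the engine-specific dump, then sort so
# the files are applied in a stable order regardless of how the OS lists them.
for filename in sorted(
    fnmatch.filter(directory_entries, "*.sql")
    + fnmatch.filter(directory_entries, "*.sql." + specific)
):
    print(filename)  # prints full.sql.postgres, then stream_positions.sql
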
From 4e75c5e02a6dead87e9a24cbdb8fb015221070ce Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 3 Jun 2019 22:42:12 +1000 Subject: [PATCH 293/429] WHY IS THIS CALLED A SLIGHTLY DIFFERENT THING --- synapse/storage/schema/full_schemas/54/stream_positions.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/synapse/storage/schema/full_schemas/54/stream_positions.sql b/synapse/storage/schema/full_schemas/54/stream_positions.sql index d6433a5af2..0febedcc5e 100644 --- a/synapse/storage/schema/full_schemas/54/stream_positions.sql +++ b/synapse/storage/schema/full_schemas/54/stream_positions.sql @@ -4,6 +4,7 @@ INSERT INTO federation_stream_position (type, stream_id) VALUES ('federation', - INSERT INTO federation_stream_position (type, stream_id) SELECT 'events', coalesce(max(stream_ordering), -1) FROM events; INSERT INTO user_directory_stream_pos (stream_id) VALUES (null); INSERT INTO stats_stream_pos (stream_id) VALUES (null); +INSERT INTO event_push_summary_stream_ordering (stream_ordering) VALUES (0); --- User dir population From 2198b7ce2a1316dfaf8dde061c1a6f30a818ed5a Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 4 Jun 2019 01:06:00 +1000 Subject: [PATCH 294/429] add stuff in bg updates --- .../schema/full_schemas/54/full.sql.postgres | 72 ++++++++++++++++--- 1 file changed, 62 insertions(+), 10 deletions(-) diff --git a/synapse/storage/schema/full_schemas/54/full.sql.postgres b/synapse/storage/schema/full_schemas/54/full.sql.postgres index ea3859fd24..098434356f 100644 --- a/synapse/storage/schema/full_schemas/54/full.sql.postgres +++ b/synapse/storage/schema/full_schemas/54/full.sql.postgres @@ -3,12 +3,6 @@ -CREATE TABLE _extremities_to_check ( - event_id text -); - - - CREATE TABLE access_tokens ( id bigint NOT NULL, user_id text NOT NULL, @@ -60,6 +54,7 @@ CREATE TABLE application_services_txns ( ); + CREATE TABLE appservice_room_list ( appservice_id text NOT NULL, network_id text NOT NULL, @@ -1201,6 +1196,7 @@ ALTER TABLE ONLY application_services_txns ADD CONSTRAINT application_services_txns_as_id_txn_id_key UNIQUE (as_id, txn_id); + ALTER TABLE ONLY appservice_stream_position ADD CONSTRAINT appservice_stream_position_lock_key UNIQUE (lock); @@ -1526,7 +1522,7 @@ ALTER TABLE ONLY users -CREATE INDEX _extremities_to_check_id ON _extremities_to_check USING btree (event_id); +CREATE INDEX access_tokens_device_id ON access_tokens USING btree (user_id, device_id); @@ -1554,6 +1550,10 @@ CREATE INDEX current_state_delta_stream_idx ON current_state_delta_stream USING +CREATE INDEX current_state_events_member_index ON current_state_events USING btree (state_key) WHERE (type = 'm.room.member'::text); + + + CREATE INDEX deleted_pushers_stream_id ON deleted_pushers USING btree (stream_id); @@ -1570,7 +1570,7 @@ CREATE INDEX device_federation_outbox_id ON device_federation_outbox USING btree -CREATE INDEX device_inbox_stream_id ON device_inbox USING btree (stream_id); +CREATE INDEX device_inbox_stream_id_user_id ON device_inbox USING btree (stream_id, user_id); @@ -1594,10 +1594,22 @@ CREATE INDEX device_lists_outbound_pokes_user ON device_lists_outbound_pokes USI +CREATE UNIQUE INDEX device_lists_remote_cache_unique_id ON device_lists_remote_cache USING btree (user_id, device_id); + + + +CREATE UNIQUE INDEX device_lists_remote_extremeties_unique_idx ON device_lists_remote_extremeties USING btree (user_id); + + + CREATE INDEX device_lists_stream_id ON device_lists_stream USING btree (stream_id, user_id); +CREATE INDEX device_lists_stream_user_id ON device_lists_stream USING btree 
(user_id, device_id); + + + CREATE UNIQUE INDEX e2e_room_keys_idx ON e2e_room_keys USING btree (user_id, room_id, session_id); @@ -1638,10 +1650,18 @@ CREATE INDEX evauth_edges_id ON event_auth USING btree (event_id); +CREATE INDEX event_contains_url_index ON events USING btree (room_id, topological_ordering, stream_ordering) WHERE ((contains_url = true) AND (outlier = false)); + + + CREATE INDEX event_json_room_id ON event_json USING btree (room_id); +CREATE INDEX event_push_actions_highlights_index ON event_push_actions USING btree (user_id, room_id, topological_ordering, stream_ordering) WHERE (highlight = 1); + + + CREATE INDEX event_push_actions_rm_tokens ON event_push_actions USING btree (user_id, room_id, topological_ordering, stream_ordering); @@ -1658,6 +1678,10 @@ CREATE INDEX event_push_actions_stream_ordering ON event_push_actions USING btre +CREATE INDEX event_push_actions_u_highlight ON event_push_actions USING btree (user_id, stream_ordering); + + + CREATE INDEX event_push_summary_user_rm ON event_push_summary USING btree (user_id, room_id); @@ -1678,10 +1702,18 @@ CREATE INDEX event_search_ev_ridx ON event_search USING btree (room_id); +CREATE UNIQUE INDEX event_search_event_id_idx ON event_search USING btree (event_id); + + + CREATE INDEX event_search_fts_idx ON event_search USING gin (vector); +CREATE INDEX event_to_state_groups_sg_index ON event_to_state_groups USING btree (state_group); + + + CREATE INDEX events_order_room ON events USING btree (room_id, topological_ordering, stream_ordering); @@ -1786,6 +1818,10 @@ CREATE INDEX local_media_repository_url_cache_media_idx ON local_media_repositor +CREATE INDEX local_media_repository_url_idx ON local_media_repository USING btree (created_ts) WHERE (url_cache IS NOT NULL); + + + CREATE INDEX monthly_active_users_time_stamp ON monthly_active_users USING btree ("timestamp"); @@ -1914,7 +1950,7 @@ CREATE INDEX state_group_edges_prev_idx ON state_group_edges USING btree (prev_s -CREATE INDEX state_groups_state_id ON state_groups_state USING btree (state_group); +CREATE INDEX state_groups_state_type_idx ON state_groups_state USING btree (state_group, type, state_key); @@ -1962,7 +1998,19 @@ CREATE INDEX user_filters_by_user_id_filter_id ON user_filters USING btree (user -CREATE INDEX user_ips_user_ip ON user_ips USING btree (user_id, access_token, ip); +CREATE INDEX user_ips_device_id ON user_ips USING btree (user_id, device_id, last_seen); + + + +CREATE INDEX user_ips_last_seen ON user_ips USING btree (user_id, last_seen); + + + +CREATE INDEX user_ips_last_seen_only ON user_ips USING btree (last_seen); + + + +CREATE UNIQUE INDEX user_ips_user_token_ip_unique_index ON user_ips USING btree (user_id, access_token, ip); @@ -1982,6 +2030,10 @@ CREATE INDEX user_threepids_user_id ON user_threepids USING btree (user_id); +CREATE INDEX users_creation_ts ON users USING btree (creation_ts); + + + CREATE UNIQUE INDEX users_in_public_rooms_u_idx ON users_in_public_rooms USING btree (user_id, room_id); From 83827c4922958abd930bfb6925d8a1a6a3833248 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 3 Jun 2019 17:06:47 +0100 Subject: [PATCH 295/429] Add account_validity's email_sent column to the list of boolean columns in synapse_port_db Fixes #5306 --- scripts/synapse_port_db | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/synapse_port_db b/scripts/synapse_port_db index 41be9c9220..b6ba19c776 100755 --- a/scripts/synapse_port_db +++ b/scripts/synapse_port_db @@ -54,6 +54,7 @@ BOOLEAN_COLUMNS = { "group_roles": 
["is_public"], "local_group_membership": ["is_publicised", "is_admin"], "e2e_room_keys": ["is_verified"], + "account_validity": ["email_sent"], } From fa4b54aca57bebc94e2b763abdae79343a08f969 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 3 Jun 2019 17:06:54 +0100 Subject: [PATCH 296/429] Ignore room state with null bytes in for room stats --- synapse/storage/stats.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/synapse/storage/stats.py b/synapse/storage/stats.py index 1c0b183a56..1f39ef211a 100644 --- a/synapse/storage/stats.py +++ b/synapse/storage/stats.py @@ -328,6 +328,21 @@ class StatsStore(StateDeltasStore): room_id (str) fields (dict[str:Any]) """ + + # For whatever reason some of the fields may contain null bytes, which + # postgres isn't a fan of, so we replace those fields with null. + for col in ( + "join_rules", + "history_visibility", + "encryption", + "name", + "topic", + "avatar", + "canonical_alias" + ): + if "\0" in fields.get(col, ""): + fields[col] = None + return self._simple_upsert( table="room_state", keyvalues={"room_id": room_id}, From 4bd67db100efacc3d31a2f8187b7bdd4479d9bc3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 3 Jun 2019 17:08:33 +0100 Subject: [PATCH 297/429] Newsfile --- changelog.d/5324.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5324.feature diff --git a/changelog.d/5324.feature b/changelog.d/5324.feature new file mode 100644 index 0000000000..01285e965c --- /dev/null +++ b/changelog.d/5324.feature @@ -0,0 +1 @@ +Synapse now more efficiently collates room statistics. From deca87ddf2d12ab67c91f84891d9fa09b4575fcf Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 3 Jun 2019 17:11:28 +0100 Subject: [PATCH 298/429] Changelog --- changelog.d/5325.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5325.bugfix diff --git a/changelog.d/5325.bugfix b/changelog.d/5325.bugfix new file mode 100644 index 0000000000..6914398bcc --- /dev/null +++ b/changelog.d/5325.bugfix @@ -0,0 +1 @@ +Add account_validity's email_sent column to the list of boolean columns in synapse_port_db. From fe2294ec8dc4b37d19930bd1ae0867645207af2e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 3 Jun 2019 17:17:35 +0100 Subject: [PATCH 299/429] Revert "Newsfile" This reverts commit 4bd67db100efacc3d31a2f8187b7bdd4479d9bc3. --- changelog.d/5324.feature | 1 - 1 file changed, 1 deletion(-) delete mode 100644 changelog.d/5324.feature diff --git a/changelog.d/5324.feature b/changelog.d/5324.feature deleted file mode 100644 index 01285e965c..0000000000 --- a/changelog.d/5324.feature +++ /dev/null @@ -1 +0,0 @@ -Synapse now more efficiently collates room statistics. From 0d67a8cd9de9564fcdaa1206c18b411b4c43b74a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 3 Jun 2019 17:17:57 +0100 Subject: [PATCH 300/429] Newsfile --- changelog.d/5324.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5324.feature diff --git a/changelog.d/5324.feature b/changelog.d/5324.feature new file mode 100644 index 0000000000..01285e965c --- /dev/null +++ b/changelog.d/5324.feature @@ -0,0 +1 @@ +Synapse now more efficiently collates room statistics. 
From 0a56966f7d4879f9d517c96a3c714accdce4e17f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 3 Jun 2019 17:42:52 +0100 Subject: [PATCH 301/429] Fix --- synapse/storage/stats.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/storage/stats.py b/synapse/storage/stats.py index 1f39ef211a..ff266b09b0 100644 --- a/synapse/storage/stats.py +++ b/synapse/storage/stats.py @@ -340,7 +340,8 @@ class StatsStore(StateDeltasStore): "avatar", "canonical_alias" ): - if "\0" in fields.get(col, ""): + field = fields.get(col) + if field and "\0" in field: fields[col] = None return self._simple_upsert( From fec2dcb1a538ab8ab447f724af1a94d5b3517197 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 3 Jun 2019 22:59:51 +0100 Subject: [PATCH 302/429] Enforce validity period on server_keys for fed requests. (#5321) When handling incoming federation requests, make sure that we have an up-to-date copy of the signing key. We do not yet enforce the validity period for event signatures. --- changelog.d/5321.bugfix | 1 + synapse/crypto/keyring.py | 167 ++++++++++++++++--------- synapse/federation/federation_base.py | 4 +- synapse/federation/transport/server.py | 4 +- synapse/groups/attestations.py | 5 +- tests/crypto/test_keyring.py | 135 ++++++++++++++++---- 6 files changed, 228 insertions(+), 88 deletions(-) create mode 100644 changelog.d/5321.bugfix diff --git a/changelog.d/5321.bugfix b/changelog.d/5321.bugfix new file mode 100644 index 0000000000..943a61956d --- /dev/null +++ b/changelog.d/5321.bugfix @@ -0,0 +1 @@ +Ensure that we have an up-to-date copy of the signing key when validating incoming federation requests. diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index b2f4cea536..cdec06c88e 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -15,6 +15,7 @@ # limitations under the License. import logging +from collections import defaultdict import six from six import raise_from @@ -70,6 +71,9 @@ class VerifyKeyRequest(object): json_object(dict): The JSON object to verify. + minimum_valid_until_ts (int): time at which we require the signing key to + be valid. (0 implies we don't care) + deferred(Deferred[str, str, nacl.signing.VerifyKey]): A deferred (server_name, key_id, verify_key) tuple that resolves when a verify key has been fetched. The deferreds' callbacks are run with no @@ -82,7 +86,8 @@ class VerifyKeyRequest(object): server_name = attr.ib() key_ids = attr.ib() json_object = attr.ib() - deferred = attr.ib() + minimum_valid_until_ts = attr.ib() + deferred = attr.ib(default=attr.Factory(defer.Deferred)) class KeyLookupError(ValueError): @@ -90,14 +95,16 @@ class KeyLookupError(ValueError): class Keyring(object): - def __init__(self, hs): + def __init__(self, hs, key_fetchers=None): self.clock = hs.get_clock() - self._key_fetchers = ( - StoreKeyFetcher(hs), - PerspectivesKeyFetcher(hs), - ServerKeyFetcher(hs), - ) + if key_fetchers is None: + key_fetchers = ( + StoreKeyFetcher(hs), + PerspectivesKeyFetcher(hs), + ServerKeyFetcher(hs), + ) + self._key_fetchers = key_fetchers # map from server name to Deferred. Has an entry for each server with # an ongoing key download; the Deferred completes once the download @@ -106,9 +113,25 @@ class Keyring(object): # These are regular, logcontext-agnostic Deferreds. 
self.key_downloads = {} - def verify_json_for_server(self, server_name, json_object): + def verify_json_for_server(self, server_name, json_object, validity_time): + """Verify that a JSON object has been signed by a given server + + Args: + server_name (str): name of the server which must have signed this object + + json_object (dict): object to be checked + + validity_time (int): timestamp at which we require the signing key to + be valid. (0 implies we don't care) + + Returns: + Deferred[None]: completes if the the object was correctly signed, otherwise + errbacks with an error + """ + req = server_name, json_object, validity_time + return logcontext.make_deferred_yieldable( - self.verify_json_objects_for_server([(server_name, json_object)])[0] + self.verify_json_objects_for_server((req,))[0] ) def verify_json_objects_for_server(self, server_and_json): @@ -116,10 +139,12 @@ class Keyring(object): necessary. Args: - server_and_json (list): List of pairs of (server_name, json_object) + server_and_json (iterable[Tuple[str, dict, int]): + Iterable of triplets of (server_name, json_object, validity_time) + validity_time is a timestamp at which the signing key must be valid. Returns: - List: for each input pair, a deferred indicating success + List: for each input triplet, a deferred indicating success or failure to verify each json object's signature for the given server_name. The deferreds run their callbacks in the sentinel logcontext. @@ -128,12 +153,12 @@ class Keyring(object): verify_requests = [] handle = preserve_fn(_handle_key_deferred) - def process(server_name, json_object): + def process(server_name, json_object, validity_time): """Process an entry in the request list - Given a (server_name, json_object) pair from the request list, - adds a key request to verify_requests, and returns a deferred which will - complete or fail (in the sentinel context) when verification completes. + Given a (server_name, json_object, validity_time) triplet from the request + list, adds a key request to verify_requests, and returns a deferred which + will complete or fail (in the sentinel context) when verification completes. """ key_ids = signature_ids(json_object, server_name) @@ -148,7 +173,7 @@ class Keyring(object): # add the key request to the queue, but don't start it off yet. verify_request = VerifyKeyRequest( - server_name, key_ids, json_object, defer.Deferred() + server_name, key_ids, json_object, validity_time ) verify_requests.append(verify_request) @@ -160,8 +185,8 @@ class Keyring(object): return handle(verify_request) results = [ - process(server_name, json_object) - for server_name, json_object in server_and_json + process(server_name, json_object, validity_time) + for server_name, json_object, validity_time in server_and_json ] if verify_requests: @@ -298,8 +323,12 @@ class Keyring(object): verify_request.deferred.errback( SynapseError( 401, - "No key for %s with id %s" - % (verify_request.server_name, verify_request.key_ids), + "No key for %s with ids in %s (min_validity %i)" + % ( + verify_request.server_name, + verify_request.key_ids, + verify_request.minimum_valid_until_ts, + ), Codes.UNAUTHORIZED, ) ) @@ -323,18 +352,28 @@ class Keyring(object): Args: fetcher (KeyFetcher): fetcher to use to fetch the keys remaining_requests (set[VerifyKeyRequest]): outstanding key requests. - Any successfully-completed requests will be reomved from the list. + Any successfully-completed requests will be removed from the list. 
""" - # dict[str, set(str)]: keys to fetch for each server - missing_keys = {} + # dict[str, dict[str, int]]: keys to fetch. + # server_name -> key_id -> min_valid_ts + missing_keys = defaultdict(dict) + for verify_request in remaining_requests: # any completed requests should already have been removed assert not verify_request.deferred.called - missing_keys.setdefault(verify_request.server_name, set()).update( - verify_request.key_ids - ) + keys_for_server = missing_keys[verify_request.server_name] - results = yield fetcher.get_keys(missing_keys.items()) + for key_id in verify_request.key_ids: + # If we have several requests for the same key, then we only need to + # request that key once, but we should do so with the greatest + # min_valid_until_ts of the requests, so that we can satisfy all of + # the requests. + keys_for_server[key_id] = max( + keys_for_server.get(key_id, -1), + verify_request.minimum_valid_until_ts + ) + + results = yield fetcher.get_keys(missing_keys) completed = list() for verify_request in remaining_requests: @@ -344,25 +383,34 @@ class Keyring(object): # complete this VerifyKeyRequest. result_keys = results.get(server_name, {}) for key_id in verify_request.key_ids: - key = result_keys.get(key_id) - if key: - with PreserveLoggingContext(): - verify_request.deferred.callback( - (server_name, key_id, key.verify_key) - ) - completed.append(verify_request) - break + fetch_key_result = result_keys.get(key_id) + if not fetch_key_result: + # we didn't get a result for this key + continue + + if ( + fetch_key_result.valid_until_ts + < verify_request.minimum_valid_until_ts + ): + # key was not valid at this point + continue + + with PreserveLoggingContext(): + verify_request.deferred.callback( + (server_name, key_id, fetch_key_result.verify_key) + ) + completed.append(verify_request) + break remaining_requests.difference_update(completed) class KeyFetcher(object): - def get_keys(self, server_name_and_key_ids): + def get_keys(self, keys_to_fetch): """ Args: - server_name_and_key_ids (iterable[Tuple[str, iterable[str]]]): - list of (server_name, iterable[key_id]) tuples to fetch keys for - Note that the iterables may be iterated more than once. + keys_to_fetch (dict[str, dict[str, int]]): + the keys to be fetched. 
server_name -> key_id -> min_valid_ts Returns: Deferred[dict[str, dict[str, synapse.storage.keys.FetchKeyResult|None]]]: @@ -378,13 +426,15 @@ class StoreKeyFetcher(KeyFetcher): self.store = hs.get_datastore() @defer.inlineCallbacks - def get_keys(self, server_name_and_key_ids): + def get_keys(self, keys_to_fetch): """see KeyFetcher.get_keys""" + keys_to_fetch = ( (server_name, key_id) - for server_name, key_ids in server_name_and_key_ids - for key_id in key_ids + for server_name, keys_for_server in keys_to_fetch.items() + for key_id in keys_for_server.keys() ) + res = yield self.store.get_server_verify_keys(keys_to_fetch) keys = {} for (server_name, key_id), key in res.items(): @@ -508,14 +558,14 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): self.perspective_servers = self.config.perspectives @defer.inlineCallbacks - def get_keys(self, server_name_and_key_ids): + def get_keys(self, keys_to_fetch): """see KeyFetcher.get_keys""" @defer.inlineCallbacks def get_key(perspective_name, perspective_keys): try: result = yield self.get_server_verify_key_v2_indirect( - server_name_and_key_ids, perspective_name, perspective_keys + keys_to_fetch, perspective_name, perspective_keys ) defer.returnValue(result) except KeyLookupError as e: @@ -549,13 +599,15 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): @defer.inlineCallbacks def get_server_verify_key_v2_indirect( - self, server_names_and_key_ids, perspective_name, perspective_keys + self, keys_to_fetch, perspective_name, perspective_keys ): """ Args: - server_names_and_key_ids (iterable[Tuple[str, iterable[str]]]): - list of (server_name, iterable[key_id]) tuples to fetch keys for + keys_to_fetch (dict[str, dict[str, int]]): + the keys to be fetched. server_name -> key_id -> min_valid_ts + perspective_name (str): name of the notary server to query for the keys + perspective_keys (dict[str, VerifyKey]): map of key_id->key for the notary server @@ -569,12 +621,10 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): """ logger.info( "Requesting keys %s from notary server %s", - server_names_and_key_ids, + keys_to_fetch.items(), perspective_name, ) - # TODO(mark): Set the minimum_valid_until_ts to that needed by - # the events being validated or the current time if validating - # an incoming request. 
+ try: query_response = yield self.client.post_json( destination=perspective_name, @@ -582,9 +632,10 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): data={ u"server_keys": { server_name: { - key_id: {u"minimum_valid_until_ts": 0} for key_id in key_ids + key_id: {u"minimum_valid_until_ts": min_valid_ts} + for key_id, min_valid_ts in server_keys.items() } - for server_name, key_ids in server_names_and_key_ids + for server_name, server_keys in keys_to_fetch.items() } }, long_retries=True, @@ -694,15 +745,18 @@ class ServerKeyFetcher(BaseV2KeyFetcher): self.client = hs.get_http_client() @defer.inlineCallbacks - def get_keys(self, server_name_and_key_ids): + def get_keys(self, keys_to_fetch): """see KeyFetcher.get_keys""" + # TODO make this more resilient results = yield logcontext.make_deferred_yieldable( defer.gatherResults( [ run_in_background( - self.get_server_verify_key_v2_direct, server_name, key_ids + self.get_server_verify_key_v2_direct, + server_name, + server_keys.keys(), ) - for server_name, key_ids in server_name_and_key_ids + for server_name, server_keys in keys_to_fetch.items() ], consumeErrors=True, ).addErrback(unwrapFirstError) @@ -721,6 +775,7 @@ class ServerKeyFetcher(BaseV2KeyFetcher): keys = {} # type: dict[str, FetchKeyResult] for requested_key_id in key_ids: + # we may have found this key as a side-effect of asking for another. if requested_key_id in keys: continue diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index cffa831d80..4b38f7c759 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -265,7 +265,7 @@ def _check_sigs_on_pdus(keyring, room_version, pdus): ] more_deferreds = keyring.verify_json_objects_for_server([ - (p.sender_domain, p.redacted_pdu_json) + (p.sender_domain, p.redacted_pdu_json, 0) for p in pdus_to_check_sender ]) @@ -298,7 +298,7 @@ def _check_sigs_on_pdus(keyring, room_version, pdus): ] more_deferreds = keyring.verify_json_objects_for_server([ - (get_domain_from_id(p.pdu.event_id), p.redacted_pdu_json) + (get_domain_from_id(p.pdu.event_id), p.redacted_pdu_json, 0) for p in pdus_to_check_event_id ]) diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index d0efc4e0d3..0db8858cf1 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -94,6 +94,7 @@ class NoAuthenticationError(AuthenticationError): class Authenticator(object): def __init__(self, hs): + self._clock = hs.get_clock() self.keyring = hs.get_keyring() self.server_name = hs.hostname self.store = hs.get_datastore() @@ -102,6 +103,7 @@ class Authenticator(object): # A method just so we can pass 'self' as the authenticator to the Servlets @defer.inlineCallbacks def authenticate_request(self, request, content): + now = self._clock.time_msec() json_request = { "method": request.method.decode('ascii'), "uri": request.uri.decode('ascii'), @@ -138,7 +140,7 @@ class Authenticator(object): 401, "Missing Authorization headers", Codes.UNAUTHORIZED, ) - yield self.keyring.verify_json_for_server(origin, json_request) + yield self.keyring.verify_json_for_server(origin, json_request, now) logger.info("Request from %s", origin) request.authenticated_entity = origin diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index 786149be65..fa6b641ee1 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -97,10 +97,11 @@ class GroupAttestationSigning(object): # TODO: We also want to 
check that *new* attestations that people give # us to store are valid for at least a little while. - if valid_until_ms < self.clock.time_msec(): + now = self.clock.time_msec() + if valid_until_ms < now: raise SynapseError(400, "Attestation expired") - yield self.keyring.verify_json_for_server(server_name, attestation) + yield self.keyring.verify_json_for_server(server_name, attestation, now) def create_attestation(self, group_id, user_id): """Create an attestation for the group_id and user_id with default diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 3933ad4347..096401938d 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -19,6 +19,7 @@ from mock import Mock import canonicaljson import signedjson.key import signedjson.sign +from signedjson.key import get_verify_key from twisted.internet import defer @@ -137,7 +138,7 @@ class KeyringTestCase(unittest.HomeserverTestCase): context_11.request = "11" res_deferreds = kr.verify_json_objects_for_server( - [("server10", json1), ("server11", {})] + [("server10", json1, 0), ("server11", {}, 0)] ) # the unsigned json should be rejected pretty quickly @@ -174,7 +175,7 @@ class KeyringTestCase(unittest.HomeserverTestCase): self.http_client.post_json.return_value = defer.Deferred() res_deferreds_2 = kr.verify_json_objects_for_server( - [("server10", json1)] + [("server10", json1, 0)] ) res_deferreds_2[0].addBoth(self.check_context, None) yield logcontext.make_deferred_yieldable(res_deferreds_2[0]) @@ -197,31 +198,108 @@ class KeyringTestCase(unittest.HomeserverTestCase): kr = keyring.Keyring(self.hs) key1 = signedjson.key.generate_signing_key(1) - key1_id = "%s:%s" % (key1.alg, key1.version) - r = self.hs.datastore.store_server_verify_keys( "server9", time.time() * 1000, - [ - ( - "server9", - key1_id, - FetchKeyResult(signedjson.key.get_verify_key(key1), 1000), - ), - ], + [("server9", get_key_id(key1), FetchKeyResult(get_verify_key(key1), 1000))], ) self.get_success(r) + json1 = {} signedjson.sign.sign_json(json1, "server9", key1) # should fail immediately on an unsigned object - d = _verify_json_for_server(kr, "server9", {}) + d = _verify_json_for_server(kr, "server9", {}, 0) self.failureResultOf(d, SynapseError) - d = _verify_json_for_server(kr, "server9", json1) - self.assertFalse(d.called) + # should suceed on a signed object + d = _verify_json_for_server(kr, "server9", json1, 500) + # self.assertFalse(d.called) self.get_success(d) + def test_verify_json_dedupes_key_requests(self): + """Two requests for the same key should be deduped.""" + key1 = signedjson.key.generate_signing_key(1) + + def get_keys(keys_to_fetch): + # there should only be one request object (with the max validity) + self.assertEqual(keys_to_fetch, {"server1": {get_key_id(key1): 1500}}) + + return defer.succeed( + { + "server1": { + get_key_id(key1): FetchKeyResult(get_verify_key(key1), 1200) + } + } + ) + + mock_fetcher = keyring.KeyFetcher() + mock_fetcher.get_keys = Mock(side_effect=get_keys) + kr = keyring.Keyring(self.hs, key_fetchers=(mock_fetcher,)) + + json1 = {} + signedjson.sign.sign_json(json1, "server1", key1) + + # the first request should succeed; the second should fail because the key + # has expired + results = kr.verify_json_objects_for_server( + [("server1", json1, 500), ("server1", json1, 1500)] + ) + self.assertEqual(len(results), 2) + self.get_success(results[0]) + e = self.get_failure(results[1], SynapseError).value + self.assertEqual(e.errcode, "M_UNAUTHORIZED") + self.assertEqual(e.code, 401) 
+ + # there should have been a single call to the fetcher + mock_fetcher.get_keys.assert_called_once() + + def test_verify_json_falls_back_to_other_fetchers(self): + """If the first fetcher cannot provide a recent enough key, we fall back""" + key1 = signedjson.key.generate_signing_key(1) + + def get_keys1(keys_to_fetch): + self.assertEqual(keys_to_fetch, {"server1": {get_key_id(key1): 1500}}) + return defer.succeed( + { + "server1": { + get_key_id(key1): FetchKeyResult(get_verify_key(key1), 800) + } + } + ) + + def get_keys2(keys_to_fetch): + self.assertEqual(keys_to_fetch, {"server1": {get_key_id(key1): 1500}}) + return defer.succeed( + { + "server1": { + get_key_id(key1): FetchKeyResult(get_verify_key(key1), 1200) + } + } + ) + + mock_fetcher1 = keyring.KeyFetcher() + mock_fetcher1.get_keys = Mock(side_effect=get_keys1) + mock_fetcher2 = keyring.KeyFetcher() + mock_fetcher2.get_keys = Mock(side_effect=get_keys2) + kr = keyring.Keyring(self.hs, key_fetchers=(mock_fetcher1, mock_fetcher2)) + + json1 = {} + signedjson.sign.sign_json(json1, "server1", key1) + + results = kr.verify_json_objects_for_server( + [("server1", json1, 1200), ("server1", json1, 1500)] + ) + self.assertEqual(len(results), 2) + self.get_success(results[0]) + e = self.get_failure(results[1], SynapseError).value + self.assertEqual(e.errcode, "M_UNAUTHORIZED") + self.assertEqual(e.code, 401) + + # there should have been a single call to each fetcher + mock_fetcher1.get_keys.assert_called_once() + mock_fetcher2.get_keys.assert_called_once() + class ServerKeyFetcherTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): @@ -260,8 +338,8 @@ class ServerKeyFetcherTestCase(unittest.HomeserverTestCase): self.http_client.get_json.side_effect = get_json - server_name_and_key_ids = [(SERVER_NAME, ("key1",))] - keys = self.get_success(fetcher.get_keys(server_name_and_key_ids)) + keys_to_fetch = {SERVER_NAME: {"key1": 0}} + keys = self.get_success(fetcher.get_keys(keys_to_fetch)) k = keys[SERVER_NAME][testverifykey_id] self.assertEqual(k.valid_until_ts, VALID_UNTIL_TS) self.assertEqual(k.verify_key, testverifykey) @@ -288,9 +366,7 @@ class ServerKeyFetcherTestCase(unittest.HomeserverTestCase): # change the server name: it should cause a rejection response["server_name"] = "OTHER_SERVER" - self.get_failure( - fetcher.get_keys(server_name_and_key_ids), KeyLookupError - ) + self.get_failure(fetcher.get_keys(keys_to_fetch), KeyLookupError) class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase): @@ -342,8 +418,8 @@ class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase): self.http_client.post_json.side_effect = post_json - server_name_and_key_ids = [(SERVER_NAME, ("key1",))] - keys = self.get_success(fetcher.get_keys(server_name_and_key_ids)) + keys_to_fetch = {SERVER_NAME: {"key1": 0}} + keys = self.get_success(fetcher.get_keys(keys_to_fetch)) self.assertIn(SERVER_NAME, keys) k = keys[SERVER_NAME][testverifykey_id] self.assertEqual(k.valid_until_ts, VALID_UNTIL_TS) @@ -401,7 +477,7 @@ class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase): def get_key_from_perspectives(response): fetcher = PerspectivesKeyFetcher(self.hs) - server_name_and_key_ids = [(SERVER_NAME, ("key1",))] + keys_to_fetch = {SERVER_NAME: {"key1": 0}} def post_json(destination, path, data, **kwargs): self.assertEqual(destination, self.mock_perspective_server.server_name) @@ -410,9 +486,7 @@ class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase): self.http_client.post_json.side_effect = post_json - 
return self.get_success( - fetcher.get_keys(server_name_and_key_ids) - ) + return self.get_success(fetcher.get_keys(keys_to_fetch)) # start with a valid response so we can check we are testing the right thing response = build_response() @@ -435,6 +509,11 @@ class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase): self.assertEqual(keys, {}, "Expected empty dict with missing origin server sig") +def get_key_id(key): + """Get the matrix ID tag for a given SigningKey or VerifyKey""" + return "%s:%s" % (key.alg, key.version) + + @defer.inlineCallbacks def run_in_context(f, *args, **kwargs): with LoggingContext("testctx") as ctx: @@ -445,14 +524,16 @@ def run_in_context(f, *args, **kwargs): defer.returnValue(rv) -def _verify_json_for_server(keyring, server_name, json_object): +def _verify_json_for_server(keyring, server_name, json_object, validity_time): """thin wrapper around verify_json_for_server which makes sure it is wrapped with the patched defer.inlineCallbacks. """ @defer.inlineCallbacks def v(): - rv1 = yield keyring.verify_json_for_server(server_name, json_object) + rv1 = yield keyring.verify_json_for_server( + server_name, json_object, validity_time + ) defer.returnValue(rv1) return run_in_context(v) From 06a1f3e20719ab2631089a37cef50b80c1155f89 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Jun 2019 17:56:54 +0100 Subject: [PATCH 303/429] Reduce timeout for outbound /key/v2/server requests. --- synapse/crypto/keyring.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index cdec06c88e..bef6498f4b 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -786,6 +786,19 @@ class ServerKeyFetcher(BaseV2KeyFetcher): path="/_matrix/key/v2/server/" + urllib.parse.quote(requested_key_id), ignore_backoff=True, + + # we only give the remote server 10s to respond. It should be an + # easy request to handle, so if it doesn't reply within 10s, it's + # probably not going to. + # + # Furthermore, when we are acting as a notary server, we cannot + # wait all day for all of the origin servers, as the requesting + # server will otherwise time out before we can respond. + # + # (Note that get_json may make 4 attempts, so this can still take + # almost 45 seconds to fetch the headers, plus up to another 60s to + # read the response). + timeout=10000, ) except (NotRetryingDestination, RequestSendFailed) as e: raise_from(KeyLookupError("Failed to connect to remote server"), e) From dce6e9e0c11fc5d99b2da6698aed04e9f525f242 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Jun 2019 23:50:43 +0100 Subject: [PATCH 304/429] Avoid rapidly backing-off a server if we ignore the retry interval --- changelog.d/5335.bugfix | 1 + synapse/util/retryutils.py | 60 +++++++++++++++++++++++--------------- 2 files changed, 38 insertions(+), 23 deletions(-) create mode 100644 changelog.d/5335.bugfix diff --git a/changelog.d/5335.bugfix b/changelog.d/5335.bugfix new file mode 100644 index 0000000000..7318cbe35e --- /dev/null +++ b/changelog.d/5335.bugfix @@ -0,0 +1 @@ +Fix a bug where we could rapidly mark a server as unreachable even though it was only down for a few minutes. 
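The retryutils changes below add a backoff_on_failure flag so that requests made with ignore_backoff=True no longer grow the stored retry interval when they fail. Without it, each failed probe still multiplied the interval by multiplier_retry_interval, so a server that was only briefly down could be pushed to the maximum backoff after a handful of attempts. A simplified sketch of that growth, using the RetryDestinationLimiter defaults visible in the diff below (not the exact update code):

    interval = 10 * 60 * 1000                  # min_retry_interval: 10 minutes
    ceiling = 24 * 60 * 60 * 1000              # max_retry_interval: 24 hours
    for attempt in range(1, 5):
        interval = min(interval * 5, ceiling)  # multiplier_retry_interval is 5
        print(attempt, interval // 60000, "minutes")
    # 1 50 minutes, 2 250 minutes, 3 1250 minutes, 4 1440 minutes (the 24h cap)
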
diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py index 26cce7d197..f6dfa77d8f 100644 --- a/synapse/util/retryutils.py +++ b/synapse/util/retryutils.py @@ -46,8 +46,7 @@ class NotRetryingDestination(Exception): @defer.inlineCallbacks -def get_retry_limiter(destination, clock, store, ignore_backoff=False, - **kwargs): +def get_retry_limiter(destination, clock, store, ignore_backoff=False, **kwargs): """For a given destination check if we have previously failed to send a request there and are waiting before retrying the destination. If we are not ready to retry the destination, this will raise a @@ -60,8 +59,7 @@ def get_retry_limiter(destination, clock, store, ignore_backoff=False, clock (synapse.util.clock): timing source store (synapse.storage.transactions.TransactionStore): datastore ignore_backoff (bool): true to ignore the historical backoff data and - try the request anyway. We will still update the next - retry_interval on success/failure. + try the request anyway. We will still reset the retry_interval on success. Example usage: @@ -75,13 +73,12 @@ def get_retry_limiter(destination, clock, store, ignore_backoff=False, """ retry_last_ts, retry_interval = (0, 0) - retry_timings = yield store.get_destination_retry_timings( - destination - ) + retry_timings = yield store.get_destination_retry_timings(destination) if retry_timings: retry_last_ts, retry_interval = ( - retry_timings["retry_last_ts"], retry_timings["retry_interval"] + retry_timings["retry_last_ts"], + retry_timings["retry_interval"], ) now = int(clock.time_msec()) @@ -93,22 +90,31 @@ def get_retry_limiter(destination, clock, store, ignore_backoff=False, destination=destination, ) + # if we are ignoring the backoff data, we should also not increment the backoff + # when we get another failure - otherwise a server can very quickly reach the + # maximum backoff even though it might only have been down briefly + backoff_on_failure = not ignore_backoff + defer.returnValue( RetryDestinationLimiter( - destination, - clock, - store, - retry_interval, - **kwargs + destination, clock, store, retry_interval, backoff_on_failure, **kwargs ) ) class RetryDestinationLimiter(object): - def __init__(self, destination, clock, store, retry_interval, - min_retry_interval=10 * 60 * 1000, - max_retry_interval=24 * 60 * 60 * 1000, - multiplier_retry_interval=5, backoff_on_404=False): + def __init__( + self, + destination, + clock, + store, + retry_interval, + min_retry_interval=10 * 60 * 1000, + max_retry_interval=24 * 60 * 60 * 1000, + multiplier_retry_interval=5, + backoff_on_404=False, + backoff_on_failure=True, + ): """Marks the destination as "down" if an exception is thrown in the context, except for CodeMessageException with code < 500. @@ -128,6 +134,9 @@ class RetryDestinationLimiter(object): multiplier_retry_interval (int): The multiplier to use to increase the retry interval after a failed request. backoff_on_404 (bool): Back off if we get a 404 + + backoff_on_failure (bool): set to False if we should not increase the + retry interval on a failure. 
""" self.clock = clock self.store = store @@ -138,6 +147,7 @@ class RetryDestinationLimiter(object): self.max_retry_interval = max_retry_interval self.multiplier_retry_interval = multiplier_retry_interval self.backoff_on_404 = backoff_on_404 + self.backoff_on_failure = backoff_on_failure def __enter__(self): pass @@ -173,10 +183,13 @@ class RetryDestinationLimiter(object): if not self.retry_interval: return - logger.debug("Connection to %s was successful; clearing backoff", - self.destination) + logger.debug( + "Connection to %s was successful; clearing backoff", self.destination + ) retry_last_ts = 0 self.retry_interval = 0 + elif not self.backoff_on_failure: + return else: # We couldn't connect. if self.retry_interval: @@ -190,7 +203,10 @@ class RetryDestinationLimiter(object): logger.info( "Connection to %s was unsuccessful (%s(%s)); backoff now %i", - self.destination, exc_type, exc_val, self.retry_interval + self.destination, + exc_type, + exc_val, + self.retry_interval, ) retry_last_ts = int(self.clock.time_msec()) @@ -201,9 +217,7 @@ class RetryDestinationLimiter(object): self.destination, retry_last_ts, self.retry_interval ) except Exception: - logger.exception( - "Failed to store destination_retry_timings", - ) + logger.exception("Failed to store destination_retry_timings") # we deliberately do this in the background. synapse.util.logcontext.run_in_background(store_retry_timings) From def5ea4062295759d7c28d9c2302187871a1bc72 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Jun 2019 15:36:41 +0100 Subject: [PATCH 305/429] Don't bomb out on direct key fetches as soon as one fails --- synapse/crypto/keyring.py | 58 ++++++++++++++++++++++-------------- tests/crypto/test_keyring.py | 12 ++++---- 2 files changed, 41 insertions(+), 29 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index bef6498f4b..5660c96023 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -46,6 +46,7 @@ from synapse.api.errors import ( ) from synapse.storage.keys import FetchKeyResult from synapse.util import logcontext, unwrapFirstError +from synapse.util.async_helpers import yieldable_gather_results from synapse.util.logcontext import ( LoggingContext, PreserveLoggingContext, @@ -169,7 +170,12 @@ class Keyring(object): ) ) - logger.debug("Verifying for %s with key_ids %s", server_name, key_ids) + logger.debug( + "Verifying for %s with key_ids %s, min_validity %i", + server_name, + key_ids, + validity_time, + ) # add the key request to the queue, but don't start it off yet. 
verify_request = VerifyKeyRequest( @@ -744,34 +750,42 @@ class ServerKeyFetcher(BaseV2KeyFetcher): self.clock = hs.get_clock() self.client = hs.get_http_client() - @defer.inlineCallbacks def get_keys(self, keys_to_fetch): """see KeyFetcher.get_keys""" - # TODO make this more resilient - results = yield logcontext.make_deferred_yieldable( - defer.gatherResults( - [ - run_in_background( - self.get_server_verify_key_v2_direct, - server_name, - server_keys.keys(), - ) - for server_name, server_keys in keys_to_fetch.items() - ], - consumeErrors=True, - ).addErrback(unwrapFirstError) - ) - merged = {} - for result in results: - merged.update(result) + results = {} - defer.returnValue( - {server_name: keys for server_name, keys in merged.items() if keys} + @defer.inlineCallbacks + def get_key(key_to_fetch_item): + server_name, key_ids = key_to_fetch_item + try: + keys = yield self.get_server_verify_key_v2_direct(server_name, key_ids) + results[server_name] = keys + except KeyLookupError as e: + logger.warning( + "Error looking up keys %s from %s: %s", key_ids, server_name, e + ) + except Exception: + logger.exception("Error getting keys %s from %s", key_ids, server_name) + + return yieldable_gather_results(get_key, keys_to_fetch.items()).addCallback( + lambda _: results ) @defer.inlineCallbacks def get_server_verify_key_v2_direct(self, server_name, key_ids): + """ + + Args: + server_name (str): + key_ids (iterable[str]): + + Returns: + Deferred[dict[str, FetchKeyResult]]: map from key ID to lookup result + + Raises: + KeyLookupError if there was a problem making the lookup + """ keys = {} # type: dict[str, FetchKeyResult] for requested_key_id in key_ids: @@ -823,7 +837,7 @@ class ServerKeyFetcher(BaseV2KeyFetcher): ) keys.update(response_keys) - defer.returnValue({server_name: keys}) + defer.returnValue(keys) @defer.inlineCallbacks diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 096401938d..4cff7e36c8 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -25,11 +25,7 @@ from twisted.internet import defer from synapse.api.errors import SynapseError from synapse.crypto import keyring -from synapse.crypto.keyring import ( - KeyLookupError, - PerspectivesKeyFetcher, - ServerKeyFetcher, -) +from synapse.crypto.keyring import PerspectivesKeyFetcher, ServerKeyFetcher from synapse.storage.keys import FetchKeyResult from synapse.util import logcontext from synapse.util.logcontext import LoggingContext @@ -364,9 +360,11 @@ class ServerKeyFetcherTestCase(unittest.HomeserverTestCase): bytes(res["key_json"]), canonicaljson.encode_canonical_json(response) ) - # change the server name: it should cause a rejection + # change the server name: the result should be ignored response["server_name"] = "OTHER_SERVER" - self.get_failure(fetcher.get_keys(keys_to_fetch), KeyLookupError) + + keys = self.get_success(fetcher.get_keys(keys_to_fetch)) + self.assertEqual(keys, {}) class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase): From c5d60eadd5949ab4c12857e0830eb0afbb857f72 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Jun 2019 18:07:19 +0100 Subject: [PATCH 306/429] Notary server: make requests to origins in parallel ... else we're guaranteed to time out. 
--- synapse/crypto/keyring.py | 10 +++++++++- synapse/rest/key/v2/remote_key_resource.py | 12 ++---------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 5660c96023..6dae713ebc 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -751,7 +751,15 @@ class ServerKeyFetcher(BaseV2KeyFetcher): self.client = hs.get_http_client() def get_keys(self, keys_to_fetch): - """see KeyFetcher.get_keys""" + """ + Args: + keys_to_fetch (dict[str, iterable[str]]): + the keys to be fetched. server_name -> key_ids + + Returns: + Deferred[dict[str, dict[str, synapse.storage.keys.FetchKeyResult|None]]]: + map from server_name -> key_id -> FetchKeyResult + """ results = {} diff --git a/synapse/rest/key/v2/remote_key_resource.py b/synapse/rest/key/v2/remote_key_resource.py index 21c3c807b9..8a730bbc35 100644 --- a/synapse/rest/key/v2/remote_key_resource.py +++ b/synapse/rest/key/v2/remote_key_resource.py @@ -20,7 +20,7 @@ from twisted.web.resource import Resource from twisted.web.server import NOT_DONE_YET from synapse.api.errors import Codes, SynapseError -from synapse.crypto.keyring import KeyLookupError, ServerKeyFetcher +from synapse.crypto.keyring import ServerKeyFetcher from synapse.http.server import respond_with_json_bytes, wrap_json_request_handler from synapse.http.servlet import parse_integer, parse_json_object_from_request @@ -215,15 +215,7 @@ class RemoteKey(Resource): json_results.add(bytes(result["key_json"])) if cache_misses and query_remote_on_cache_miss: - for server_name, key_ids in cache_misses.items(): - try: - yield self.fetcher.get_server_verify_key_v2_direct( - server_name, key_ids - ) - except KeyLookupError as e: - logger.info("Failed to fetch key: %s", e) - except Exception: - logger.exception("Failed to get key for %r", server_name) + yield self.fetcher.get_keys(cache_misses) yield self.query_keys( request, query, query_remote_on_cache_miss=False ) From a3f2d000e031f2e9b6e76f679967fd0c0ba890f3 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 3 Jun 2019 23:12:48 +0100 Subject: [PATCH 307/429] changelog --- changelog.d/5333.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5333.bugfix diff --git a/changelog.d/5333.bugfix b/changelog.d/5333.bugfix new file mode 100644 index 0000000000..cb05a6dd63 --- /dev/null +++ b/changelog.d/5333.bugfix @@ -0,0 +1 @@ +Fix various problems which made the signing-key notary server time out for some requests. \ No newline at end of file From b2b90b7d34bf9afc437df6a2e58ab89cfd8ab91f Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 4 Jun 2019 15:54:27 +1000 Subject: [PATCH 308/429] Hawkowl/fix missing auth (#5328) --- changelog.d/5328.misc | 1 + synapse/rest/client/v1/voip.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/5328.misc diff --git a/changelog.d/5328.misc b/changelog.d/5328.misc new file mode 100644 index 0000000000..e1b9dc58a3 --- /dev/null +++ b/changelog.d/5328.misc @@ -0,0 +1 @@ +The base classes for the v1 and v2_alpha REST APIs have been unified. 
diff --git a/synapse/rest/client/v1/voip.py b/synapse/rest/client/v1/voip.py index 0975df84cf..6381049210 100644 --- a/synapse/rest/client/v1/voip.py +++ b/synapse/rest/client/v1/voip.py @@ -29,6 +29,7 @@ class VoipRestServlet(RestServlet): def __init__(self, hs): super(VoipRestServlet, self).__init__() self.hs = hs + self.auth = hs.get_auth() @defer.inlineCallbacks def on_GET(self, request): From 5bdb189f86b462890ff55c9244506b0c41fed856 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 4 Jun 2019 11:14:16 +0100 Subject: [PATCH 309/429] Improve docstrings on MatrixFederationClient. (#5332) --- changelog.d/5332.misc | 1 + synapse/http/matrixfederationclient.py | 71 ++++++++++++++++++++------ 2 files changed, 56 insertions(+), 16 deletions(-) create mode 100644 changelog.d/5332.misc diff --git a/changelog.d/5332.misc b/changelog.d/5332.misc new file mode 100644 index 0000000000..dcfac4eac9 --- /dev/null +++ b/changelog.d/5332.misc @@ -0,0 +1 @@ +Improve docstrings on MatrixFederationClient. diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 8197619a78..663ea72a7a 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -285,7 +285,24 @@ class MatrixFederationHttpClient(object): request (MatrixFederationRequest): details of request to be sent timeout (int|None): number of milliseconds to wait for the response headers - (including connecting to the server). 60s by default. + (including connecting to the server), *for each attempt*. + 60s by default. + + long_retries (bool): whether to use the long retry algorithm. + + The regular retry algorithm makes 4 attempts, with intervals + [0.5s, 1s, 2s]. + + The long retry algorithm makes 11 attempts, with intervals + [4s, 16s, 60s, 60s, ...] + + Both algorithms add -20%/+40% jitter to the retry intervals. + + Note that the above intervals are *in addition* to the time spent + waiting for the request to complete (up to `timeout` ms). + + NB: the long retry algorithm takes over 20 minutes to complete, with + a default timeout of 60s! ignore_backoff (bool): true to ignore the historical backoff data and try the request anyway. @@ -566,10 +583,14 @@ class MatrixFederationHttpClient(object): the request body. This will be encoded as JSON. json_data_callback (callable): A callable returning the dict to use as the request body. - long_retries (bool): A boolean that indicates whether we should - retry for a short or long time. - timeout(int): How long to try (in ms) the destination for before - giving up. None indicates no timeout. + + long_retries (bool): whether to use the long retry algorithm. See + docs on _send_request for details. + + timeout (int|None): number of milliseconds to wait for the response headers + (including connecting to the server), *for each attempt*. + self._default_timeout (60s) by default. + ignore_backoff (bool): true to ignore the historical backoff data and try the request anyway. backoff_on_404 (bool): True if we should count a 404 response as @@ -627,15 +648,22 @@ class MatrixFederationHttpClient(object): Args: destination (str): The remote server to send the HTTP request to. + path (str): The HTTP path. + data (dict): A dict containing the data that will be used as the request body. This will be encoded as JSON. - long_retries (bool): A boolean that indicates whether we should - retry for a short or long time. 
- timeout(int): How long to try (in ms) the destination for before - giving up. None indicates no timeout. + + long_retries (bool): whether to use the long retry algorithm. See + docs on _send_request for details. + + timeout (int|None): number of milliseconds to wait for the response headers + (including connecting to the server), *for each attempt*. + self._default_timeout (60s) by default. + ignore_backoff (bool): true to ignore the historical backoff data and try the request anyway. + args (dict): query params Returns: Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The @@ -686,14 +714,19 @@ class MatrixFederationHttpClient(object): Args: destination (str): The remote server to send the HTTP request to. + path (str): The HTTP path. + args (dict|None): A dictionary used to create query strings, defaults to None. - timeout (int): How long to try (in ms) the destination for before - giving up. None indicates no timeout and that the request will - be retried. + + timeout (int|None): number of milliseconds to wait for the response headers + (including connecting to the server), *for each attempt*. + self._default_timeout (60s) by default. + ignore_backoff (bool): true to ignore the historical backoff data and try the request anyway. + try_trailing_slash_on_400 (bool): True if on a 400 M_UNRECOGNIZED response we should try appending a trailing slash to the end of the request. Workaround for #3622 in Synapse <= v0.99.3. @@ -742,12 +775,18 @@ class MatrixFederationHttpClient(object): destination (str): The remote server to send the HTTP request to. path (str): The HTTP path. - long_retries (bool): A boolean that indicates whether we should - retry for a short or long time. - timeout(int): How long to try (in ms) the destination for before - giving up. None indicates no timeout. + + long_retries (bool): whether to use the long retry algorithm. See + docs on _send_request for details. + + timeout (int|None): number of milliseconds to wait for the response headers + (including connecting to the server), *for each attempt*. + self._default_timeout (60s) by default. + ignore_backoff (bool): true to ignore the historical backoff data and try the request anyway. + + args (dict): query params Returns: Deferred[dict|list]: Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body. From 4d08b8f30c6a10caaa570bd93059d496b66185a0 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 4 Jun 2019 11:53:07 +0100 Subject: [PATCH 310/429] Don't do long retries when calling the key notary server. (#5334) It takes at least 20 minutes to work through the long_retries schedule (11 attempts, each with a 60 second timeout, and 60 seconds between each request), so if the notary server isn't returning within the timeout, we'll just end up blocking whatever request is happening for 20 minutes. Ain't nobody got time for that. --- changelog.d/5334.bugfix | 1 + synapse/crypto/keyring.py | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 changelog.d/5334.bugfix diff --git a/changelog.d/5334.bugfix b/changelog.d/5334.bugfix new file mode 100644 index 0000000000..ed141e0918 --- /dev/null +++ b/changelog.d/5334.bugfix @@ -0,0 +1 @@ +Fix bug which would make certain operations (such as room joins) block for 20 minutes while attemoting to fetch verification keys. 
diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 6dae713ebc..0fd15287e7 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -644,7 +644,6 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): for server_name, server_keys in keys_to_fetch.items() } }, - long_retries=True, ) except (NotRetryingDestination, RequestSendFailed) as e: raise_from(KeyLookupError("Failed to connect to remote server"), e) From ac3cc3236748877b692e6c6c771019fdb23d3e71 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 4 Jun 2019 13:47:44 +0100 Subject: [PATCH 311/429] Make account validity renewal emails work when email notifs are disabled --- changelog.d/5341.bugfix | 1 + synapse/config/emailconfig.py | 101 +++++++++++++++++++--------------- 2 files changed, 59 insertions(+), 43 deletions(-) create mode 100644 changelog.d/5341.bugfix diff --git a/changelog.d/5341.bugfix b/changelog.d/5341.bugfix new file mode 100644 index 0000000000..a7aaa95f39 --- /dev/null +++ b/changelog.d/5341.bugfix @@ -0,0 +1 @@ +Fix a bug where account validity renewal emails could only be sent when email notifs were enabled. diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index 342a6ce5fd..cf4875f1f3 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -1,5 +1,7 @@ # -*- coding: utf-8 -*- -# Copyright 2015, 2016 OpenMarket Ltd +# Copyright 2015-2016 OpenMarket Ltd +# Copyright 2017-2018 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -29,12 +31,49 @@ logger = logging.getLogger(__name__) class EmailConfig(Config): def read_config(self, config): + # TODO: We should separate better the email configuration from the notification + # and account validity config. + self.email_enable_notifs = False email_config = config.get("email", {}) - self.email_enable_notifs = email_config.get("enable_notifs", False) - if self.email_enable_notifs: + self.email_smtp_host = email_config.get("smtp_host", None) + self.email_smtp_port = email_config.get("smtp_port", None) + self.email_smtp_user = email_config.get("smtp_user", None) + self.email_smtp_pass = email_config.get("smtp_pass", None) + self.require_transport_security = email_config.get( + "require_transport_security", False + ) + if "app_name" in email_config: + self.email_app_name = email_config["app_name"] + else: + self.email_app_name = "Matrix" + + self.email_notif_from = email_config.get("notif_from", None) + # make sure it's valid + parsed = email.utils.parseaddr(self.email_notif_from) + if self.email_notif_from and parsed[1] == '': + raise RuntimeError("Invalid notif_from address") + + template_dir = email_config.get("template_dir") + # we need an absolute path, because we change directory after starting (and + # we don't yet know what auxilliary templates like mail.css we will need). + # (Note that loading as package_resources with jinja.PackageLoader doesn't + # work for the same reason.) 
+ if not template_dir: + template_dir = pkg_resources.resource_filename( + 'synapse', 'res/templates' + ) + + self.email_template_dir = os.path.abspath(template_dir) + + self.email_enable_notifs = email_config.get("enable_notifs", False) + account_validity_renewal_enabled = config.get( + "account_validity", {}, + ).get("renew_at") + + if self.email_enable_notifs or account_validity_renewal_enabled: # make sure we can import the required deps import jinja2 import bleach @@ -42,6 +81,7 @@ class EmailConfig(Config): jinja2 bleach + if self.email_enable_notifs: required = [ "smtp_host", "smtp_port", @@ -66,34 +106,13 @@ class EmailConfig(Config): "email.enable_notifs is True but no public_baseurl is set" ) - self.email_smtp_host = email_config["smtp_host"] - self.email_smtp_port = email_config["smtp_port"] - self.email_notif_from = email_config["notif_from"] self.email_notif_template_html = email_config["notif_template_html"] self.email_notif_template_text = email_config["notif_template_text"] - self.email_expiry_template_html = email_config.get( - "expiry_template_html", "notice_expiry.html", - ) - self.email_expiry_template_text = email_config.get( - "expiry_template_text", "notice_expiry.txt", - ) - - template_dir = email_config.get("template_dir") - # we need an absolute path, because we change directory after starting (and - # we don't yet know what auxilliary templates like mail.css we will need). - # (Note that loading as package_resources with jinja.PackageLoader doesn't - # work for the same reason.) - if not template_dir: - template_dir = pkg_resources.resource_filename( - 'synapse', 'res/templates' - ) - template_dir = os.path.abspath(template_dir) for f in self.email_notif_template_text, self.email_notif_template_html: - p = os.path.join(template_dir, f) + p = os.path.join(self.email_template_dir, f) if not os.path.isfile(p): raise ConfigError("Unable to find email template file %s" % (p, )) - self.email_template_dir = template_dir self.email_notif_for_new_users = email_config.get( "notif_for_new_users", True @@ -101,29 +120,25 @@ class EmailConfig(Config): self.email_riot_base_url = email_config.get( "riot_base_url", None ) - self.email_smtp_user = email_config.get( - "smtp_user", None - ) - self.email_smtp_pass = email_config.get( - "smtp_pass", None - ) - self.require_transport_security = email_config.get( - "require_transport_security", False - ) - if "app_name" in email_config: - self.email_app_name = email_config["app_name"] - else: - self.email_app_name = "Matrix" - - # make sure it's valid - parsed = email.utils.parseaddr(self.email_notif_from) - if parsed[1] == '': - raise RuntimeError("Invalid notif_from address") else: self.email_enable_notifs = False # Not much point setting defaults for the rest: it would be an # error for them to be used. 
+ if account_validity_renewal_enabled: + self.email_expiry_template_html = email_config.get( + "expiry_template_html", "notice_expiry.html", + ) + self.email_expiry_template_text = email_config.get( + "expiry_template_text", "notice_expiry.txt", + ) + + for f in self.email_expiry_template_text, self.email_expiry_template_html: + p = os.path.join(self.email_template_dir, f) + if not os.path.isfile(p): + raise ConfigError("Unable to find email template file %s" % (p, )) + + def default_config(self, config_dir_path, server_name, **kwargs): return """ # Enable sending emails for notification events or expiry notices From 1cc5fc1f6c316e8ea1c50669cd80f4a7d441570a Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 4 Jun 2019 13:51:23 +0100 Subject: [PATCH 312/429] Lint --- synapse/config/emailconfig.py | 1 - 1 file changed, 1 deletion(-) diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index cf4875f1f3..7ca3505895 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -138,7 +138,6 @@ class EmailConfig(Config): if not os.path.isfile(p): raise ConfigError("Unable to find email template file %s" % (p, )) - def default_config(self, config_dir_path, server_name, **kwargs): return """ # Enable sending emails for notification events or expiry notices From 2f62e1f6ff671bf6404bd90b1d945f8d029f0d37 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 4 Jun 2019 14:24:36 +0100 Subject: [PATCH 313/429] Only parse from email if provided --- synapse/config/emailconfig.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index 7ca3505895..8400471f40 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -51,10 +51,11 @@ class EmailConfig(Config): self.email_app_name = "Matrix" self.email_notif_from = email_config.get("notif_from", None) - # make sure it's valid - parsed = email.utils.parseaddr(self.email_notif_from) - if self.email_notif_from and parsed[1] == '': - raise RuntimeError("Invalid notif_from address") + if self.email_notif_from is not None: + # make sure it's valid + parsed = email.utils.parseaddr(self.email_notif_from) + if parsed[1] == '': + raise RuntimeError("Invalid notif_from address") template_dir = email_config.get("template_dir") # we need an absolute path, because we change directory after starting (and From b4189b112fcacf8143aa8fe7674d5c2518067bc8 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 4 Jun 2019 18:01:09 +0100 Subject: [PATCH 314/429] Rename get_events->get_events_from_store_or_dest (#5344) We have too many things called get_event, and it's hard to figure out what we mean. Also remove some unused params from the signature, and add some logging. --- changelog.d/5344.misc | 1 + synapse/federation/federation_client.py | 33 ++++++++++--------------- 2 files changed, 14 insertions(+), 20 deletions(-) create mode 100644 changelog.d/5344.misc diff --git a/changelog.d/5344.misc b/changelog.d/5344.misc new file mode 100644 index 0000000000..a20c563bf1 --- /dev/null +++ b/changelog.d/5344.misc @@ -0,0 +1 @@ +Clean up FederationClient.get_events for clarity. 
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index f3fc897a0a..916ff487c9 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -17,7 +17,6 @@ import copy import itertools import logging -import random from six.moves import range @@ -326,8 +325,8 @@ class FederationClient(FederationBase): state_event_ids = result["pdu_ids"] auth_event_ids = result.get("auth_chain_ids", []) - fetched_events, failed_to_fetch = yield self.get_events( - [destination], room_id, set(state_event_ids + auth_event_ids) + fetched_events, failed_to_fetch = yield self.get_events_from_store_or_dest( + destination, room_id, set(state_event_ids + auth_event_ids) ) if failed_to_fetch: @@ -397,27 +396,20 @@ class FederationClient(FederationBase): defer.returnValue((signed_pdus, signed_auth)) @defer.inlineCallbacks - def get_events(self, destinations, room_id, event_ids, return_local=True): - """Fetch events from some remote destinations, checking if we already - have them. + def get_events_from_store_or_dest(self, destination, room_id, event_ids): + """Fetch events from a remote destination, checking if we already have them. Args: - destinations (list) + destination (str) room_id (str) event_ids (list) - return_local (bool): Whether to include events we already have in - the DB in the returned list of events Returns: Deferred: A deferred resolving to a 2-tuple where the first is a list of events and the second is a list of event ids that we failed to fetch. """ - if return_local: - seen_events = yield self.store.get_events(event_ids, allow_rejected=True) - signed_events = list(seen_events.values()) - else: - seen_events = yield self.store.have_seen_events(event_ids) - signed_events = [] + seen_events = yield self.store.get_events(event_ids, allow_rejected=True) + signed_events = list(seen_events.values()) failed_to_fetch = set() @@ -428,10 +420,11 @@ class FederationClient(FederationBase): if not missing_events: defer.returnValue((signed_events, failed_to_fetch)) - def random_server_list(): - srvs = list(destinations) - random.shuffle(srvs) - return srvs + logger.debug( + "Fetching unknown state/auth events %s for room %s", + missing_events, + event_ids, + ) room_version = yield self.store.get_room_version(room_id) @@ -443,7 +436,7 @@ class FederationClient(FederationBase): deferreds = [ run_in_background( self.get_pdu, - destinations=random_server_list(), + destinations=[destination], event_id=e_id, room_version=room_version, ) From dae224a73f6a799718f7dfc5c6d8ac3e050fca1d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 4 Jun 2019 18:05:06 +0100 Subject: [PATCH 315/429] Fix failure to fetch batches of PDUs (#5342) FederationClient.get_pdu is called in a loop to fetch a batch of PDUs. A failure to fetch one should not result in a failure of the whole batch. Add the missing `continue`. --- changelog.d/5342.bugfix | 1 + synapse/federation/federation_client.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/5342.bugfix diff --git a/changelog.d/5342.bugfix b/changelog.d/5342.bugfix new file mode 100644 index 0000000000..66a3076292 --- /dev/null +++ b/changelog.d/5342.bugfix @@ -0,0 +1 @@ +Fix failure when fetching batches of events during backfill, etc. 
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 916ff487c9..d559605382 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -279,6 +279,7 @@ class FederationClient(FederationBase): "Failed to get PDU %s from %s because %s", event_id, destination, e, ) + continue except NotRetryingDestination as e: logger.info(str(e)) continue From aa530e68005d041ad037b081fe748d301a11291a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 4 Jun 2019 22:02:53 +0100 Subject: [PATCH 316/429] Call RetryLimiter correctly (#5340) Fixes a regression introduced in #5335. --- changelog.d/5340.bugfix | 2 ++ synapse/util/retryutils.py | 7 ++++++- 2 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 changelog.d/5340.bugfix diff --git a/changelog.d/5340.bugfix b/changelog.d/5340.bugfix new file mode 100644 index 0000000000..931ee904e1 --- /dev/null +++ b/changelog.d/5340.bugfix @@ -0,0 +1,2 @@ +Fix a bug where we could rapidly mark a server as unreachable even though it was only down for a few minutes. + diff --git a/synapse/util/retryutils.py b/synapse/util/retryutils.py index f6dfa77d8f..1a77456498 100644 --- a/synapse/util/retryutils.py +++ b/synapse/util/retryutils.py @@ -97,7 +97,12 @@ def get_retry_limiter(destination, clock, store, ignore_backoff=False, **kwargs) defer.returnValue( RetryDestinationLimiter( - destination, clock, store, retry_interval, backoff_on_failure, **kwargs + destination, + clock, + store, + retry_interval, + backoff_on_failure=backoff_on_failure, + **kwargs ) ) From 016af015980d28d7efdce9aad1fe2148335f5086 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 5 Jun 2019 10:35:13 +0100 Subject: [PATCH 317/429] Rename VerifyKeyRequest.deferred field (#5343) it's a bit confusing --- changelog.d/5343.misc | 1 + synapse/crypto/keyring.py | 24 ++++++++++++------------ 2 files changed, 13 insertions(+), 12 deletions(-) create mode 100644 changelog.d/5343.misc diff --git a/changelog.d/5343.misc b/changelog.d/5343.misc new file mode 100644 index 0000000000..dbee0f71b9 --- /dev/null +++ b/changelog.d/5343.misc @@ -0,0 +1 @@ +Rename VerifyKeyRequest.deferred field. diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 0fd15287e7..8e1d666db1 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -75,7 +75,7 @@ class VerifyKeyRequest(object): minimum_valid_until_ts (int): time at which we require the signing key to be valid. (0 implies we don't care) - deferred(Deferred[str, str, nacl.signing.VerifyKey]): + key_ready (Deferred[str, str, nacl.signing.VerifyKey]): A deferred (server_name, key_id, verify_key) tuple that resolves when a verify key has been fetched. The deferreds' callbacks are run with no logcontext. @@ -88,7 +88,7 @@ class VerifyKeyRequest(object): key_ids = attr.ib() json_object = attr.ib() minimum_valid_until_ts = attr.ib() - deferred = attr.ib(default=attr.Factory(defer.Deferred)) + key_ready = attr.ib(default=attr.Factory(defer.Deferred)) class KeyLookupError(ValueError): @@ -204,7 +204,7 @@ class Keyring(object): def _start_key_lookups(self, verify_requests): """Sets off the key fetches for each verify request - Once each fetch completes, verify_request.deferred will be resolved. + Once each fetch completes, verify_request.key_ready will be resolved. 
Args: verify_requests (List[VerifyKeyRequest]): @@ -250,7 +250,7 @@ class Keyring(object): return res for verify_request in verify_requests: - verify_request.deferred.addBoth(remove_deferreds, verify_request) + verify_request.key_ready.addBoth(remove_deferreds, verify_request) except Exception: logger.exception("Error starting key lookups") @@ -303,7 +303,7 @@ class Keyring(object): def _get_server_verify_keys(self, verify_requests): """Tries to find at least one key for each verify request - For each verify_request, verify_request.deferred is called back with + For each verify_request, verify_request.key_ready is called back with params (server_name, key_id, VerifyKey) if a key is found, or errbacked with a SynapseError if none of the keys are found. @@ -312,7 +312,7 @@ class Keyring(object): """ remaining_requests = set( - (rq for rq in verify_requests if not rq.deferred.called) + (rq for rq in verify_requests if not rq.key_ready.called) ) @defer.inlineCallbacks @@ -326,7 +326,7 @@ class Keyring(object): # look for any requests which weren't satisfied with PreserveLoggingContext(): for verify_request in remaining_requests: - verify_request.deferred.errback( + verify_request.key_ready.errback( SynapseError( 401, "No key for %s with ids in %s (min_validity %i)" @@ -346,8 +346,8 @@ class Keyring(object): logger.error("Unexpected error in _get_server_verify_keys: %s", err) with PreserveLoggingContext(): for verify_request in remaining_requests: - if not verify_request.deferred.called: - verify_request.deferred.errback(err) + if not verify_request.key_ready.called: + verify_request.key_ready.errback(err) run_in_background(do_iterations).addErrback(on_err) @@ -366,7 +366,7 @@ class Keyring(object): for verify_request in remaining_requests: # any completed requests should already have been removed - assert not verify_request.deferred.called + assert not verify_request.key_ready.called keys_for_server = missing_keys[verify_request.server_name] for key_id in verify_request.key_ids: @@ -402,7 +402,7 @@ class Keyring(object): continue with PreserveLoggingContext(): - verify_request.deferred.callback( + verify_request.key_ready.callback( (server_name, key_id, fetch_key_result.verify_key) ) completed.append(verify_request) @@ -862,7 +862,7 @@ def _handle_key_deferred(verify_request): """ server_name = verify_request.server_name with PreserveLoggingContext(): - _, key_id, verify_key = yield verify_request.deferred + _, key_id, verify_key = yield verify_request.key_ready json_object = verify_request.json_object From 2615c6bd9e5456c5aefc23a9c89a4346b8afc6b0 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 5 Jun 2019 10:35:40 +0100 Subject: [PATCH 318/429] Clean up debug logging (#5347) Remove some spurious stuff, clarify some other stuff --- changelog.d/5347.misc | 2 ++ synapse/crypto/event_signing.py | 6 +++++- synapse/crypto/keyring.py | 4 ---- synapse/federation/federation_client.py | 16 +++++++++++++--- 4 files changed, 20 insertions(+), 8 deletions(-) create mode 100644 changelog.d/5347.misc diff --git a/changelog.d/5347.misc b/changelog.d/5347.misc new file mode 100644 index 0000000000..436245fb11 --- /dev/null +++ b/changelog.d/5347.misc @@ -0,0 +1,2 @@ +Various improvements to debug logging. 
+ diff --git a/synapse/crypto/event_signing.py b/synapse/crypto/event_signing.py index 1dfa727fcf..99a586655b 100644 --- a/synapse/crypto/event_signing.py +++ b/synapse/crypto/event_signing.py @@ -31,7 +31,11 @@ logger = logging.getLogger(__name__) def check_event_content_hash(event, hash_algorithm=hashlib.sha256): """Check whether the hash for this PDU matches the contents""" name, expected_hash = compute_content_hash(event.get_pdu_json(), hash_algorithm) - logger.debug("Expecting hash: %s", encode_base64(expected_hash)) + logger.debug( + "Verifying content hash on %s (expecting: %s)", + event.event_id, + encode_base64(expected_hash), + ) # some malformed events lack a 'hashes'. Protect against it being missing # or a weird type by basically treating it the same as an unhashed event. diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 8e1d666db1..e94e71bdad 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -866,10 +866,6 @@ def _handle_key_deferred(verify_request): json_object = verify_request.json_object - logger.debug( - "Got key %s %s:%s for server %s, verifying" - % (key_id, verify_key.alg, verify_key.version, server_name) - ) try: verify_signed_json(json_object, server_name, verify_key) except SignatureVerifyException as e: diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index d559605382..70573746d6 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -232,7 +232,8 @@ class FederationClient(FederationBase): moving to the next destination. None indicates no timeout. Returns: - Deferred: Results in the requested PDU. + Deferred: Results in the requested PDU, or None if we were unable to find + it. """ # TODO: Rate limit the number of times we try and get the same event. @@ -257,7 +258,12 @@ class FederationClient(FederationBase): destination, event_id, timeout=timeout, ) - logger.debug("transaction_data %r", transaction_data) + logger.debug( + "retrieved event id %s from %s: %r", + event_id, + destination, + transaction_data, + ) pdu_list = [ event_from_pdu_json(p, format_ver, outlier=outlier) @@ -331,7 +337,11 @@ class FederationClient(FederationBase): ) if failed_to_fetch: - logger.warn("Failed to get %r", failed_to_fetch) + logger.warning( + "Failed to fetch missing state/auth events for %s: %s", + room_id, + failed_to_fetch + ) event_map = { ev.event_id: ev for ev in fetched_events From 14f13babb00d64009b11ef822ebe6fafe044eebd Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 5 Jun 2019 10:38:25 +0100 Subject: [PATCH 319/429] Add a test room version where we enforce key validity (#5348) --- changelog.d/5348.bugfix | 1 + synapse/api/room_versions.py | 20 +++++++++----- synapse/federation/federation_base.py | 39 ++++++++++++++++----------- 3 files changed, 38 insertions(+), 22 deletions(-) create mode 100644 changelog.d/5348.bugfix diff --git a/changelog.d/5348.bugfix b/changelog.d/5348.bugfix new file mode 100644 index 0000000000..8d396c7990 --- /dev/null +++ b/changelog.d/5348.bugfix @@ -0,0 +1 @@ +Add a new room version where the timestamps on events are checked against the validity periods on signing keys. 
\ No newline at end of file diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index 4085bd10b9..501cdfb6a4 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -50,6 +50,7 @@ class RoomVersion(object): disposition = attr.ib() # str; one of the RoomDispositions event_format = attr.ib() # int; one of the EventFormatVersions state_res = attr.ib() # int; one of the StateResolutionVersions + enforce_key_validity = attr.ib() # bool class RoomVersions(object): @@ -58,30 +59,35 @@ class RoomVersions(object): RoomDisposition.STABLE, EventFormatVersions.V1, StateResolutionVersions.V1, - ) - STATE_V2_TEST = RoomVersion( - "state-v2-test", - RoomDisposition.UNSTABLE, - EventFormatVersions.V1, - StateResolutionVersions.V2, + enforce_key_validity=False, ) V2 = RoomVersion( "2", RoomDisposition.STABLE, EventFormatVersions.V1, StateResolutionVersions.V2, + enforce_key_validity=False, ) V3 = RoomVersion( "3", RoomDisposition.STABLE, EventFormatVersions.V2, StateResolutionVersions.V2, + enforce_key_validity=False, ) V4 = RoomVersion( "4", RoomDisposition.STABLE, EventFormatVersions.V3, StateResolutionVersions.V2, + enforce_key_validity=False, + ) + VDH_TEST_KEY_VALIDITY = RoomVersion( + "vdh-test-key-validity", + RoomDisposition.UNSTABLE, + EventFormatVersions.V3, + StateResolutionVersions.V2, + enforce_key_validity=False, ) @@ -90,7 +96,7 @@ KNOWN_ROOM_VERSIONS = { RoomVersions.V1, RoomVersions.V2, RoomVersions.V3, - RoomVersions.STATE_V2_TEST, RoomVersions.V4, + RoomVersions.VDH_TEST_KEY_VALIDITY, ) } # type: dict[str, RoomVersion] diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 4b38f7c759..b541913d82 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -223,9 +223,6 @@ def _check_sigs_on_pdus(keyring, room_version, pdus): the signatures are valid, or fail (with a SynapseError) if not. """ - # (currently this is written assuming the v1 room structure; we'll probably want a - # separate function for checking v2 rooms) - # we want to check that the event is signed by: # # (a) the sender's server @@ -257,6 +254,10 @@ def _check_sigs_on_pdus(keyring, room_version, pdus): for p in pdus ] + v = KNOWN_ROOM_VERSIONS.get(room_version) + if not v: + raise RuntimeError("Unrecognized room version %s" % (room_version,)) + # First we check that the sender event is signed by the sender's domain # (except if its a 3pid invite, in which case it may be sent by any server) pdus_to_check_sender = [ @@ -264,10 +265,16 @@ def _check_sigs_on_pdus(keyring, room_version, pdus): if not _is_invite_via_3pid(p.pdu) ] - more_deferreds = keyring.verify_json_objects_for_server([ - (p.sender_domain, p.redacted_pdu_json, 0) - for p in pdus_to_check_sender - ]) + more_deferreds = keyring.verify_json_objects_for_server( + [ + ( + p.sender_domain, + p.redacted_pdu_json, + p.pdu.origin_server_ts if v.enforce_key_validity else 0, + ) + for p in pdus_to_check_sender + ] + ) def sender_err(e, pdu_to_check): errmsg = "event id %s: unable to verify signature for sender %s: %s" % ( @@ -287,20 +294,22 @@ def _check_sigs_on_pdus(keyring, room_version, pdus): # event id's domain (normally only the case for joins/leaves), and add additional # checks. Only do this if the room version has a concept of event ID domain # (ie, the room version uses old-style non-hash event IDs). 
- v = KNOWN_ROOM_VERSIONS.get(room_version) - if not v: - raise RuntimeError("Unrecognized room version %s" % (room_version,)) - if v.event_format == EventFormatVersions.V1: pdus_to_check_event_id = [ p for p in pdus_to_check if p.sender_domain != get_domain_from_id(p.pdu.event_id) ] - more_deferreds = keyring.verify_json_objects_for_server([ - (get_domain_from_id(p.pdu.event_id), p.redacted_pdu_json, 0) - for p in pdus_to_check_event_id - ]) + more_deferreds = keyring.verify_json_objects_for_server( + [ + ( + get_domain_from_id(p.pdu.event_id), + p.redacted_pdu_json, + p.pdu.origin_server_ts if v.enforce_key_validity else 0, + ) + for p in pdus_to_check_event_id + ] + ) def event_err(e, pdu_to_check): errmsg = ( From cea9750d112cf74790fb8c16482a0068717954d5 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 4 Jun 2019 16:12:57 +0100 Subject: [PATCH 320/429] Associate a request_name with each verify request, for logging Also: * rename VerifyKeyRequest->VerifyJsonRequest * calculate key_ids on VerifyJsonRequest construction * refactor things to pass around VerifyJsonRequests instead of 4-tuples --- changelog.d/5353.misc | 2 + synapse/crypto/keyring.py | 112 +++++++++++++++---------- synapse/federation/federation_base.py | 2 + synapse/federation/transport/server.py | 4 +- synapse/groups/attestations.py | 4 +- tests/crypto/test_keyring.py | 18 ++-- 6 files changed, 86 insertions(+), 56 deletions(-) create mode 100644 changelog.d/5353.misc diff --git a/changelog.d/5353.misc b/changelog.d/5353.misc new file mode 100644 index 0000000000..436245fb11 --- /dev/null +++ b/changelog.d/5353.misc @@ -0,0 +1,2 @@ +Various improvements to debug logging. + diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index e94e71bdad..2b6b5913bc 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -60,9 +60,9 @@ logger = logging.getLogger(__name__) @attr.s(slots=True, cmp=False) -class VerifyKeyRequest(object): +class VerifyJsonRequest(object): """ - A request for a verify key to verify a JSON object. + A request to verify a JSON object. Attributes: server_name(str): The name of the server to verify against. @@ -85,11 +85,15 @@ class VerifyKeyRequest(object): """ server_name = attr.ib() - key_ids = attr.ib() json_object = attr.ib() minimum_valid_until_ts = attr.ib() + request_name = attr.ib() + key_ids = attr.ib(init=False) key_ready = attr.ib(default=attr.Factory(defer.Deferred)) + def __attrs_post_init__(self): + self.key_ids = signature_ids(self.json_object, self.server_name) + class KeyLookupError(ValueError): pass @@ -114,7 +118,9 @@ class Keyring(object): # These are regular, logcontext-agnostic Deferreds. self.key_downloads = {} - def verify_json_for_server(self, server_name, json_object, validity_time): + def verify_json_for_server( + self, server_name, json_object, validity_time, request_name + ): """Verify that a JSON object has been signed by a given server Args: @@ -125,24 +131,31 @@ class Keyring(object): validity_time (int): timestamp at which we require the signing key to be valid. (0 implies we don't care) + request_name (str): an identifier for this json object (eg, an event id) + for logging. 
+ Returns: Deferred[None]: completes if the the object was correctly signed, otherwise errbacks with an error """ - req = server_name, json_object, validity_time - - return logcontext.make_deferred_yieldable( - self.verify_json_objects_for_server((req,))[0] - ) + req = VerifyJsonRequest(server_name, json_object, validity_time, request_name) + requests = (req,) + return logcontext.make_deferred_yieldable(self._verify_objects(requests)[0]) def verify_json_objects_for_server(self, server_and_json): """Bulk verifies signatures of json objects, bulk fetching keys as necessary. Args: - server_and_json (iterable[Tuple[str, dict, int]): - Iterable of triplets of (server_name, json_object, validity_time) - validity_time is a timestamp at which the signing key must be valid. + server_and_json (iterable[Tuple[str, dict, int, str]): + Iterable of (server_name, json_object, validity_time, request_name) + tuples. + + validity_time is a timestamp at which the signing key must be + valid. + + request_name is an identifier for this json object (eg, an event id) + for logging. Returns: List: for each input triplet, a deferred indicating success @@ -150,38 +163,54 @@ class Keyring(object): server_name. The deferreds run their callbacks in the sentinel logcontext. """ - # a list of VerifyKeyRequests - verify_requests = [] + return self._verify_objects( + VerifyJsonRequest(server_name, json_object, validity_time, request_name) + for server_name, json_object, validity_time, request_name in server_and_json + ) + + def _verify_objects(self, verify_requests): + """Does the work of verify_json_[objects_]for_server + + + Args: + verify_requests (iterable[VerifyJsonRequest]): + Iterable of verification requests. + + Returns: + List: for each input item, a deferred indicating success + or failure to verify each json object's signature for the given + server_name. The deferreds run their callbacks in the sentinel + logcontext. + """ + # a list of VerifyJsonRequests which are awaiting a key lookup + key_lookups = [] handle = preserve_fn(_handle_key_deferred) - def process(server_name, json_object, validity_time): + def process(verify_request): """Process an entry in the request list - Given a (server_name, json_object, validity_time) triplet from the request - list, adds a key request to verify_requests, and returns a deferred which + Adds a key request to key_lookups, and returns a deferred which will complete or fail (in the sentinel context) when verification completes. """ - key_ids = signature_ids(json_object, server_name) - - if not key_ids: + if not verify_request.key_ids: return defer.fail( SynapseError( - 400, "Not signed by %s" % (server_name,), Codes.UNAUTHORIZED + 400, + "Not signed by %s" % (verify_request.server_name,), + Codes.UNAUTHORIZED, ) ) logger.debug( - "Verifying for %s with key_ids %s, min_validity %i", - server_name, - key_ids, - validity_time, + "Verifying %s for %s with key_ids %s, min_validity %i", + verify_request.request_name, + verify_request.server_name, + verify_request.key_ids, + verify_request.minimum_valid_until_ts, ) # add the key request to the queue, but don't start it off yet. - verify_request = VerifyKeyRequest( - server_name, key_ids, json_object, validity_time - ) - verify_requests.append(verify_request) + key_lookups.append(verify_request) # now run _handle_key_deferred, which will wait for the key request # to complete and then do the verification. 
@@ -190,13 +219,10 @@ class Keyring(object): # wrap it with preserve_fn (aka run_in_background) return handle(verify_request) - results = [ - process(server_name, json_object, validity_time) - for server_name, json_object, validity_time in server_and_json - ] + results = [process(r) for r in verify_requests] - if verify_requests: - run_in_background(self._start_key_lookups, verify_requests) + if key_lookups: + run_in_background(self._start_key_lookups, key_lookups) return results @@ -207,7 +233,7 @@ class Keyring(object): Once each fetch completes, verify_request.key_ready will be resolved. Args: - verify_requests (List[VerifyKeyRequest]): + verify_requests (List[VerifyJsonRequest]): """ try: @@ -308,7 +334,7 @@ class Keyring(object): with a SynapseError if none of the keys are found. Args: - verify_requests (list[VerifyKeyRequest]): list of verify requests + verify_requests (list[VerifyJsonRequest]): list of verify requests """ remaining_requests = set( @@ -357,7 +383,7 @@ class Keyring(object): Args: fetcher (KeyFetcher): fetcher to use to fetch the keys - remaining_requests (set[VerifyKeyRequest]): outstanding key requests. + remaining_requests (set[VerifyJsonRequest]): outstanding key requests. Any successfully-completed requests will be removed from the list. """ # dict[str, dict[str, int]]: keys to fetch. @@ -376,7 +402,7 @@ class Keyring(object): # the requests. keys_for_server[key_id] = max( keys_for_server.get(key_id, -1), - verify_request.minimum_valid_until_ts + verify_request.minimum_valid_until_ts, ) results = yield fetcher.get_keys(missing_keys) @@ -386,7 +412,7 @@ class Keyring(object): server_name = verify_request.server_name # see if any of the keys we got this time are sufficient to - # complete this VerifyKeyRequest. + # complete this VerifyJsonRequest. 
result_keys = results.get(server_name, {}) for key_id in verify_request.key_ids: fetch_key_result = result_keys.get(key_id) @@ -454,9 +480,7 @@ class BaseV2KeyFetcher(object): self.config = hs.get_config() @defer.inlineCallbacks - def process_v2_response( - self, from_server, response_json, time_added_ms - ): + def process_v2_response(self, from_server, response_json, time_added_ms): """Parse a 'Server Keys' structure from the result of a /key request This is used to parse either the entirety of the response from @@ -852,7 +876,7 @@ def _handle_key_deferred(verify_request): """Waits for the key to become available, and then performs a verification Args: - verify_request (VerifyKeyRequest): + verify_request (VerifyJsonRequest): Returns: Deferred[None] diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index b541913d82..fc5cfb7d83 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -271,6 +271,7 @@ def _check_sigs_on_pdus(keyring, room_version, pdus): p.sender_domain, p.redacted_pdu_json, p.pdu.origin_server_ts if v.enforce_key_validity else 0, + p.pdu.event_id, ) for p in pdus_to_check_sender ] @@ -306,6 +307,7 @@ def _check_sigs_on_pdus(keyring, room_version, pdus): get_domain_from_id(p.pdu.event_id), p.redacted_pdu_json, p.pdu.origin_server_ts if v.enforce_key_validity else 0, + p.pdu.event_id, ) for p in pdus_to_check_event_id ] diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index 0db8858cf1..949a5fb2aa 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -140,7 +140,9 @@ class Authenticator(object): 401, "Missing Authorization headers", Codes.UNAUTHORIZED, ) - yield self.keyring.verify_json_for_server(origin, json_request, now) + yield self.keyring.verify_json_for_server( + origin, json_request, now, "Incoming request" + ) logger.info("Request from %s", origin) request.authenticated_entity = origin diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index fa6b641ee1..e5dda1975f 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -101,7 +101,9 @@ class GroupAttestationSigning(object): if valid_until_ms < now: raise SynapseError(400, "Attestation expired") - yield self.keyring.verify_json_for_server(server_name, attestation, now) + yield self.keyring.verify_json_for_server( + server_name, attestation, now, "Group attestation" + ) def create_attestation(self, group_id, user_id): """Create an attestation for the group_id and user_id with default diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 4cff7e36c8..18121f4f6c 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -134,7 +134,7 @@ class KeyringTestCase(unittest.HomeserverTestCase): context_11.request = "11" res_deferreds = kr.verify_json_objects_for_server( - [("server10", json1, 0), ("server11", {}, 0)] + [("server10", json1, 0, "test10"), ("server11", {}, 0, "test11")] ) # the unsigned json should be rejected pretty quickly @@ -171,7 +171,7 @@ class KeyringTestCase(unittest.HomeserverTestCase): self.http_client.post_json.return_value = defer.Deferred() res_deferreds_2 = kr.verify_json_objects_for_server( - [("server10", json1, 0)] + [("server10", json1, 0, "test")] ) res_deferreds_2[0].addBoth(self.check_context, None) yield logcontext.make_deferred_yieldable(res_deferreds_2[0]) @@ -205,11 +205,11 @@ class 
KeyringTestCase(unittest.HomeserverTestCase): signedjson.sign.sign_json(json1, "server9", key1) # should fail immediately on an unsigned object - d = _verify_json_for_server(kr, "server9", {}, 0) + d = _verify_json_for_server(kr, "server9", {}, 0, "test unsigned") self.failureResultOf(d, SynapseError) # should suceed on a signed object - d = _verify_json_for_server(kr, "server9", json1, 500) + d = _verify_json_for_server(kr, "server9", json1, 500, "test signed") # self.assertFalse(d.called) self.get_success(d) @@ -239,7 +239,7 @@ class KeyringTestCase(unittest.HomeserverTestCase): # the first request should succeed; the second should fail because the key # has expired results = kr.verify_json_objects_for_server( - [("server1", json1, 500), ("server1", json1, 1500)] + [("server1", json1, 500, "test1"), ("server1", json1, 1500, "test2")] ) self.assertEqual(len(results), 2) self.get_success(results[0]) @@ -284,7 +284,7 @@ class KeyringTestCase(unittest.HomeserverTestCase): signedjson.sign.sign_json(json1, "server1", key1) results = kr.verify_json_objects_for_server( - [("server1", json1, 1200), ("server1", json1, 1500)] + [("server1", json1, 1200, "test1"), ("server1", json1, 1500, "test2")] ) self.assertEqual(len(results), 2) self.get_success(results[0]) @@ -522,16 +522,14 @@ def run_in_context(f, *args, **kwargs): defer.returnValue(rv) -def _verify_json_for_server(keyring, server_name, json_object, validity_time): +def _verify_json_for_server(kr, *args): """thin wrapper around verify_json_for_server which makes sure it is wrapped with the patched defer.inlineCallbacks. """ @defer.inlineCallbacks def v(): - rv1 = yield keyring.verify_json_for_server( - server_name, json_object, validity_time - ) + rv1 = yield kr.verify_json_for_server(*args) defer.returnValue(rv1) return run_in_context(v) From d18e4ea0d46a9390a75b270fe5f17dc3bc23f29a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 5 Jun 2019 10:58:51 +0100 Subject: [PATCH 321/429] Implement room v5 which enforces signing key validity Implements [MSC2077](https://github.com/matrix-org/matrix-doc/pull/2077) and fixes #5247 and #4364. --- changelog.d/5354.bugfix | 2 ++ synapse/api/room_versions.py | 10 +++++----- 2 files changed, 7 insertions(+), 5 deletions(-) create mode 100644 changelog.d/5354.bugfix diff --git a/changelog.d/5354.bugfix b/changelog.d/5354.bugfix new file mode 100644 index 0000000000..0c56032b30 --- /dev/null +++ b/changelog.d/5354.bugfix @@ -0,0 +1,2 @@ +Add a new room version where the timestamps on events are checked against the validity periods on signing keys. + diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index 501cdfb6a4..d644803d38 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -82,12 +82,12 @@ class RoomVersions(object): StateResolutionVersions.V2, enforce_key_validity=False, ) - VDH_TEST_KEY_VALIDITY = RoomVersion( - "vdh-test-key-validity", - RoomDisposition.UNSTABLE, + V5 = RoomVersion( + "5", + RoomDisposition.STABLE, EventFormatVersions.V3, StateResolutionVersions.V2, - enforce_key_validity=False, + enforce_key_validity=True, ) @@ -97,6 +97,6 @@ KNOWN_ROOM_VERSIONS = { RoomVersions.V2, RoomVersions.V3, RoomVersions.V4, - RoomVersions.VDH_TEST_KEY_VALIDITY, + RoomVersions.V5, ) } # type: dict[str, RoomVersion] From bc3d6b918b62c3dd6ce96eba638cf4601126e2f9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 5 Jun 2019 11:31:27 +0100 Subject: [PATCH 322/429] Add logging when request fails and clarify we ignore errors. 
--- synapse/handlers/federation.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index fa735efedd..ac5ca79143 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2034,8 +2034,14 @@ class FederationHandler(BaseHandler): ) except Exception: # We don't really mind if the above fails, so lets not fail - # processing if it does. - logger.exception("Failed to call _update_auth_events_and_context_for_auth") + # processing if it does. However, it really shouldn't fail so + # let's still log as an exception since we'll still want to fix + # any bugs. + logger.exception( + "Failed to double check auth events for %s with remote. " + "Ignoring failure and continuing processing of event.", + event.event_id, + ) try: self.auth.check(room_version, event, auth_events=auth_events) @@ -2108,9 +2114,10 @@ class FederationHandler(BaseHandler): remote_auth_chain = yield self.federation_client.get_event_auth( origin, event.room_id, event.event_id ) - except RequestSendFailed: + except RequestSendFailed as e: # The other side isn't around or doesn't implement the # endpoint, so lets just bail out. + logger.info("Failed to get event auth from remote: %s", e) return seen_remotes = yield self.store.have_seen_events( @@ -2264,9 +2271,10 @@ class FederationHandler(BaseHandler): event.event_id, local_auth_chain, ) - except RequestSendFailed: + except RequestSendFailed as e: # The other side isn't around or doesn't implement the # endpoint, so lets just bail out. + logger.info("Failed to query auth from remote: %s", e) return seen_remotes = yield self.store.have_seen_events( From dbbaf25dd3ce1c2b784cc3347142073cc586988d Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 5 Jun 2019 11:50:27 +0100 Subject: [PATCH 323/429] Do user_id != me checks before deciding whether we should pick heroes from the joined members or the parted ones --- synapse/handlers/sync.py | 34 +++++++++++++++++++--------------- 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 1ee9a6e313..bbf74027ac 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -598,15 +598,28 @@ class SyncHandler(object): if canonical_alias and canonical_alias.content: defer.returnValue(summary) + me = sync_config.user.to_string() + joined_user_ids = [ - r[0] for r in details.get(Membership.JOIN, empty_ms).members + r[0] + for r in details.get(Membership.JOIN, empty_ms).members + if r[0] != me ] invited_user_ids = [ - r[0] for r in details.get(Membership.INVITE, empty_ms).members + r[0] + for r in details.get(Membership.INVITE, empty_ms).members + if r[0] != me ] gone_user_ids = ( - [r[0] for r in details.get(Membership.LEAVE, empty_ms).members] + - [r[0] for r in details.get(Membership.BAN, empty_ms).members] + [ + r[0] + for r in details.get(Membership.LEAVE, empty_ms).members + if r[0] != me + ] + [ + r[0] + for r in details.get(Membership.BAN, empty_ms).members + if r[0] != me + ] ) # FIXME: only build up a member_ids list for our heroes @@ -621,22 +634,13 @@ class SyncHandler(object): member_ids[user_id] = event_id # FIXME: order by stream ordering rather than as returned by SQL - me = sync_config.user.to_string() if (joined_user_ids or invited_user_ids): summary['m.heroes'] = sorted( - [ - user_id - for user_id in (joined_user_ids + invited_user_ids) - if user_id != me - ] + [user_id for user_id in (joined_user_ids + invited_user_ids)] 
)[0:5] else: summary['m.heroes'] = sorted( - [ - user_id - for user_id in gone_user_ids - if user_id != me - ] + [user_id for user_id in gone_user_ids] )[0:5] if not sync_config.filter_collection.lazy_load_members(): From a412be2bc796d90c28941960aa29012dcc171ba1 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 5 Jun 2019 11:53:50 +0100 Subject: [PATCH 324/429] Changelog --- changelog.d/5355.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5355.bugfix diff --git a/changelog.d/5355.bugfix b/changelog.d/5355.bugfix new file mode 100644 index 0000000000..5de469e867 --- /dev/null +++ b/changelog.d/5355.bugfix @@ -0,0 +1 @@ +Include left members in room summaries' heroes From 804f26a9ffb4a5f90acdf3d8e7e869fcecf33942 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 5 Jun 2019 12:03:01 +0100 Subject: [PATCH 325/429] Properly format the changelog --- changelog.d/5355.bugfix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/5355.bugfix b/changelog.d/5355.bugfix index 5de469e867..e1955a7403 100644 --- a/changelog.d/5355.bugfix +++ b/changelog.d/5355.bugfix @@ -1 +1 @@ -Include left members in room summaries' heroes +Include left members in room summaries' heroes. From 26713515de97c98dda99a9b06325781fe09b1cbe Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 5 Jun 2019 13:16:23 +0100 Subject: [PATCH 326/429] Neilj/mau tracking config explainer (#5284) Improve documentation of monthly active user blocking and mau_trial_days --- changelog.d/5284.misc | 1 + docs/sample_config.yaml | 16 ++++++++++++++++ synapse/config/server.py | 16 ++++++++++++++++ 3 files changed, 33 insertions(+) create mode 100644 changelog.d/5284.misc diff --git a/changelog.d/5284.misc b/changelog.d/5284.misc new file mode 100644 index 0000000000..c4d42ca3d9 --- /dev/null +++ b/changelog.d/5284.misc @@ -0,0 +1 @@ +Improve sample config for monthly active user blocking. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 493ea9ee9e..0960b9b5ed 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -261,6 +261,22 @@ listeners: # Monthly Active User Blocking # +# Used in cases where the admin or server owner wants to limit to the +# number of monthly active users. +# +# 'limit_usage_by_mau' disables/enables monthly active user blocking. When +# anabled and a limit is reached the server returns a 'ResourceLimitError' +# with error type Codes.RESOURCE_LIMIT_EXCEEDED +# +# 'max_mau_value' is the hard limit of monthly active users above which +# the server will start blocking user actions. +# +# 'mau_trial_days' is a means to add a grace period for active users. It +# means that users must be active for this number of days before they +# can be considered active and guards against the case where lots of users +# sign up in a short space of time never to return after their initial +# session. +# #limit_usage_by_mau: False #max_mau_value: 50 #mau_trial_days: 2 diff --git a/synapse/config/server.py b/synapse/config/server.py index e763e19e15..334921d421 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -585,6 +585,22 @@ class ServerConfig(Config): # Monthly Active User Blocking # + # Used in cases where the admin or server owner wants to limit to the + # number of monthly active users. + # + # 'limit_usage_by_mau' disables/enables monthly active user blocking. 
When + # anabled and a limit is reached the server returns a 'ResourceLimitError' + # with error type Codes.RESOURCE_LIMIT_EXCEEDED + # + # 'max_mau_value' is the hard limit of monthly active users above which + # the server will start blocking user actions. + # + # 'mau_trial_days' is a means to add a grace period for active users. It + # means that users must be active for this number of days before they + # can be considered active and guards against the case where lots of users + # sign up in a short space of time never to return after their initial + # session. + # #limit_usage_by_mau: False #max_mau_value: 50 #mau_trial_days: 2 From 4650526b5ebf699920ebf9ecfdf13797c189a922 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 5 Jun 2019 13:47:03 +0100 Subject: [PATCH 327/429] Neilj/changelog clean up (#5356) * group together key validity refactors --- changelog.d/5232.misc | 2 +- changelog.d/5234.misc | 2 +- changelog.d/5235.misc | 2 +- changelog.d/5236.misc | 2 +- changelog.d/5237.misc | 2 +- changelog.d/5244.misc | 2 +- changelog.d/5250.misc | 2 +- changelog.d/5296.misc | 2 +- changelog.d/5299.misc | 2 +- changelog.d/5343.misc | 2 +- changelog.d/5347.misc | 3 +-- changelog.d/5356.misc | 1 + 12 files changed, 12 insertions(+), 12 deletions(-) create mode 100644 changelog.d/5356.misc diff --git a/changelog.d/5232.misc b/changelog.d/5232.misc index 1cdc71f095..8336bc55dc 100644 --- a/changelog.d/5232.misc +++ b/changelog.d/5232.misc @@ -1 +1 @@ -Run black on synapse.crypto.keyring. +Preparatory work for key-validity features. diff --git a/changelog.d/5234.misc b/changelog.d/5234.misc index 43fbd6f67c..8336bc55dc 100644 --- a/changelog.d/5234.misc +++ b/changelog.d/5234.misc @@ -1 +1 @@ -Rewrite store_server_verify_key to store several keys at once. +Preparatory work for key-validity features. diff --git a/changelog.d/5235.misc b/changelog.d/5235.misc index 2296ad2a4f..8336bc55dc 100644 --- a/changelog.d/5235.misc +++ b/changelog.d/5235.misc @@ -1 +1 @@ -Remove unused VerifyKey.expired and .time_added fields. +Preparatory work for key-validity features. diff --git a/changelog.d/5236.misc b/changelog.d/5236.misc index cb4417a9f4..8336bc55dc 100644 --- a/changelog.d/5236.misc +++ b/changelog.d/5236.misc @@ -1 +1 @@ -Simplify Keyring.process_v2_response. \ No newline at end of file +Preparatory work for key-validity features. diff --git a/changelog.d/5237.misc b/changelog.d/5237.misc index f4fe3b821b..8336bc55dc 100644 --- a/changelog.d/5237.misc +++ b/changelog.d/5237.misc @@ -1 +1 @@ -Store key validity time in the storage layer. +Preparatory work for key-validity features. diff --git a/changelog.d/5244.misc b/changelog.d/5244.misc index 9cc1fb869d..8336bc55dc 100644 --- a/changelog.d/5244.misc +++ b/changelog.d/5244.misc @@ -1 +1 @@ -Refactor synapse.crypto.keyring to use a KeyFetcher interface. +Preparatory work for key-validity features. diff --git a/changelog.d/5250.misc b/changelog.d/5250.misc index 575a299a82..8336bc55dc 100644 --- a/changelog.d/5250.misc +++ b/changelog.d/5250.misc @@ -1 +1 @@ -Simplification to Keyring.wait_for_previous_lookups. +Preparatory work for key-validity features. diff --git a/changelog.d/5296.misc b/changelog.d/5296.misc index a038a6f7f6..8336bc55dc 100644 --- a/changelog.d/5296.misc +++ b/changelog.d/5296.misc @@ -1 +1 @@ -Refactor keyring.VerifyKeyRequest to use attr.s. +Preparatory work for key-validity features. 
diff --git a/changelog.d/5299.misc b/changelog.d/5299.misc index 53297c768b..8336bc55dc 100644 --- a/changelog.d/5299.misc +++ b/changelog.d/5299.misc @@ -1 +1 @@ -Rewrite get_server_verify_keys, again. +Preparatory work for key-validity features. diff --git a/changelog.d/5343.misc b/changelog.d/5343.misc index dbee0f71b9..8336bc55dc 100644 --- a/changelog.d/5343.misc +++ b/changelog.d/5343.misc @@ -1 +1 @@ -Rename VerifyKeyRequest.deferred field. +Preparatory work for key-validity features. diff --git a/changelog.d/5347.misc b/changelog.d/5347.misc index 436245fb11..8336bc55dc 100644 --- a/changelog.d/5347.misc +++ b/changelog.d/5347.misc @@ -1,2 +1 @@ -Various improvements to debug logging. - +Preparatory work for key-validity features. diff --git a/changelog.d/5356.misc b/changelog.d/5356.misc new file mode 100644 index 0000000000..8336bc55dc --- /dev/null +++ b/changelog.d/5356.misc @@ -0,0 +1 @@ +Preparatory work for key-validity features. From a4cf2c1184f137bc52c2b12ef32876a1cb10801f Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 5 Jun 2019 14:00:18 +0100 Subject: [PATCH 328/429] Rewrite changelog --- changelog.d/5325.bugfix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/5325.bugfix b/changelog.d/5325.bugfix index 6914398bcc..b9413388f5 100644 --- a/changelog.d/5325.bugfix +++ b/changelog.d/5325.bugfix @@ -1 +1 @@ -Add account_validity's email_sent column to the list of boolean columns in synapse_port_db. +Fix a bug where running synapse_port_db would cause the account validity feature to fail because it didn't set the type of the email_sent column to boolean. From 0a2f5226441936bab45ed4bc69836a008f69249a Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 5 Jun 2019 14:02:29 +0100 Subject: [PATCH 329/429] Simplify condition --- synapse/handlers/sync.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 72997d6d04..78318aacd8 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -587,15 +587,14 @@ class SyncHandler(object): # for the "name" value and default to an empty string. if name_id: name = yield self.store.get_event(name_id, allow_none=True) - if name and name.content and name.content.get("name", ""): + if name and name.content.get("name", ""): defer.returnValue(summary) if canonical_alias_id: canonical_alias = yield self.store.get_event( canonical_alias_id, allow_none=True, ) - if (canonical_alias and canonical_alias.content - and canonical_alias.content.get("alias", "")): + if canonical_alias and canonical_alias.content.get("alias", ""): defer.returnValue(summary) joined_user_ids = [ From e2dfb922e1334e4a506a9d678d0f1bf573cc95e6 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 5 Jun 2019 14:16:07 +0100 Subject: [PATCH 330/429] Validate federation server TLS certificates by default. --- changelog.d/5359.feature | 1 + synapse/config/tls.py | 10 +++++----- .../http/federation/test_matrix_federation_agent.py | 12 +++++++++--- 3 files changed, 15 insertions(+), 8 deletions(-) create mode 100644 changelog.d/5359.feature diff --git a/changelog.d/5359.feature b/changelog.d/5359.feature new file mode 100644 index 0000000000..2a03939834 --- /dev/null +++ b/changelog.d/5359.feature @@ -0,0 +1 @@ +Validate federation server TLS certificates by default (implements [MSC1711](https://github.com/matrix-org/matrix-doc/blob/master/proposals/1711-x509-for-federation.md)). 
diff --git a/synapse/config/tls.py b/synapse/config/tls.py index 72dd5926f9..43712b8213 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -74,7 +74,7 @@ class TlsConfig(Config): # Whether to verify certificates on outbound federation traffic self.federation_verify_certificates = config.get( - "federation_verify_certificates", False, + "federation_verify_certificates", True, ) # Whitelist of domains to not verify certificates for @@ -241,12 +241,12 @@ class TlsConfig(Config): # #tls_private_key_path: "%(tls_private_key_path)s" - # Whether to verify TLS certificates when sending federation traffic. + # Whether to verify TLS server certificates for outbound federation requests. # - # This currently defaults to `false`, however this will change in - # Synapse 1.0 when valid federation certificates will be required. + # Defaults to `true`. To disable certificate verification, uncomment the + # following line. # - #federation_verify_certificates: true + #federation_verify_certificates: false # Skip federation certificate verification on the following whitelist # of domains. diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index ed0ca079d9..4153da4da7 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -27,6 +27,7 @@ from twisted.web.http import HTTPChannel from twisted.web.http_headers import Headers from twisted.web.iweb import IPolicyForHTTPS +from synapse.config.homeserver import HomeServerConfig from synapse.crypto.context_factory import ClientTLSOptionsFactory from synapse.http.federation.matrix_federation_agent import ( MatrixFederationAgent, @@ -52,11 +53,16 @@ class MatrixFederationAgentTests(TestCase): self.well_known_cache = TTLCache("test_cache", timer=self.reactor.seconds) + # for now, we disable cert verification for the test, since the cert we + # present will not be trusted. We should do better here, though. + config_dict = default_config("test", parse=False) + config_dict["federation_verify_certificates"] = False + config = HomeServerConfig() + config.parse_config_dict(config_dict) + self.agent = MatrixFederationAgent( reactor=self.reactor, - tls_client_options_factory=ClientTLSOptionsFactory( - default_config("test", parse=True) - ), + tls_client_options_factory=ClientTLSOptionsFactory(config), _well_known_tls_policy=TrustingTLSPolicyForHTTPS(), _srv_resolver=self.mock_resolver, _well_known_cache=self.well_known_cache, From 95ab2eb4a1e9757bfe881abacce6ff81b3dbc371 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 5 Jun 2019 15:12:33 +0100 Subject: [PATCH 331/429] Fix notes about well-known and acme (#5357) fixes #4951 --- changelog.d/5357.doc | 1 + docs/MSC1711_certificates_FAQ.md | 11 +++++------ 2 files changed, 6 insertions(+), 6 deletions(-) create mode 100644 changelog.d/5357.doc diff --git a/changelog.d/5357.doc b/changelog.d/5357.doc new file mode 100644 index 0000000000..27cba49641 --- /dev/null +++ b/changelog.d/5357.doc @@ -0,0 +1 @@ +Fix notes about ACME in the MSC1711 faq. diff --git a/docs/MSC1711_certificates_FAQ.md b/docs/MSC1711_certificates_FAQ.md index ebfb20f5c8..37f7f669c9 100644 --- a/docs/MSC1711_certificates_FAQ.md +++ b/docs/MSC1711_certificates_FAQ.md @@ -145,12 +145,11 @@ You can do this with a `.well-known` file as follows: 1. Keep the SRV record in place - it is needed for backwards compatibility with Synapse 0.34 and earlier. - 2. 
Give synapse a certificate corresponding to the target domain - (`customer.example.net` in the above example). Currently Synapse's ACME - support [does not support - this](https://github.com/matrix-org/synapse/issues/4552), so you will have - to acquire a certificate yourself and give it to Synapse via - `tls_certificate_path` and `tls_private_key_path`. + 2. Give Synapse a certificate corresponding to the target domain + (`customer.example.net` in the above example). You can either use Synapse's + built-in [ACME support](./ACME.md) for this (via the `domain` parameter in + the `acme` section), or acquire a certificate yourself and give it to + Synapse via `tls_certificate_path` and `tls_private_key_path`. 3. Restart Synapse to ensure the new certificate is loaded. From b4f1cd31f49dff94b4006a6eeb3b2c27eb003dd4 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 5 Jun 2019 15:30:10 +0100 Subject: [PATCH 332/429] Update sample config --- docs/sample_config.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 493ea9ee9e..d10a355d68 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -313,12 +313,12 @@ listeners: # #tls_private_key_path: "CONFDIR/SERVERNAME.tls.key" -# Whether to verify TLS certificates when sending federation traffic. +# Whether to verify TLS server certificates for outbound federation requests. # -# This currently defaults to `false`, however this will change in -# Synapse 1.0 when valid federation certificates will be required. +# Defaults to `true`. To disable certificate verification, uncomment the +# following line. # -#federation_verify_certificates: true +#federation_verify_certificates: false # Skip federation certificate verification on the following whitelist # of domains. From fb98c05e0373a517f74568f83634c5b579812571 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 5 Jun 2019 15:32:06 +0100 Subject: [PATCH 333/429] add a script to generate new signing_key files --- changelog.d/5361.feature | 1 + scripts/generate_signing_key.py | 37 +++++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+) create mode 100644 changelog.d/5361.feature create mode 100755 scripts/generate_signing_key.py diff --git a/changelog.d/5361.feature b/changelog.d/5361.feature new file mode 100644 index 0000000000..10768cdad3 --- /dev/null +++ b/changelog.d/5361.feature @@ -0,0 +1 @@ +Add a script to generate new signing-key files. diff --git a/scripts/generate_signing_key.py b/scripts/generate_signing_key.py new file mode 100755 index 0000000000..ba3ba97395 --- /dev/null +++ b/scripts/generate_signing_key.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import argparse +import sys + +from signedjson.key import write_signing_keys, generate_signing_key + +from synapse.util.stringutils import random_string + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + + parser.add_argument( + "-o", "--output_file", + + type=argparse.FileType('w'), + default=sys.stdout, + help="Where to write the output to", + ) + args = parser.parse_args() + + key_id = "a_" + random_string(4) + key = generate_signing_key(key_id), + write_signing_keys(args.output_file, key) From 75538813fcd0403ec8915484a813b99e6eb256c6 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 5 Jun 2019 15:45:46 +0100 Subject: [PATCH 334/429] Fix background updates to handle redactions/rejections (#5352) * Fix background updates to handle redactions/rejections In background updates based on current state delta stream we need to handle that we may not have all the events (or at least that `get_events` may raise an exception). --- changelog.d/5352.bugfix | 1 + synapse/handlers/presence.py | 11 +++--- synapse/handlers/stats.py | 18 +++++++--- synapse/storage/events_worker.py | 37 +++++++++++++++++++ tests/handlers/test_stats.py | 62 ++++++++++++++++++++++++++++++-- 5 files changed, 117 insertions(+), 12 deletions(-) create mode 100644 changelog.d/5352.bugfix diff --git a/changelog.d/5352.bugfix b/changelog.d/5352.bugfix new file mode 100644 index 0000000000..2ffefe5a68 --- /dev/null +++ b/changelog.d/5352.bugfix @@ -0,0 +1 @@ +Fix room stats and presence background updates to correctly handle missing events. diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 6209858bbb..e49c8203ef 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -828,14 +828,17 @@ class PresenceHandler(object): # joins. continue - event = yield self.store.get_event(event_id) - if event.content.get("membership") != Membership.JOIN: + event = yield self.store.get_event(event_id, allow_none=True) + if not event or event.content.get("membership") != Membership.JOIN: # We only care about joins continue if prev_event_id: - prev_event = yield self.store.get_event(prev_event_id) - if prev_event.content.get("membership") == Membership.JOIN: + prev_event = yield self.store.get_event(prev_event_id, allow_none=True) + if ( + prev_event + and prev_event.content.get("membership") == Membership.JOIN + ): # Ignore changes to join events. continue diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py index 0e92b405ba..7ad16c8566 100644 --- a/synapse/handlers/stats.py +++ b/synapse/handlers/stats.py @@ -115,6 +115,7 @@ class StatsHandler(StateDeltasHandler): event_id = delta["event_id"] stream_id = delta["stream_id"] prev_event_id = delta["prev_event_id"] + stream_pos = delta["stream_id"] logger.debug("Handling: %r %r, %s", typ, state_key, event_id) @@ -136,10 +137,15 @@ class StatsHandler(StateDeltasHandler): event_content = {} if event_id is not None: - event_content = (yield self.store.get_event(event_id)).content or {} + event = yield self.store.get_event(event_id, allow_none=True) + if event: + event_content = event.content or {} + + # We use stream_pos here rather than fetch by event_id as event_id + # may be None + now = yield self.store.get_received_ts_by_stream_pos(stream_pos) # quantise time to the nearest bucket - now = yield self.store.get_received_ts(event_id) now = (now // 1000 // self.stats_bucket_size) * self.stats_bucket_size if typ == EventTypes.Member: @@ -149,9 +155,11 @@ class StatsHandler(StateDeltasHandler): # compare them. 
prev_event_content = {} if prev_event_id is not None: - prev_event_content = ( - yield self.store.get_event(prev_event_id) - ).content + prev_event = yield self.store.get_event( + prev_event_id, allow_none=True, + ) + if prev_event: + prev_event_content = prev_event.content membership = event_content.get("membership", Membership.LEAVE) prev_membership = prev_event_content.get("membership", Membership.LEAVE) diff --git a/synapse/storage/events_worker.py b/synapse/storage/events_worker.py index 1782428048..cc7df5cf14 100644 --- a/synapse/storage/events_worker.py +++ b/synapse/storage/events_worker.py @@ -78,6 +78,43 @@ class EventsWorkerStore(SQLBaseStore): desc="get_received_ts", ) + def get_received_ts_by_stream_pos(self, stream_ordering): + """Given a stream ordering get an approximate timestamp of when it + happened. + + This is done by simply taking the received ts of the first event that + has a stream ordering greater than or equal to the given stream pos. + If none exists returns the current time, on the assumption that it must + have happened recently. + + Args: + stream_ordering (int) + + Returns: + Deferred[int] + """ + + def _get_approximate_received_ts_txn(txn): + sql = """ + SELECT received_ts FROM events + WHERE stream_ordering >= ? + LIMIT 1 + """ + + txn.execute(sql, (stream_ordering,)) + row = txn.fetchone() + if row and row[0]: + ts = row[0] + else: + ts = self.clock.time_msec() + + return ts + + return self.runInteraction( + "get_approximate_received_ts", + _get_approximate_received_ts_txn, + ) + @defer.inlineCallbacks def get_event( self, diff --git a/tests/handlers/test_stats.py b/tests/handlers/test_stats.py index 249aba3d59..2710c991cf 100644 --- a/tests/handlers/test_stats.py +++ b/tests/handlers/test_stats.py @@ -204,7 +204,7 @@ class StatsRoomTests(unittest.HomeserverTestCase): "a2": {"membership": "not a real thing"}, } - def get_event(event_id): + def get_event(event_id, allow_none=True): m = Mock() m.content = events[event_id] d = defer.Deferred() @@ -224,7 +224,7 @@ class StatsRoomTests(unittest.HomeserverTestCase): "room_id": "room", "event_id": "a1", "prev_event_id": "a2", - "stream_id": "bleb", + "stream_id": 60, } ] @@ -241,7 +241,7 @@ class StatsRoomTests(unittest.HomeserverTestCase): "room_id": "room", "event_id": "a2", "prev_event_id": "a1", - "stream_id": "bleb", + "stream_id": 100, } ] @@ -249,3 +249,59 @@ class StatsRoomTests(unittest.HomeserverTestCase): self.assertEqual( f.value.args[0], "'not a real thing' is not a valid membership" ) + + def test_redacted_prev_event(self): + """ + If the prev_event does not exist, then it is assumed to be a LEAVE. 
+ """ + u1 = self.register_user("u1", "pass") + u1_token = self.login("u1", "pass") + + room_1 = self.helper.create_room_as(u1, tok=u1_token) + + # Do the initial population of the user directory via the background update + self._add_background_updates() + + while not self.get_success(self.store.has_completed_background_updates()): + self.get_success(self.store.do_next_background_update(100), by=0.1) + + events = { + "a1": None, + "a2": {"membership": Membership.JOIN}, + } + + def get_event(event_id, allow_none=True): + if events.get(event_id): + m = Mock() + m.content = events[event_id] + else: + m = None + d = defer.Deferred() + self.reactor.callLater(0.0, d.callback, m) + return d + + def get_received_ts(event_id): + return defer.succeed(1) + + self.store.get_received_ts = get_received_ts + self.store.get_event = get_event + + deltas = [ + { + "type": EventTypes.Member, + "state_key": "some_user:test", + "room_id": room_1, + "event_id": "a2", + "prev_event_id": "a1", + "stream_id": 100, + } + ] + + # Handle our fake deltas, which has a user going from LEAVE -> JOIN. + self.get_success(self.handler._handle_deltas(deltas)) + + # One delta, with two joined members -- the room creator, and our fake + # user. + r = self.get_success(self.store.get_deltas_for_room(room_1, 0)) + self.assertEqual(len(r), 1) + self.assertEqual(r[0]["joined_members"], 2) From 94f6c674df8035d44e7219193377f77afdfa6669 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Wed, 5 Jun 2019 16:11:31 +0100 Subject: [PATCH 335/429] Neilj/add r0.5 to versions (#5360) * Update _matrix/client/versions to reference support for r0.5.0 --- changelog.d/5360.feature | 1 + synapse/rest/client/versions.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/5360.feature diff --git a/changelog.d/5360.feature b/changelog.d/5360.feature new file mode 100644 index 0000000000..01fbb3b06d --- /dev/null +++ b/changelog.d/5360.feature @@ -0,0 +1 @@ +Update /_matrix/client/versions to reference support for r0.5.0. diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 27e7cbf3cc..babbf6a23c 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -39,6 +39,7 @@ class VersionsRestServlet(RestServlet): "r0.2.0", "r0.3.0", "r0.4.0", + "r0.5.0", ], # as per MSC1497: "unstable_features": { From f8a45302c9ce147d7797ceb9e3757bd3b2af6b99 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 5 Jun 2019 16:16:33 +0100 Subject: [PATCH 336/429] Fix `federation_custom_ca_list` configuration option. Previously, setting this option would cause an exception at startup. --- changelog.d/5362.bugfix | 1 + synapse/config/tls.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/5362.bugfix diff --git a/changelog.d/5362.bugfix b/changelog.d/5362.bugfix new file mode 100644 index 0000000000..1c8b19182c --- /dev/null +++ b/changelog.d/5362.bugfix @@ -0,0 +1 @@ +Fix `federation_custom_ca_list` configuration option. 
diff --git a/synapse/config/tls.py b/synapse/config/tls.py index 72dd5926f9..94a53d05f9 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -107,7 +107,7 @@ class TlsConfig(Config): certs = [] for ca_file in custom_ca_list: logger.debug("Reading custom CA certificate file: %s", ca_file) - content = self.read_file(ca_file) + content = self.read_file(ca_file, "federation_custom_ca_list") # Parse the CA certificates try: From fe13bd52acb67de56fb5e1866d0ec64fff10ed94 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 5 Jun 2019 16:35:05 +0100 Subject: [PATCH 337/429] Don't check whether the user's account is expired on /send_mail requests --- synapse/api/auth.py | 10 ++++-- .../rest/client/v2_alpha/account_validity.py | 2 +- tests/rest/client/v2_alpha/test_register.py | 35 +++++++++++++++++++ 3 files changed, 44 insertions(+), 3 deletions(-) diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 0c6c93a87b..e24d942553 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -184,7 +184,13 @@ class Auth(object): return event_auth.get_public_keys(invite_event) @defer.inlineCallbacks - def get_user_by_req(self, request, allow_guest=False, rights="access"): + def get_user_by_req( + self, + request, + allow_guest=False, + rights="access", + allow_expired=False, + ): """ Get a registered user's ID. Args: @@ -229,7 +235,7 @@ class Auth(object): is_guest = user_info["is_guest"] # Deny the request if the user account has expired. - if self._account_validity.enabled: + if self._account_validity.enabled and not allow_expired: user_id = user.to_string() expiration_ts = yield self.store.get_expiration_ts_for_user(user_id) if expiration_ts is not None and self.clock.time_msec() >= expiration_ts: diff --git a/synapse/rest/client/v2_alpha/account_validity.py b/synapse/rest/client/v2_alpha/account_validity.py index fc8dbeb617..9bc1e208ca 100644 --- a/synapse/rest/client/v2_alpha/account_validity.py +++ b/synapse/rest/client/v2_alpha/account_validity.py @@ -79,7 +79,7 @@ class AccountValiditySendMailServlet(RestServlet): if not self.account_validity.renew_by_email_enabled: raise AuthError(403, "Account renewal via email is disabled on this server.") - requester = yield self.auth.get_user_by_req(request) + requester = yield self.auth.get_user_by_req(request, allow_expired=True) user_id = requester.user.to_string() yield self.account_activity_handler.send_renewal_email_to_user(user_id) diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index d4a1d4d50c..77a2923af6 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -427,6 +427,41 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): self.assertEqual(len(self.email_attempts), 1) + def test_manual_email_send_expired_account(self): + user_id = self.register_user("kermit", "monkey") + tok = self.login("kermit", "monkey") + + # We need to manually add an email address otherwise the handler will do + # nothing. + now = self.hs.clock.time_msec() + self.get_success( + self.store.user_add_threepid( + user_id=user_id, + medium="email", + address="kermit@example.com", + validated_at=now, + added_at=now, + ) + ) + + # Make the account expire. + self.reactor.advance(datetime.timedelta(days=8).total_seconds()) + + # Ignore all emails sent by the automatic background task and only focus on the + # ones sent manually. + self.email_attempts = [] + + # Test that we're still able to manually trigger a mail to be sent. 
+ request, channel = self.make_request( + b"POST", + "/_matrix/client/unstable/account_validity/send_mail", + access_token=tok, + ) + self.render(request) + self.assertEquals(channel.result["code"], b"200", channel.result) + + self.assertEqual(len(self.email_attempts), 1) + class AccountValidityBackgroundJobTestCase(unittest.HomeserverTestCase): From d51ca9d9b3856d60af67ceac05df98347838a221 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 5 Jun 2019 16:38:51 +0100 Subject: [PATCH 338/429] Changelog --- changelog.d/5363.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5363.feature diff --git a/changelog.d/5363.feature b/changelog.d/5363.feature new file mode 100644 index 0000000000..179a789fd7 --- /dev/null +++ b/changelog.d/5363.feature @@ -0,0 +1 @@ +Allow expired user to trigger renewal email sending manually From ccbc9e5e17b59661d5f1b67050927c2fb69a0a89 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 5 Jun 2019 16:41:26 +0100 Subject: [PATCH 339/429] Gah towncrier --- changelog.d/5363.feature | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/5363.feature b/changelog.d/5363.feature index 179a789fd7..803fe3fc37 100644 --- a/changelog.d/5363.feature +++ b/changelog.d/5363.feature @@ -1 +1 @@ -Allow expired user to trigger renewal email sending manually +Allow expired user to trigger renewal email sending manually. From 6362e3af14cff28bc51d0e66d207b84bae7fd422 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 6 Jun 2019 04:20:35 +1000 Subject: [PATCH 340/429] add more comments --- synapse/storage/schema/full_schemas/README.txt | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/synapse/storage/schema/full_schemas/README.txt b/synapse/storage/schema/full_schemas/README.txt index df49f9b39e..d3f6401344 100644 --- a/synapse/storage/schema/full_schemas/README.txt +++ b/synapse/storage/schema/full_schemas/README.txt @@ -1,6 +1,8 @@ Building full schema dumps ========================== +These schemas need to be made from a database that has had all background updates run. + Postgres -------- @@ -11,4 +13,7 @@ SQLite $ sqlite3 $DATABASE_FILE ".schema" > full.sql.sqlite +After +----- + Delete the CREATE statements for "sqlite_stat1", "schema_version", "applied_schema_deltas", and "applied_module_schemas". \ No newline at end of file From 64fa9287920fd391090a607a9bfd3da5415aee16 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 6 Jun 2019 10:34:12 +0100 Subject: [PATCH 341/429] Simplify condition --- synapse/handlers/sync.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 78318aacd8..b878ce11dc 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -587,14 +587,14 @@ class SyncHandler(object): # for the "name" value and default to an empty string. 
if name_id: name = yield self.store.get_event(name_id, allow_none=True) - if name and name.content.get("name", ""): + if name and name.content.get("name"): defer.returnValue(summary) if canonical_alias_id: canonical_alias = yield self.store.get_event( canonical_alias_id, allow_none=True, ) - if canonical_alias and canonical_alias.content.get("alias", ""): + if canonical_alias and canonical_alias.content.get("alias"): defer.returnValue(summary) joined_user_ids = [ From 7898a1a48de18302bab06e985cc72ee67908d609 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 6 Jun 2019 10:34:33 +0100 Subject: [PATCH 342/429] Add credit in the changelog --- changelog.d/5084.bugfix | 1 - changelog.d/5089.bugfix | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) delete mode 100644 changelog.d/5084.bugfix create mode 100644 changelog.d/5089.bugfix diff --git a/changelog.d/5084.bugfix b/changelog.d/5084.bugfix deleted file mode 100644 index 9d8434460c..0000000000 --- a/changelog.d/5084.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fixes client-server API not sending "m.heroes" to lazy-load /sync requests when a rooms name or its canonical alias are empty. diff --git a/changelog.d/5089.bugfix b/changelog.d/5089.bugfix new file mode 100644 index 0000000000..68643cebb7 --- /dev/null +++ b/changelog.d/5089.bugfix @@ -0,0 +1 @@ +Fixes client-server API not sending "m.heroes" to lazy-load /sync requests when a rooms name or its canonical alias are empty. Thanks to @dnaf for this work! From 7f08a3523a2018b66cd96cccf30602c8f687b495 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 6 Jun 2019 11:09:38 +0100 Subject: [PATCH 343/429] Better words --- synapse/storage/stream.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/stream.py b/synapse/storage/stream.py index 0b5f5f9663..6f7f65d96b 100644 --- a/synapse/storage/stream.py +++ b/synapse/storage/stream.py @@ -592,7 +592,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): ) def get_max_topological_token(self, room_id, stream_key): - """Get the max topological token in a room that before given stream + """Get the max topological token in a room before the given stream ordering. Args: From 71063a69b8a72576ae7587042f4c2f24fcbd6bcd Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 6 Jun 2019 14:45:17 +0100 Subject: [PATCH 344/429] Fix missing logcontext for PresenceHandler.on_shutdown. (#5369) Fixes some warnings, and a scary-looking stacktrace when sytest kills the process. --- changelog.d/5369.bugfix | 1 + synapse/handlers/presence.py | 8 +++++++- 2 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 changelog.d/5369.bugfix diff --git a/changelog.d/5369.bugfix b/changelog.d/5369.bugfix new file mode 100644 index 0000000000..cc61618f39 --- /dev/null +++ b/changelog.d/5369.bugfix @@ -0,0 +1 @@ +Fix missing logcontext warnings on shutdown. 
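(For context: the fix below registers run_as_background_process itself as the Twisted
shutdown trigger, so that the _on_shutdown callback runs inside its own logcontext
rather than with no logcontext at all. A minimal sketch of that pattern follows; the
callback body and the "example.on_shutdown" description are illustrative only and are
not part of this patch.)

    from twisted.internet import defer, reactor

    from synapse.metrics.background_process_metrics import run_as_background_process


    @defer.inlineCallbacks
    def _on_shutdown():
        # persist any outstanding state before the reactor stops
        yield defer.succeed(None)


    # At shutdown, Twisted invokes
    #     run_as_background_process("example.on_shutdown", _on_shutdown)
    # which starts a fresh logcontext for the callback and so avoids the
    # missing-logcontext warnings this patch fixes.
    reactor.addSystemEventTrigger(
        "before",
        "shutdown",
        run_as_background_process,
        "example.on_shutdown",
        _on_shutdown,
    )
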
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index e49c8203ef..557fb5f83d 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -158,7 +158,13 @@ class PresenceHandler(object): # have not yet been persisted self.unpersisted_users_changes = set() - hs.get_reactor().addSystemEventTrigger("before", "shutdown", self._on_shutdown) + hs.get_reactor().addSystemEventTrigger( + "before", + "shutdown", + run_as_background_process, + "presence.on_shutdown", + self._on_shutdown, + ) self.serial_to_user = {} self._next_serial = 1 From 3b6645d3bf3d3be449e1162e4135b677a1086ade Mon Sep 17 00:00:00 2001 From: "Amber H. Brown" Date: Fri, 7 Jun 2019 01:20:58 +1000 Subject: [PATCH 345/429] remove background updates that arent needed --- .../full_schemas/54/stream_positions.sql | 34 +------------------ 1 file changed, 1 insertion(+), 33 deletions(-) diff --git a/synapse/storage/schema/full_schemas/54/stream_positions.sql b/synapse/storage/schema/full_schemas/54/stream_positions.sql index 0febedcc5e..084a70db65 100644 --- a/synapse/storage/schema/full_schemas/54/stream_positions.sql +++ b/synapse/storage/schema/full_schemas/54/stream_positions.sql @@ -4,36 +4,4 @@ INSERT INTO federation_stream_position (type, stream_id) VALUES ('federation', - INSERT INTO federation_stream_position (type, stream_id) SELECT 'events', coalesce(max(stream_ordering), -1) FROM events; INSERT INTO user_directory_stream_pos (stream_id) VALUES (null); INSERT INTO stats_stream_pos (stream_id) VALUES (null); -INSERT INTO event_push_summary_stream_ordering (stream_ordering) VALUES (0); - ---- User dir population - --- Set up staging tables -INSERT INTO background_updates (update_name, progress_json) VALUES - ('populate_user_directory_createtables', '{}'); - --- Run through each room and update the user directory according to who is in it -INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES - ('populate_user_directory_process_rooms', '{}', 'populate_user_directory_createtables'); - --- Insert all users, if search_all_users is on -INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES - ('populate_user_directory_process_users', '{}', 'populate_user_directory_process_rooms'); - --- Clean up staging tables -INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES - ('populate_user_directory_cleanup', '{}', 'populate_user_directory_process_users'); - ---- Stats population - --- Set up staging tables -INSERT INTO background_updates (update_name, progress_json) VALUES - ('populate_stats_createtables', '{}'); - --- Run through each room and update stats -INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES - ('populate_stats_process_rooms', '{}', 'populate_stats_createtables'); - --- Clean up staging tables -INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES - ('populate_stats_cleanup', '{}', 'populate_stats_process_rooms'); +INSERT INTO event_push_summary_stream_ordering (stream_ordering) VALUES (0); \ No newline at end of file From f868c8df031e1c4a52e68ab32bb6fda086bd6ad7 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 6 Jun 2019 16:36:28 +0100 Subject: [PATCH 346/429] Regen sample config before kicking off agents (#5370) * Regen sample config before kicking off agents * Add changelog --- .buildkite/pipeline.yml | 4 ++-- changelog.d/5370.misc | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) create mode 
100644 changelog.d/5370.misc diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 44b258dca6..b805b2d839 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -36,8 +36,6 @@ steps: image: "python:3.6" propagate-environment: true - - wait - - command: - "python -m pip install tox" - "tox -e check-sampleconfig" @@ -46,6 +44,8 @@ steps: - docker#v3.0.1: image: "python:3.6" + - wait + - command: - "python -m pip install tox" - "tox -e py27,codecov" diff --git a/changelog.d/5370.misc b/changelog.d/5370.misc new file mode 100644 index 0000000000..b0473ef280 --- /dev/null +++ b/changelog.d/5370.misc @@ -0,0 +1 @@ +Don't run CI build checks until sample config check has passed. From 837e32ef551b9c53d23615cdde56cc9babcc9059 Mon Sep 17 00:00:00 2001 From: "Amber H. Brown" Date: Fri, 7 Jun 2019 01:49:25 +1000 Subject: [PATCH 347/429] just user dir? --- .../full_schemas/54/stream_positions.sql | 20 ++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/synapse/storage/schema/full_schemas/54/stream_positions.sql b/synapse/storage/schema/full_schemas/54/stream_positions.sql index 084a70db65..575ab6b354 100644 --- a/synapse/storage/schema/full_schemas/54/stream_positions.sql +++ b/synapse/storage/schema/full_schemas/54/stream_positions.sql @@ -4,4 +4,22 @@ INSERT INTO federation_stream_position (type, stream_id) VALUES ('federation', - INSERT INTO federation_stream_position (type, stream_id) SELECT 'events', coalesce(max(stream_ordering), -1) FROM events; INSERT INTO user_directory_stream_pos (stream_id) VALUES (null); INSERT INTO stats_stream_pos (stream_id) VALUES (null); -INSERT INTO event_push_summary_stream_ordering (stream_ordering) VALUES (0); \ No newline at end of file +INSERT INTO event_push_summary_stream_ordering (stream_ordering) VALUES (0); + +--- User dir population + +-- Set up staging tables +INSERT INTO background_updates (update_name, progress_json) VALUES + ('populate_user_directory_createtables', '{}'); + +-- Run through each room and update the user directory according to who is in it +INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES + ('populate_user_directory_process_rooms', '{}', 'populate_user_directory_createtables'); + +-- Insert all users, if search_all_users is on +INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES + ('populate_user_directory_process_users', '{}', 'populate_user_directory_process_rooms'); + +-- Clean up staging tables +INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES + ('populate_user_directory_cleanup', '{}', 'populate_user_directory_process_users'); From 833c406b9b34392eb64780eeef6b670be762ea21 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 6 Jun 2019 17:23:02 +0100 Subject: [PATCH 348/429] Neilj/1.0 upgrade notes (#5371) 1.0 upgrade/install notes --- INSTALL.md | 25 +++++++++++++++- UPGRADE.rst | 49 ++++++++++++++++++++++++++++++++ changelog.d/5371.feature | 1 + docs/MSC1711_certificates_FAQ.md | 12 ++++---- 4 files changed, 79 insertions(+), 8 deletions(-) create mode 100644 changelog.d/5371.feature diff --git a/INSTALL.md b/INSTALL.md index 1934593148..d3a450f40f 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -5,6 +5,7 @@ * [Prebuilt packages](#prebuilt-packages) * [Setting up Synapse](#setting-up-synapse) * [TLS certificates](#tls-certificates) + * [Email](#email) * [Registering a user](#registering-a-user) * [Setting up a TURN server](#setting-up-a-turn-server) * [URL previews](#url-previews) @@ -394,9 
+395,31 @@ To configure Synapse to expose an HTTPS port, you will need to edit instance, if using certbot, use `fullchain.pem` as your certificate, not `cert.pem`). -For those of you upgrading your TLS certificate in readiness for Synapse 1.0, +For those of you upgrading your TLS certificate for Synapse 1.0 compliance, please take a look at [our guide](docs/MSC1711_certificates_FAQ.md#configuring-certificates-for-compatibility-with-synapse-100). +## Email + +It is desirable for Synapse to have the capability to send email. For example, +this is required to support the 'password reset' feature. + +To configure an SMTP server for Synapse, modify the configuration section +headed ``email``, and be sure to have at least the ``smtp_host``, ``smtp_port`` +and ``notif_from`` fields filled out. You may also need to set ``smtp_user``, +``smtp_pass``, and ``require_transport_security``.. + +If Synapse is not configured with an SMTP server, password reset via email will + be disabled by default. + +Alternatively it is possible delegate the sending of email to the server's +identity server. Doing so is convenient but not recommended, since a malicious +or compromised identity server could theoretically hijack a given user's +account by redirecting mail. + +If you are absolutely certain that you wish to use the server's identity server +for password resets, set ``trust_identity_server_for_password_resets`` to +``true`` under the ``email:`` configuration section. + ## Registering a user You will need at least one user on your server in order to use a Matrix diff --git a/UPGRADE.rst b/UPGRADE.rst index 228222d534..6032a505c9 100644 --- a/UPGRADE.rst +++ b/UPGRADE.rst @@ -49,6 +49,55 @@ returned by the Client-Server API: # configured on port 443. curl -kv https:///_matrix/client/versions 2>&1 | grep "Server:" +Upgrading to v1.0 +================= + +Validation of TLS certificates +------------------------------ + +Synapse v1.0 is the first release to enforce +validation of TLS certificates for the federation API. It is therefore +essential that your certificates are correctly configured. See the `FAQ +`_ for more information. + +Note, v1.0 installations will also no longer be able to federate with servers +that have not correctly configured their certificates. + +In rare cases, it may be desirable to disable certificate checking: for +example, it might be essential to be able to federate with a given legacy +server in a closed federation. This can be done in one of two ways:- + +* Configure the global switch ``federation_verify_certificates`` to ``false``. +* Configure a whitelist of server domains to trust via ``federation_certificate_verification_whitelist``. + +See the `sample configuration file `_ +for more details on these settings. + +Email +----- +When a user requests a password reset, Synapse will send an email to the +user to confirm the request. + +Previous versions of Synapse delegated the job of sending this email to an +identity server. If the identity server was somehow malicious or became +compromised, it would be theoretically possible to hijack an account through +this means. + +Therefore, by default, Synapse v1.0 will send the confirmation email itself. If +Synapse is not configured with an SMTP server, password reset via email will be +disabled. + +To configure an SMTP server for Synapse, modify the configuration section +headed ``email``, and be sure to have at least the ``smtp_host``, ``smtp_port`` +and ``notif_from`` fields filled out. 
You may also need to set ``smtp_user``, +``smtp_pass``, and ``require_transport_security``. + +If you are absolutely certain that you wish to continue using an identity +server for password resets, set ``trust_identity_server_for_password_resets`` to ``true``. + +See the `sample configuration file `_ +for more details on these settings. + Upgrading to v0.99.0 ==================== diff --git a/changelog.d/5371.feature b/changelog.d/5371.feature new file mode 100644 index 0000000000..7f960630e0 --- /dev/null +++ b/changelog.d/5371.feature @@ -0,0 +1 @@ +Update upgrade and installation guides ahead of 1.0. diff --git a/docs/MSC1711_certificates_FAQ.md b/docs/MSC1711_certificates_FAQ.md index 37f7f669c9..599462bdcb 100644 --- a/docs/MSC1711_certificates_FAQ.md +++ b/docs/MSC1711_certificates_FAQ.md @@ -68,16 +68,14 @@ Admins should upgrade and configure a valid CA cert. Homeservers that require a .well-known entry (see below), should retain their SRV record and use it alongside their .well-known record. -**>= 5th March 2019 - Synapse 1.0.0 is released** +**10th June 2019 - Synapse 1.0.0 is released** -1.0.0 will land no sooner than 1 month after 0.99.0, leaving server admins one -month after 5th February to upgrade to 0.99.0 and deploy their certificates. In +1.0.0 is scheduled for release on 10th June. In accordance with the the [S2S spec](https://matrix.org/docs/spec/server_server/r0.1.0.html) 1.0.0 will enforce certificate validity. This means that any homeserver without a valid certificate after this point will no longer be able to federate with 1.0.0 servers. - ## Configuring certificates for compatibility with Synapse 1.0.0 ### If you do not currently have an SRV record @@ -146,9 +144,9 @@ You can do this with a `.well-known` file as follows: with Synapse 0.34 and earlier. 2. Give Synapse a certificate corresponding to the target domain - (`customer.example.net` in the above example). You can either use Synapse's - built-in [ACME support](./ACME.md) for this (via the `domain` parameter in - the `acme` section), or acquire a certificate yourself and give it to + (`customer.example.net` in the above example). You can either use Synapse's + built-in [ACME support](./ACME.md) for this (via the `domain` parameter in + the `acme` section), or acquire a certificate yourself and give it to Synapse via `tls_certificate_path` and `tls_private_key_path`. 3. Restart Synapse to ensure the new certificate is loaded. From 9fbb20a531161652143028cde333429fe03b0343 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 6 Jun 2019 17:33:11 +0100 Subject: [PATCH 349/429] Stop hardcoding trust of old matrix.org key (#5374) There are a few changes going on here: * We make checking the signature on a key server response optional: if no verify_keys are specified, we trust to TLS to validate the connection. * We change the default config so that it does not require responses to be signed by the old key. * We replace the old 'perspectives' config with 'trusted_key_servers', which is also formatted slightly differently. * We emit a warning to the logs every time we trust a key server response signed by the old key. 
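To make the new format concrete, here is an illustrative before/after of the
key-server configuration (not itself part of the patch), shown as the parsed config
dicts that the new _perspectives_to_key_servers helper added below converts between.
The key id and base64 value are the long-standing matrix.org ones quoted elsewhere in
this change:

    # Old-style config, as parsed from the YAML "perspectives" section:
    old_style = {
        "perspectives": {
            "servers": {
                "matrix.org": {
                    "verify_keys": {
                        "ed25519:auto": {
                            "key": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
                        }
                    }
                }
            }
        }
    }

    # The equivalent new-style config. The old form is still accepted for
    # backwards compatibility and is converted into this shape:
    new_style = {
        "trusted_key_servers": [
            {
                "server_name": "matrix.org",
                "verify_keys": {
                    "ed25519:auto": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw"
                },
            }
        ]
    }

If verify_keys is omitted, responses from that server are validated by TLS alone; if
federation_verify_certificates is also disabled, synapse refuses to start unless
accept_keys_insecurely is set, as described in the sample config below.
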
--- changelog.d/5374.feature | 1 + docs/sample_config.yaml | 43 +++- synapse/config/key.py | 228 +++++++++++++++--- synapse/crypto/keyring.py | 74 +++--- tests/crypto/test_keyring.py | 43 ++-- .../test_matrix_federation_agent.py | 1 + 6 files changed, 294 insertions(+), 96 deletions(-) create mode 100644 changelog.d/5374.feature diff --git a/changelog.d/5374.feature b/changelog.d/5374.feature new file mode 100644 index 0000000000..17937637ab --- /dev/null +++ b/changelog.d/5374.feature @@ -0,0 +1 @@ +Replace the `perspectives` configuration section with `trusted_key_servers`, and make validating the signatures on responses optional (since TLS will do this job for us). diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 2f37e71601..a2e815ea52 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -952,12 +952,43 @@ signing_key_path: "CONFDIR/SERVERNAME.signing.key" # The trusted servers to download signing keys from. # -#perspectives: -# servers: -# "matrix.org": -# verify_keys: -# "ed25519:auto": -# key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw" +# When we need to fetch a signing key, each server is tried in parallel. +# +# Normally, the connection to the key server is validated via TLS certificates. +# Additional security can be provided by configuring a `verify key`, which +# will make synapse check that the response is signed by that key. +# +# This setting supercedes an older setting named `perspectives`. The old format +# is still supported for backwards-compatibility, but it is deprecated. +# +# Options for each entry in the list include: +# +# server_name: the name of the server. required. +# +# verify_keys: an optional map from key id to base64-encoded public key. +# If specified, we will check that the response is signed by at least +# one of the given keys. +# +# accept_keys_insecurely: a boolean. Normally, if `verify_keys` is unset, +# and federation_verify_certificates is not `true`, synapse will refuse +# to start, because this would allow anyone who can spoof DNS responses +# to masquerade as the trusted key server. If you know what you are doing +# and are sure that your network environment provides a secure connection +# to the key server, you can set this to `true` to override this +# behaviour. +# +# An example configuration might look like: +# +#trusted_key_servers: +# - server_name: "my_trusted_server.example.com" +# verify_keys: +# "ed25519:auto": "abcdefghijklmnopqrstuvwxyzabcdefghijklmopqr" +# - server_name: "my_other_trusted_server.example.com" +# +# The default configuration is: +# +#trusted_key_servers: +# - server_name: "matrix.org" # Enable SAML2 for registration and login. Uses pysaml2. diff --git a/synapse/config/key.py b/synapse/config/key.py index eb10259818..aba7092ccd 100644 --- a/synapse/config/key.py +++ b/synapse/config/key.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -17,6 +18,8 @@ import hashlib import logging import os +import attr +import jsonschema from signedjson.key import ( NACL_ED25519, decode_signing_key_base64, @@ -32,11 +35,27 @@ from synapse.util.stringutils import random_string, random_string_with_symbols from ._base import Config, ConfigError +INSECURE_NOTARY_ERROR = """\ +Your server is configured to accept key server responses without signature +validation or TLS certificate validation. This is likely to be very insecure. If +you are *sure* you want to do this, set 'accept_keys_insecurely' on the +keyserver configuration.""" + + logger = logging.getLogger(__name__) -class KeyConfig(Config): +@attr.s +class TrustedKeyServer(object): + # string: name of the server. + server_name = attr.ib() + # dict[str,VerifyKey]|None: map from key id to key object, or None to disable + # signature verification. + verify_keys = attr.ib(default=None) + + +class KeyConfig(Config): def read_config(self, config): # the signing key can be specified inline or in a separate file if "signing_key" in config: @@ -49,16 +68,27 @@ class KeyConfig(Config): config.get("old_signing_keys", {}) ) self.key_refresh_interval = self.parse_duration( - config.get("key_refresh_interval", "1d"), + config.get("key_refresh_interval", "1d") ) - self.perspectives = self.read_perspectives( - config.get("perspectives", {}).get("servers", { - "matrix.org": {"verify_keys": { - "ed25519:auto": { - "key": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw", - } - }} - }) + + # if neither trusted_key_servers nor perspectives are given, use the default. + if "perspectives" not in config and "trusted_key_servers" not in config: + key_servers = [{"server_name": "matrix.org"}] + else: + key_servers = config.get("trusted_key_servers", []) + + if not isinstance(key_servers, list): + raise ConfigError( + "trusted_key_servers, if given, must be a list, not a %s" + % (type(key_servers).__name__,) + ) + + # merge the 'perspectives' config into the 'trusted_key_servers' config. + key_servers.extend(_perspectives_to_key_servers(config)) + + # list of TrustedKeyServer objects + self.key_servers = list( + _parse_key_servers(key_servers, self.federation_verify_certificates) ) self.macaroon_secret_key = config.get( @@ -78,8 +108,9 @@ class KeyConfig(Config): # falsification of values self.form_secret = config.get("form_secret", None) - def default_config(self, config_dir_path, server_name, generate_secrets=False, - **kwargs): + def default_config( + self, config_dir_path, server_name, generate_secrets=False, **kwargs + ): base_key_name = os.path.join(config_dir_path, server_name) if generate_secrets: @@ -91,7 +122,8 @@ class KeyConfig(Config): macaroon_secret_key = "# macaroon_secret_key: " form_secret = "# form_secret: " - return """\ + return ( + """\ # a secret which is used to sign access tokens. If none is specified, # the registration_shared_secret is used, if one is given; otherwise, # a secret key is derived from the signing key. @@ -133,33 +165,53 @@ class KeyConfig(Config): # The trusted servers to download signing keys from. 
# - #perspectives: - # servers: - # "matrix.org": - # verify_keys: - # "ed25519:auto": - # key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw" - """ % locals() - - def read_perspectives(self, perspectives_servers): - servers = {} - for server_name, server_config in perspectives_servers.items(): - for key_id, key_data in server_config["verify_keys"].items(): - if is_signing_algorithm_supported(key_id): - key_base64 = key_data["key"] - key_bytes = decode_base64(key_base64) - verify_key = decode_verify_key_bytes(key_id, key_bytes) - servers.setdefault(server_name, {})[key_id] = verify_key - return servers + # When we need to fetch a signing key, each server is tried in parallel. + # + # Normally, the connection to the key server is validated via TLS certificates. + # Additional security can be provided by configuring a `verify key`, which + # will make synapse check that the response is signed by that key. + # + # This setting supercedes an older setting named `perspectives`. The old format + # is still supported for backwards-compatibility, but it is deprecated. + # + # Options for each entry in the list include: + # + # server_name: the name of the server. required. + # + # verify_keys: an optional map from key id to base64-encoded public key. + # If specified, we will check that the response is signed by at least + # one of the given keys. + # + # accept_keys_insecurely: a boolean. Normally, if `verify_keys` is unset, + # and federation_verify_certificates is not `true`, synapse will refuse + # to start, because this would allow anyone who can spoof DNS responses + # to masquerade as the trusted key server. If you know what you are doing + # and are sure that your network environment provides a secure connection + # to the key server, you can set this to `true` to override this + # behaviour. + # + # An example configuration might look like: + # + #trusted_key_servers: + # - server_name: "my_trusted_server.example.com" + # verify_keys: + # "ed25519:auto": "abcdefghijklmnopqrstuvwxyzabcdefghijklmopqr" + # - server_name: "my_other_trusted_server.example.com" + # + # The default configuration is: + # + #trusted_key_servers: + # - server_name: "matrix.org" + """ + % locals() + ) def read_signing_key(self, signing_key_path): signing_keys = self.read_file(signing_key_path, "signing_key") try: return read_signing_keys(signing_keys.splitlines(True)) except Exception as e: - raise ConfigError( - "Error reading signing_key: %s" % (str(e)) - ) + raise ConfigError("Error reading signing_key: %s" % (str(e))) def read_old_signing_keys(self, old_signing_keys): keys = {} @@ -182,9 +234,7 @@ class KeyConfig(Config): if not self.path_exists(signing_key_path): with open(signing_key_path, "w") as signing_key_file: key_id = "a_" + random_string(4) - write_signing_keys( - signing_key_file, (generate_signing_key(key_id),), - ) + write_signing_keys(signing_key_file, (generate_signing_key(key_id),)) else: signing_keys = self.read_file(signing_key_path, "signing_key") if len(signing_keys.split("\n")[0].split()) == 1: @@ -194,6 +244,106 @@ class KeyConfig(Config): NACL_ED25519, key_id, signing_keys.split("\n")[0] ) with open(signing_key_path, "w") as signing_key_file: - write_signing_keys( - signing_key_file, (key,), + write_signing_keys(signing_key_file, (key,)) + + +def _perspectives_to_key_servers(config): + """Convert old-style 'perspectives' configs into new-style 'trusted_key_servers' + + Returns an iterable of entries to add to trusted_key_servers. 
+ """ + + # 'perspectives' looks like: + # + # { + # "servers": { + # "matrix.org": { + # "verify_keys": { + # "ed25519:auto": { + # "key": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw" + # } + # } + # } + # } + # } + # + # 'trusted_keys' looks like: + # + # [ + # { + # "server_name": "matrix.org", + # "verify_keys": { + # "ed25519:auto": "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw", + # } + # } + # ] + + perspectives_servers = config.get("perspectives", {}).get("servers", {}) + + for server_name, server_opts in perspectives_servers.items(): + trusted_key_server_entry = {"server_name": server_name} + verify_keys = server_opts.get("verify_keys") + if verify_keys is not None: + trusted_key_server_entry["verify_keys"] = { + key_id: key_data["key"] for key_id, key_data in verify_keys.items() + } + yield trusted_key_server_entry + + +TRUSTED_KEY_SERVERS_SCHEMA = { + "$schema": "http://json-schema.org/draft-04/schema#", + "description": "schema for the trusted_key_servers setting", + "type": "array", + "items": { + "type": "object", + "properties": { + "server_name": {"type": "string"}, + "verify_keys": { + "type": "object", + # each key must be a base64 string + "additionalProperties": {"type": "string"}, + }, + }, + "required": ["server_name"], + }, +} + + +def _parse_key_servers(key_servers, federation_verify_certificates): + try: + jsonschema.validate(key_servers, TRUSTED_KEY_SERVERS_SCHEMA) + except jsonschema.ValidationError as e: + raise ConfigError("Unable to parse 'trusted_key_servers': " + e.message) + + for server in key_servers: + server_name = server["server_name"] + result = TrustedKeyServer(server_name=server_name) + + verify_keys = server.get("verify_keys") + if verify_keys is not None: + result.verify_keys = {} + for key_id, key_base64 in verify_keys.items(): + if not is_signing_algorithm_supported(key_id): + raise ConfigError( + "Unsupported signing algorithm on key %s for server %s in " + "trusted_key_servers" % (key_id, server_name) ) + try: + key_bytes = decode_base64(key_base64) + verify_key = decode_verify_key_bytes(key_id, key_bytes) + except Exception as e: + raise ConfigError( + "Unable to parse key %s for server %s in " + "trusted_key_servers: %s" % (key_id, server_name, e) + ) + + result.verify_keys[key_id] = verify_key + + if ( + not verify_keys + and not server.get("accept_keys_insecurely") + and not federation_verify_certificates + ): + raise ConfigError(INSECURE_NOTARY_ERROR) + + yield result diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 2b6b5913bc..96964b0d50 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -585,25 +585,27 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): super(PerspectivesKeyFetcher, self).__init__(hs) self.clock = hs.get_clock() self.client = hs.get_http_client() - self.perspective_servers = self.config.perspectives + self.key_servers = self.config.key_servers @defer.inlineCallbacks def get_keys(self, keys_to_fetch): """see KeyFetcher.get_keys""" @defer.inlineCallbacks - def get_key(perspective_name, perspective_keys): + def get_key(key_server): try: result = yield self.get_server_verify_key_v2_indirect( - keys_to_fetch, perspective_name, perspective_keys + keys_to_fetch, key_server ) defer.returnValue(result) except KeyLookupError as e: - logger.warning("Key lookup failed from %r: %s", perspective_name, e) + logger.warning( + "Key lookup failed from %r: %s", key_server.server_name, e + ) except Exception as e: logger.exception( "Unable to get key from %r: %s %s", - perspective_name, + 
key_server.server_name, type(e).__name__, str(e), ) @@ -613,8 +615,8 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): results = yield logcontext.make_deferred_yieldable( defer.gatherResults( [ - run_in_background(get_key, p_name, p_keys) - for p_name, p_keys in self.perspective_servers.items() + run_in_background(get_key, server) + for server in self.key_servers ], consumeErrors=True, ).addErrback(unwrapFirstError) @@ -629,17 +631,15 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): @defer.inlineCallbacks def get_server_verify_key_v2_indirect( - self, keys_to_fetch, perspective_name, perspective_keys + self, keys_to_fetch, key_server ): """ Args: keys_to_fetch (dict[str, dict[str, int]]): the keys to be fetched. server_name -> key_id -> min_valid_ts - perspective_name (str): name of the notary server to query for the keys - - perspective_keys (dict[str, VerifyKey]): map of key_id->key for the - notary server + key_server (synapse.config.key.TrustedKeyServer): notary server to query for + the keys Returns: Deferred[dict[str, dict[str, synapse.storage.keys.FetchKeyResult]]]: map @@ -649,6 +649,7 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): KeyLookupError if there was an error processing the entire response from the server """ + perspective_name = key_server.server_name logger.info( "Requesting keys %s from notary server %s", keys_to_fetch.items(), @@ -689,11 +690,13 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): ) try: - processed_response = yield self._process_perspectives_response( - perspective_name, - perspective_keys, + self._validate_perspectives_response( + key_server, response, - time_added_ms=time_now_ms, + ) + + processed_response = yield self.process_v2_response( + perspective_name, response, time_added_ms=time_now_ms ) except KeyLookupError as e: logger.warning( @@ -717,28 +720,24 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): defer.returnValue(keys) - def _process_perspectives_response( - self, perspective_name, perspective_keys, response, time_added_ms + def _validate_perspectives_response( + self, key_server, response, ): - """Parse a 'Server Keys' structure from the result of a /key/query request - - Checks that the entry is correctly signed by the perspectives server, and then - passes over to process_v2_response + """Optionally check the signature on the result of a /key/query request Args: - perspective_name (str): the name of the notary server that produced this - result - - perspective_keys (dict[str, VerifyKey]): map of key_id->key for the - notary server + key_server (synapse.config.key.TrustedKeyServer): the notary server that + produced this result response (dict): the json-decoded Server Keys response object - - time_added_ms (int): the timestamp to record in server_keys_json - - Returns: - Deferred[dict[str, FetchKeyResult]]: map from key_id to result object """ + perspective_name = key_server.server_name + perspective_keys = key_server.verify_keys + + if perspective_keys is None: + # signature checking is disabled on this server + return + if ( u"signatures" not in response or perspective_name not in response[u"signatures"] @@ -751,6 +750,13 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): verify_signed_json(response, perspective_name, perspective_keys[key_id]) verified = True + if perspective_name == "matrix.org" and key_id == "ed25519:auto": + logger.warning( + "Trusting trusted_key_server responses signed by the " + "compromised matrix.org signing key 'ed25519:auto'. " + "This is a placebo." 
+ ) + if not verified: raise KeyLookupError( "Response not signed with a known key: signed with: %r, known keys: %r" @@ -760,10 +766,6 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): ) ) - return self.process_v2_response( - perspective_name, response, time_added_ms=time_added_ms - ) - class ServerKeyFetcher(BaseV2KeyFetcher): """KeyFetcher impl which fetches keys from the origin servers""" diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 18121f4f6c..4b1901ce31 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -19,7 +19,7 @@ from mock import Mock import canonicaljson import signedjson.key import signedjson.sign -from signedjson.key import get_verify_key +from signedjson.key import encode_verify_key_base64, get_verify_key from twisted.internet import defer @@ -40,7 +40,7 @@ class MockPerspectiveServer(object): def get_verify_keys(self): vk = signedjson.key.get_verify_key(self.key) - return {"%s:%s" % (vk.alg, vk.version): vk} + return {"%s:%s" % (vk.alg, vk.version): encode_verify_key_base64(vk)} def get_signed_key(self, server_name, verify_key): key_id = "%s:%s" % (verify_key.alg, verify_key.version) @@ -48,9 +48,7 @@ class MockPerspectiveServer(object): "server_name": server_name, "old_verify_keys": {}, "valid_until_ts": time.time() * 1000 + 3600, - "verify_keys": { - key_id: {"key": signedjson.key.encode_verify_key_base64(verify_key)} - }, + "verify_keys": {key_id: {"key": encode_verify_key_base64(verify_key)}}, } self.sign_response(res) return res @@ -63,10 +61,18 @@ class KeyringTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): self.mock_perspective_server = MockPerspectiveServer() self.http_client = Mock() - hs = self.setup_test_homeserver(handlers=None, http_client=self.http_client) - keys = self.mock_perspective_server.get_verify_keys() - hs.config.perspectives = {self.mock_perspective_server.server_name: keys} - return hs + + config = self.default_config() + config["trusted_key_servers"] = [ + { + "server_name": self.mock_perspective_server.server_name, + "verify_keys": self.mock_perspective_server.get_verify_keys(), + } + ] + + return self.setup_test_homeserver( + handlers=None, http_client=self.http_client, config=config + ) def check_context(self, _, expected): self.assertEquals( @@ -371,10 +377,18 @@ class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): self.mock_perspective_server = MockPerspectiveServer() self.http_client = Mock() - hs = self.setup_test_homeserver(handlers=None, http_client=self.http_client) - keys = self.mock_perspective_server.get_verify_keys() - hs.config.perspectives = {self.mock_perspective_server.server_name: keys} - return hs + + config = self.default_config() + config["trusted_key_servers"] = [ + { + "server_name": self.mock_perspective_server.server_name, + "verify_keys": self.mock_perspective_server.get_verify_keys(), + } + ] + + return self.setup_test_homeserver( + handlers=None, http_client=self.http_client, config=config + ) def test_get_keys_from_perspectives(self): # arbitrarily advance the clock a bit @@ -439,8 +453,7 @@ class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase): self.assertEqual(res["ts_valid_until_ms"], VALID_UNTIL_TS) self.assertEqual( - bytes(res["key_json"]), - canonicaljson.encode_canonical_json(response), + bytes(res["key_json"]), canonicaljson.encode_canonical_json(response) ) def test_invalid_perspectives_responses(self): diff --git 
a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index 4153da4da7..05880a1048 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -57,6 +57,7 @@ class MatrixFederationAgentTests(TestCase): # present will not be trusted. We should do better here, though. config_dict = default_config("test", parse=False) config_dict["federation_verify_certificates"] = False + config_dict["trusted_key_servers"] = [] config = HomeServerConfig() config.parse_config_dict(config_dict) From 3719680ee42b72b8480fa76a1455576897b65ef0 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 6 Jun 2019 17:34:07 +0100 Subject: [PATCH 350/429] Add ability to perform password reset via email without trusting the identity server (#5377) Sends password reset emails from the homeserver instead of proxying to the identity server. This is now the default behaviour for security reasons. If you wish to continue proxying password reset requests to the identity server you must now enable the email.trust_identity_server_for_password_resets option. This PR is a culmination of 3 smaller PRs which have each been separately reviewed: * #5308 * #5345 * #5368 --- changelog.d/5377.feature | 1 + docs/sample_config.yaml | 60 +++- synapse/api/errors.py | 9 + synapse/app/homeserver.py | 1 + synapse/config/emailconfig.py | 153 ++++++++- synapse/handlers/auth.py | 64 +++- synapse/handlers/identity.py | 13 +- synapse/push/mailer.py | 87 ++++-- synapse/push/pusher.py | 4 +- synapse/python_dependencies.py | 2 +- synapse/res/templates/password_reset.html | 9 + synapse/res/templates/password_reset.txt | 7 + .../res/templates/password_reset_failure.html | 6 + .../res/templates/password_reset_success.html | 6 + synapse/rest/client/v2_alpha/account.py | 243 ++++++++++++++- synapse/storage/_base.py | 6 +- synapse/storage/prepare_database.py | 2 +- synapse/storage/registration.py | 290 +++++++++++++++++- .../delta/55/track_threepid_validations.sql | 31 ++ tests/utils.py | 1 - 20 files changed, 923 insertions(+), 72 deletions(-) create mode 100644 changelog.d/5377.feature create mode 100644 synapse/res/templates/password_reset.html create mode 100644 synapse/res/templates/password_reset.txt create mode 100644 synapse/res/templates/password_reset_failure.html create mode 100644 synapse/res/templates/password_reset_success.html create mode 100644 synapse/storage/schema/delta/55/track_threepid_validations.sql diff --git a/changelog.d/5377.feature b/changelog.d/5377.feature new file mode 100644 index 0000000000..6aae41847a --- /dev/null +++ b/changelog.d/5377.feature @@ -0,0 +1 @@ +Add ability to perform password reset via email without trusting the identity server. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index a2e815ea52..ea73306fb9 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1065,10 +1065,8 @@ password_config: -# Enable sending emails for notification events or expiry notices -# Defining a custom URL for Riot is only needed if email notifications -# should contain links to a self-hosted installation of Riot; when set -# the "app_name" setting is ignored. 
+# Enable sending emails for password resets, notification events or +# account expiry notices # # If your SMTP server requires authentication, the optional smtp_user & # smtp_pass variables should be used @@ -1076,22 +1074,64 @@ password_config: #email: # enable_notifs: false # smtp_host: "localhost" -# smtp_port: 25 +# smtp_port: 25 # SSL: 465, STARTTLS: 587 # smtp_user: "exampleusername" # smtp_pass: "examplepassword" # require_transport_security: False # notif_from: "Your Friendly %(app)s Home Server " # app_name: Matrix -# # if template_dir is unset, uses the example templates that are part of -# # the Synapse distribution. +# +# # Enable email notifications by default +# notif_for_new_users: True +# +# # Defining a custom URL for Riot is only needed if email notifications +# # should contain links to a self-hosted installation of Riot; when set +# # the "app_name" setting is ignored +# riot_base_url: "http://localhost/riot" +# +# # Enable sending password reset emails via the configured, trusted +# # identity servers +# # +# # IMPORTANT! This will give a malicious or overtaken identity server +# # the ability to reset passwords for your users! Make absolutely sure +# # that you want to do this! It is strongly recommended that password +# # reset emails be sent by the homeserver instead +# # +# # If this option is set to false and SMTP options have not been +# # configured, resetting user passwords via email will be disabled +# #trust_identity_server_for_password_resets: false +# +# # Configure the time that a validation email or text message code +# # will expire after sending +# # +# # This is currently used for password resets +# #validation_token_lifetime: 1h +# +# # Template directory. All template files should be stored within this +# # directory +# # # #template_dir: res/templates +# +# # Templates for email notifications +# # # notif_template_html: notif_mail.html # notif_template_text: notif_mail.txt -# # Templates for account expiry notices. +# +# # Templates for account expiry notices +# # # expiry_template_html: notice_expiry.html # expiry_template_text: notice_expiry.txt -# notif_for_new_users: True -# riot_base_url: "http://localhost/riot" +# +# # Templates for password reset emails sent by the homeserver +# # +# #password_reset_template_html: password_reset.html +# #password_reset_template_text: password_reset.txt +# +# # Templates for password reset success and failure pages that a user +# # will see after attempting to reset their password +# # +# #password_reset_template_success_html: password_reset_success.html +# #password_reset_template_failure_html: password_reset_failure.html #password_providers: diff --git a/synapse/api/errors.py b/synapse/api/errors.py index e91697049c..66201d6efe 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -339,6 +339,15 @@ class UnsupportedRoomVersionError(SynapseError): ) +class ThreepidValidationError(SynapseError): + """An error raised when there was a problem authorising an event.""" + + def __init__(self, *args, **kwargs): + if "errcode" not in kwargs: + kwargs["errcode"] = Codes.FORBIDDEN + super(ThreepidValidationError, self).__init__(*args, **kwargs) + + class IncompatibleRoomVersionError(SynapseError): """A server is trying to join a room whose version it does not support. 
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 1045d28949..df524a23dd 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -176,6 +176,7 @@ class SynapseHomeServer(HomeServer): resources.update({ "/_matrix/client/api/v1": client_resource, + "/_synapse/password_reset": client_resource, "/_matrix/client/r0": client_resource, "/_matrix/client/unstable": client_resource, "/_matrix/client/v2_alpha": client_resource, diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index 8400471f40..ae04252906 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -50,6 +50,11 @@ class EmailConfig(Config): else: self.email_app_name = "Matrix" + # TODO: Rename notif_from to something more generic, or have a separate + # from for password resets, message notifications, etc? + # Currently the email section is a bit bogged down with settings for + # multiple functions. Would be good to split it out into separate + # sections and only put the common ones under email: self.email_notif_from = email_config.get("notif_from", None) if self.email_notif_from is not None: # make sure it's valid @@ -74,7 +79,28 @@ class EmailConfig(Config): "account_validity", {}, ).get("renew_at") - if self.email_enable_notifs or account_validity_renewal_enabled: + email_trust_identity_server_for_password_resets = email_config.get( + "trust_identity_server_for_password_resets", False, + ) + self.email_password_reset_behaviour = ( + "remote" if email_trust_identity_server_for_password_resets else "local" + ) + if self.email_password_reset_behaviour == "local" and email_config == {}: + logger.warn( + "User password resets have been disabled due to lack of email config" + ) + self.email_password_reset_behaviour = "off" + + # Get lifetime of a validation token in milliseconds + self.email_validation_token_lifetime = self.parse_duration( + email_config.get("validation_token_lifetime", "1h") + ) + + if ( + self.email_enable_notifs + or account_validity_renewal_enabled + or self.email_password_reset_behaviour == "local" + ): # make sure we can import the required deps import jinja2 import bleach @@ -82,6 +108,67 @@ class EmailConfig(Config): jinja2 bleach + if self.email_password_reset_behaviour == "local": + required = [ + "smtp_host", + "smtp_port", + "notif_from", + ] + + missing = [] + for k in required: + if k not in email_config: + missing.append(k) + + if (len(missing) > 0): + raise RuntimeError( + "email.password_reset_behaviour is set to 'local' " + "but required keys are missing: %s" % + (", ".join(["email." 
+ k for k in missing]),) + ) + + # Templates for password reset emails + self.email_password_reset_template_html = email_config.get( + "password_reset_template_html", "password_reset.html", + ) + self.email_password_reset_template_text = email_config.get( + "password_reset_template_text", "password_reset.txt", + ) + self.email_password_reset_failure_template = email_config.get( + "password_reset_failure_template", "password_reset_failure.html", + ) + # This template does not support any replaceable variables, so we will + # read it from the disk once during setup + email_password_reset_success_template = email_config.get( + "password_reset_success_template", "password_reset_success.html", + ) + + # Check templates exist + for f in [self.email_password_reset_template_html, + self.email_password_reset_template_text, + self.email_password_reset_failure_template, + email_password_reset_success_template]: + p = os.path.join(self.email_template_dir, f) + if not os.path.isfile(p): + raise ConfigError("Unable to find template file %s" % (p, )) + + # Retrieve content of web templates + filepath = os.path.join( + self.email_template_dir, + email_password_reset_success_template, + ) + self.email_password_reset_success_html_content = self.read_file( + filepath, + "email.password_reset_template_success_html", + ) + + if config.get("public_baseurl") is None: + raise RuntimeError( + "email.password_reset_behaviour is set to 'local' but no " + "public_baseurl is set. This is necessary to generate password " + "reset links" + ) + if self.email_enable_notifs: required = [ "smtp_host", @@ -121,10 +208,6 @@ class EmailConfig(Config): self.email_riot_base_url = email_config.get( "riot_base_url", None ) - else: - self.email_enable_notifs = False - # Not much point setting defaults for the rest: it would be an - # error for them to be used. if account_validity_renewal_enabled: self.email_expiry_template_html = email_config.get( @@ -141,10 +224,8 @@ class EmailConfig(Config): def default_config(self, config_dir_path, server_name, **kwargs): return """ - # Enable sending emails for notification events or expiry notices - # Defining a custom URL for Riot is only needed if email notifications - # should contain links to a self-hosted installation of Riot; when set - # the "app_name" setting is ignored. + # Enable sending emails for password resets, notification events or + # account expiry notices # # If your SMTP server requires authentication, the optional smtp_user & # smtp_pass variables should be used @@ -152,20 +233,62 @@ class EmailConfig(Config): #email: # enable_notifs: false # smtp_host: "localhost" - # smtp_port: 25 + # smtp_port: 25 # SSL: 465, STARTTLS: 587 # smtp_user: "exampleusername" # smtp_pass: "examplepassword" # require_transport_security: False # notif_from: "Your Friendly %(app)s Home Server " # app_name: Matrix - # # if template_dir is unset, uses the example templates that are part of - # # the Synapse distribution. + # + # # Enable email notifications by default + # notif_for_new_users: True + # + # # Defining a custom URL for Riot is only needed if email notifications + # # should contain links to a self-hosted installation of Riot; when set + # # the "app_name" setting is ignored + # riot_base_url: "http://localhost/riot" + # + # # Enable sending password reset emails via the configured, trusted + # # identity servers + # # + # # IMPORTANT! This will give a malicious or overtaken identity server + # # the ability to reset passwords for your users! 
Make absolutely sure + # # that you want to do this! It is strongly recommended that password + # # reset emails be sent by the homeserver instead + # # + # # If this option is set to false and SMTP options have not been + # # configured, resetting user passwords via email will be disabled + # #trust_identity_server_for_password_resets: false + # + # # Configure the time that a validation email or text message code + # # will expire after sending + # # + # # This is currently used for password resets + # #validation_token_lifetime: 1h + # + # # Template directory. All template files should be stored within this + # # directory + # # # #template_dir: res/templates + # + # # Templates for email notifications + # # # notif_template_html: notif_mail.html # notif_template_text: notif_mail.txt - # # Templates for account expiry notices. + # + # # Templates for account expiry notices + # # # expiry_template_html: notice_expiry.html # expiry_template_text: notice_expiry.txt - # notif_for_new_users: True - # riot_base_url: "http://localhost/riot" + # + # # Templates for password reset emails sent by the homeserver + # # + # #password_reset_template_html: password_reset.html + # #password_reset_template_text: password_reset.txt + # + # # Templates for password reset success and failure pages that a user + # # will see after attempting to reset their password + # # + # #password_reset_template_success_html: password_reset_success.html + # #password_reset_template_failure_html: password_reset_failure.html """ diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index aa5d89a9ac..7f8ddc99c6 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -162,7 +162,7 @@ class AuthHandler(BaseHandler): defer.returnValue(params) @defer.inlineCallbacks - def check_auth(self, flows, clientdict, clientip): + def check_auth(self, flows, clientdict, clientip, password_servlet=False): """ Takes a dictionary sent by the client in the login / registration protocol and handles the User-Interactive Auth flow. @@ -186,6 +186,16 @@ class AuthHandler(BaseHandler): clientip (str): The IP address of the client. + password_servlet (bool): Whether the request originated from + PasswordRestServlet. + XXX: This is a temporary hack to distinguish between checking + for threepid validations locally (in the case of password + resets) and using the identity server (in the case of binding + a 3PID during registration). Once we start using the + homeserver for both tasks, this distinction will no longer be + necessary. + + Returns: defer.Deferred[dict, dict, str]: a deferred tuple of (creds, params, session_id). 
@@ -241,7 +251,9 @@ class AuthHandler(BaseHandler): if 'type' in authdict: login_type = authdict['type'] try: - result = yield self._check_auth_dict(authdict, clientip) + result = yield self._check_auth_dict( + authdict, clientip, password_servlet=password_servlet, + ) if result: creds[login_type] = result self._save_session(session) @@ -351,7 +363,7 @@ class AuthHandler(BaseHandler): return sess.setdefault('serverdict', {}).get(key, default) @defer.inlineCallbacks - def _check_auth_dict(self, authdict, clientip): + def _check_auth_dict(self, authdict, clientip, password_servlet=False): """Attempt to validate the auth dict provided by a client Args: @@ -369,7 +381,13 @@ class AuthHandler(BaseHandler): login_type = authdict['type'] checker = self.checkers.get(login_type) if checker is not None: - res = yield checker(authdict, clientip) + # XXX: Temporary workaround for having Synapse handle password resets + # See AuthHandler.check_auth for further details + res = yield checker( + authdict, + clientip=clientip, + password_servlet=password_servlet, + ) defer.returnValue(res) # build a v1-login-style dict out of the authdict and fall back to the @@ -383,7 +401,7 @@ class AuthHandler(BaseHandler): defer.returnValue(canonical_id) @defer.inlineCallbacks - def _check_recaptcha(self, authdict, clientip): + def _check_recaptcha(self, authdict, clientip, **kwargs): try: user_response = authdict["response"] except KeyError: @@ -429,20 +447,20 @@ class AuthHandler(BaseHandler): defer.returnValue(True) raise LoginError(401, "", errcode=Codes.UNAUTHORIZED) - def _check_email_identity(self, authdict, _): - return self._check_threepid('email', authdict) + def _check_email_identity(self, authdict, **kwargs): + return self._check_threepid('email', authdict, **kwargs) - def _check_msisdn(self, authdict, _): + def _check_msisdn(self, authdict, **kwargs): return self._check_threepid('msisdn', authdict) - def _check_dummy_auth(self, authdict, _): + def _check_dummy_auth(self, authdict, **kwargs): return defer.succeed(True) - def _check_terms_auth(self, authdict, _): + def _check_terms_auth(self, authdict, **kwargs): return defer.succeed(True) @defer.inlineCallbacks - def _check_threepid(self, medium, authdict): + def _check_threepid(self, medium, authdict, password_servlet=False, **kwargs): if 'threepid_creds' not in authdict: raise LoginError(400, "Missing threepid_creds", Codes.MISSING_PARAM) @@ -451,7 +469,29 @@ class AuthHandler(BaseHandler): identity_handler = self.hs.get_handlers().identity_handler logger.info("Getting validated threepid. 
threepidcreds: %r", (threepid_creds,)) - threepid = yield identity_handler.threepid_from_creds(threepid_creds) + if ( + not password_servlet + or self.hs.config.email_password_reset_behaviour == "remote" + ): + threepid = yield identity_handler.threepid_from_creds(threepid_creds) + elif self.hs.config.email_password_reset_behaviour == "local": + row = yield self.store.get_threepid_validation_session( + medium, + threepid_creds["client_secret"], + sid=threepid_creds["sid"], + ) + + threepid = { + "medium": row["medium"], + "address": row["address"], + "validated_at": row["validated_at"], + } if row else None + + if row: + # Valid threepid returned, delete from the db + yield self.store.delete_threepid_session(threepid_creds["sid"]) + else: + raise SynapseError(400, "Password resets are not enabled on this homeserver") if not threepid: raise LoginError(401, "", errcode=Codes.UNAUTHORIZED) diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 22469486d7..04caf65793 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -247,7 +247,14 @@ class IdentityHandler(BaseHandler): defer.returnValue(changed) @defer.inlineCallbacks - def requestEmailToken(self, id_server, email, client_secret, send_attempt, **kwargs): + def requestEmailToken( + self, + id_server, + email, + client_secret, + send_attempt, + next_link=None, + ): if not self._should_trust_id_server(id_server): raise SynapseError( 400, "Untrusted ID server '%s'" % id_server, @@ -259,7 +266,9 @@ class IdentityHandler(BaseHandler): 'client_secret': client_secret, 'send_attempt': send_attempt, } - params.update(kwargs) + + if next_link: + params.update({'next_link': next_link}) try: data = yield self.http_client.post_json_get_json( diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index c269bcf4a4..4bc9eb7313 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -80,10 +80,10 @@ ALLOWED_ATTRS = { class Mailer(object): - def __init__(self, hs, app_name, notif_template_html, notif_template_text): + def __init__(self, hs, app_name, template_html, template_text): self.hs = hs - self.notif_template_html = notif_template_html - self.notif_template_text = notif_template_text + self.template_html = template_html + self.template_text = template_text self.sendmail = self.hs.get_sendmail() self.store = self.hs.get_datastore() @@ -93,22 +93,49 @@ class Mailer(object): logger.info("Created Mailer for app_name %s" % app_name) + @defer.inlineCallbacks + def send_password_reset_mail( + self, + email_address, + token, + client_secret, + sid, + ): + """Send an email with a password reset link to a user + + Args: + email_address (str): Email address we're sending the password + reset to + token (str): Unique token generated by the server to verify + password reset email was received + client_secret (str): Unique token generated by the client to + group together multiple email sending attempts + sid (str): The generated session ID + """ + if email.utils.parseaddr(email_address)[1] == '': + raise RuntimeError("Invalid 'to' email address") + + link = ( + self.hs.config.public_baseurl + + "_synapse/password_reset/email/submit_token" + "?token=%s&client_secret=%s&sid=%s" % + (token, client_secret, sid) + ) + + template_vars = { + "link": link, + } + + yield self.send_email( + email_address, + "[%s] Password Reset Email" % self.hs.config.server_name, + template_vars, + ) + @defer.inlineCallbacks def send_notification_mail(self, app_id, user_id, email_address, push_actions, reason): - try: - 
from_string = self.hs.config.email_notif_from % { - "app": self.app_name - } - except TypeError: - from_string = self.hs.config.email_notif_from - - raw_from = email.utils.parseaddr(from_string)[1] - raw_to = email.utils.parseaddr(email_address)[1] - - if raw_to == '': - raise RuntimeError("Invalid 'to' address") - + """Send email regarding a user's room notifications""" rooms_in_order = deduped_ordered_list( [pa['room_id'] for pa in push_actions] ) @@ -176,14 +203,36 @@ class Mailer(object): "reason": reason, } - html_text = self.notif_template_html.render(**template_vars) + yield self.send_email( + email_address, + "[%s] %s" % (self.app_name, summary_text), + template_vars, + ) + + @defer.inlineCallbacks + def send_email(self, email_address, subject, template_vars): + """Send an email with the given information and template text""" + try: + from_string = self.hs.config.email_notif_from % { + "app": self.app_name + } + except TypeError: + from_string = self.hs.config.email_notif_from + + raw_from = email.utils.parseaddr(from_string)[1] + raw_to = email.utils.parseaddr(email_address)[1] + + if raw_to == '': + raise RuntimeError("Invalid 'to' address") + + html_text = self.template_html.render(**template_vars) html_part = MIMEText(html_text, "html", "utf8") - plain_text = self.notif_template_text.render(**template_vars) + plain_text = self.template_text.render(**template_vars) text_part = MIMEText(plain_text, "plain", "utf8") multipart_msg = MIMEMultipart('alternative') - multipart_msg['Subject'] = "[%s] %s" % (self.app_name, summary_text) + multipart_msg['Subject'] = subject multipart_msg['From'] = from_string multipart_msg['To'] = email_address multipart_msg['Date'] = email.utils.formatdate() diff --git a/synapse/push/pusher.py b/synapse/push/pusher.py index 14bc7823cf..aff85daeb5 100644 --- a/synapse/push/pusher.py +++ b/synapse/push/pusher.py @@ -70,8 +70,8 @@ class PusherFactory(object): mailer = Mailer( hs=self.hs, app_name=app_name, - notif_template_html=self.notif_template_html, - notif_template_text=self.notif_template_text, + template_html=self.notif_template_html, + template_text=self.notif_template_text, ) self.mailers[app_name] = mailer return EmailPusher(self.hs, pusherdict, mailer) diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index f64baa4d58..c78f2cb15e 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -77,7 +77,7 @@ REQUIREMENTS = [ ] CONDITIONAL_REQUIREMENTS = { - "email.enable_notifs": ["Jinja2>=2.9", "bleach>=1.4.2"], + "email": ["Jinja2>=2.9", "bleach>=1.4.2"], "matrix-synapse-ldap3": ["matrix-synapse-ldap3>=0.1"], # we use execute_batch, which arrived in psycopg 2.7. diff --git a/synapse/res/templates/password_reset.html b/synapse/res/templates/password_reset.html new file mode 100644 index 0000000000..4fa7b36734 --- /dev/null +++ b/synapse/res/templates/password_reset.html @@ -0,0 +1,9 @@ + + +
+<p>A password reset request has been received for your Matrix account. If this was you, please click the link below to confirm resetting your password:</p>
+
+<a href="{{ link }}">{{ link }}</a>
+
+<p>If this was not you, please disregard this email and contact your server administrator. Thank you.</p>
+ + diff --git a/synapse/res/templates/password_reset.txt b/synapse/res/templates/password_reset.txt new file mode 100644 index 0000000000..f0deff59a7 --- /dev/null +++ b/synapse/res/templates/password_reset.txt @@ -0,0 +1,7 @@ +A password reset request has been received for your Matrix account. If this +was you, please click the link below to confirm resetting your password: + +{{ link }} + +If this was not you, please disregard this email and contact your server +administrator. Thank you. diff --git a/synapse/res/templates/password_reset_failure.html b/synapse/res/templates/password_reset_failure.html new file mode 100644 index 0000000000..0b132cf8db --- /dev/null +++ b/synapse/res/templates/password_reset_failure.html @@ -0,0 +1,6 @@ + + + +
+<p>{{ failure_reason }}. Your password has not been reset.</p>
+ + diff --git a/synapse/res/templates/password_reset_success.html b/synapse/res/templates/password_reset_success.html new file mode 100644 index 0000000000..7b6fa5e6f0 --- /dev/null +++ b/synapse/res/templates/password_reset_success.html @@ -0,0 +1,6 @@ + + + +
+<p>Your password was successfully reset. You may now close this window.</p>
+ + diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index ca35dc3c83..e4c63b69b9 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -15,19 +15,25 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging +import re from six.moves import http_client +import jinja2 + from twisted.internet import defer from synapse.api.constants import LoginType -from synapse.api.errors import Codes, SynapseError +from synapse.api.errors import Codes, SynapseError, ThreepidValidationError +from synapse.http.server import finish_request from synapse.http.servlet import ( RestServlet, assert_params_in_dict, parse_json_object_from_request, + parse_string, ) from synapse.util.msisdn import phone_number_to_msisdn +from synapse.util.stringutils import random_string from synapse.util.threepids import check_3pid_allowed from ._base import client_patterns, interactive_auth_handler @@ -41,17 +47,42 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): def __init__(self, hs): super(EmailPasswordRequestTokenRestServlet, self).__init__() self.hs = hs + self.datastore = hs.get_datastore() + self.config = hs.config self.identity_handler = hs.get_handlers().identity_handler + if self.config.email_password_reset_behaviour == "local": + from synapse.push.mailer import Mailer, load_jinja2_templates + templates = load_jinja2_templates( + config=hs.config, + template_html_name=hs.config.email_password_reset_template_html, + template_text_name=hs.config.email_password_reset_template_text, + ) + self.mailer = Mailer( + hs=self.hs, + app_name=self.config.email_app_name, + template_html=templates[0], + template_text=templates[1], + ) + @defer.inlineCallbacks def on_POST(self, request): + if self.config.email_password_reset_behaviour == "off": + raise SynapseError(400, "Password resets have been disabled on this server") + body = parse_json_object_from_request(request) assert_params_in_dict(body, [ - 'id_server', 'client_secret', 'email', 'send_attempt' + 'client_secret', 'email', 'send_attempt' ]) - if not check_3pid_allowed(self.hs, "email", body['email']): + # Extract params from body + client_secret = body["client_secret"] + email = body["email"] + send_attempt = body["send_attempt"] + next_link = body.get("next_link") # Optional param + + if not check_3pid_allowed(self.hs, "email", email): raise SynapseError( 403, "Your email domain is not authorized on this server", @@ -59,15 +90,100 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): ) existingUid = yield self.hs.get_datastore().get_user_id_by_threepid( - 'email', body['email'] + 'email', email, ) if existingUid is None: raise SynapseError(400, "Email not found", Codes.THREEPID_NOT_FOUND) - ret = yield self.identity_handler.requestEmailToken(**body) + if self.config.email_password_reset_behaviour == "remote": + if 'id_server' not in body: + raise SynapseError(400, "Missing 'id_server' param in body") + + # Have the identity server handle the password reset flow + ret = yield self.identity_handler.requestEmailToken( + body["id_server"], email, client_secret, send_attempt, next_link, + ) + else: + # Send password reset emails from Synapse + sid = yield self.send_password_reset( + email, client_secret, send_attempt, next_link, + ) + + # Wrap the session id in a JSON object + ret = {"sid": sid} + defer.returnValue((200, ret)) + @defer.inlineCallbacks + def send_password_reset( + self, + email, + client_secret, 
+ send_attempt, + next_link=None, + ): + """Send a password reset email + + Args: + email (str): The user's email address + client_secret (str): The provided client secret + send_attempt (int): Which send attempt this is + + Returns: + The new session_id upon success + + Raises: + SynapseError is an error occurred when sending the email + """ + # Check that this email/client_secret/send_attempt combo is new or + # greater than what we've seen previously + session = yield self.datastore.get_threepid_validation_session( + "email", client_secret, address=email, validated=False, + ) + + # Check to see if a session already exists and that it is not yet + # marked as validated + if session and session.get("validated_at") is None: + session_id = session['session_id'] + last_send_attempt = session['last_send_attempt'] + + # Check that the send_attempt is higher than previous attempts + if send_attempt <= last_send_attempt: + # If not, just return a success without sending an email + defer.returnValue(session_id) + else: + # An non-validated session does not exist yet. + # Generate a session id + session_id = random_string(16) + + # Generate a new validation token + token = random_string(32) + + # Send the mail with the link containing the token, client_secret + # and session_id + try: + yield self.mailer.send_password_reset_mail( + email, token, client_secret, session_id, + ) + except Exception: + logger.exception( + "Error sending a password reset email to %s", email, + ) + raise SynapseError( + 500, "An error was encountered when sending the password reset email" + ) + + token_expires = (self.hs.clock.time_msec() + + self.config.email_validation_token_lifetime) + + yield self.datastore.start_or_continue_validation_session( + "email", email, session_id, client_secret, send_attempt, + next_link, token, token_expires, + ) + + defer.returnValue(session_id) + class MsisdnPasswordRequestTokenRestServlet(RestServlet): PATTERNS = client_patterns("/account/password/msisdn/requestToken$") @@ -80,6 +196,9 @@ class MsisdnPasswordRequestTokenRestServlet(RestServlet): @defer.inlineCallbacks def on_POST(self, request): + if not self.config.email_password_reset_behaviour == "off": + raise SynapseError(400, "Password resets have been disabled on this server") + body = parse_json_object_from_request(request) assert_params_in_dict(body, [ @@ -107,6 +226,118 @@ class MsisdnPasswordRequestTokenRestServlet(RestServlet): defer.returnValue((200, ret)) +class PasswordResetSubmitTokenServlet(RestServlet): + """Handles 3PID validation token submission""" + PATTERNS = [ + re.compile("^/_synapse/password_reset/(?P[^/]*)/submit_token/*$"), + ] + + def __init__(self, hs): + """ + Args: + hs (synapse.server.HomeServer): server + """ + super(PasswordResetSubmitTokenServlet, self).__init__() + self.hs = hs + self.auth = hs.get_auth() + self.config = hs.config + self.clock = hs.get_clock() + self.datastore = hs.get_datastore() + + @defer.inlineCallbacks + def on_GET(self, request, medium): + if medium != "email": + raise SynapseError( + 400, + "This medium is currently not supported for password resets", + ) + + sid = parse_string(request, "sid") + client_secret = parse_string(request, "client_secret") + token = parse_string(request, "token") + + # Attempt to validate a 3PID sesssion + try: + # Mark the session as valid + next_link = yield self.datastore.validate_threepid_session( + sid, + client_secret, + token, + self.clock.time_msec(), + ) + + # Perform a 302 redirect if next_link is set + if next_link: + if 
next_link.startswith("file:///"): + logger.warn( + "Not redirecting to next_link as it is a local file: address" + ) + else: + request.setResponseCode(302) + request.setHeader("Location", next_link) + finish_request(request) + defer.returnValue(None) + + # Otherwise show the success template + html = self.config.email_password_reset_success_html_content + request.setResponseCode(200) + except ThreepidValidationError as e: + # Show a failure page with a reason + html = self.load_jinja2_template( + self.config.email_template_dir, + self.config.email_password_reset_failure_template, + template_vars={ + "failure_reason": e.msg, + } + ) + request.setResponseCode(e.code) + + request.write(html.encode('utf-8')) + finish_request(request) + defer.returnValue(None) + + def load_jinja2_template(self, template_dir, template_filename, template_vars): + """Loads a jinja2 template with variables to insert + + Args: + template_dir (str): The directory where templates are stored + template_filename (str): The name of the template in the template_dir + template_vars (Dict): Dictionary of keys in the template + alongside their values to insert + + Returns: + str containing the contents of the rendered template + """ + loader = jinja2.FileSystemLoader(template_dir) + env = jinja2.Environment(loader=loader) + + template = env.get_template(template_filename) + return template.render(**template_vars) + + @defer.inlineCallbacks + def on_POST(self, request, medium): + if medium != "email": + raise SynapseError( + 400, + "This medium is currently not supported for password resets", + ) + + body = parse_json_object_from_request(request) + assert_params_in_dict(body, [ + 'sid', 'client_secret', 'token', + ]) + + valid, _ = yield self.datastore.validate_threepid_validation_token( + body['sid'], + body['client_secret'], + body['token'], + self.clock.time_msec(), + ) + response_code = 200 if valid else 400 + + defer.returnValue((response_code, {"success": valid})) + + class PasswordRestServlet(RestServlet): PATTERNS = client_patterns("/account/password$") @@ -144,6 +375,7 @@ class PasswordRestServlet(RestServlet): result, params, _ = yield self.auth_handler.check_auth( [[LoginType.EMAIL_IDENTITY], [LoginType.MSISDN]], body, self.hs.get_ip_from_request(request), + password_servlet=True, ) if LoginType.EMAIL_IDENTITY in result: @@ -417,6 +649,7 @@ class WhoamiRestServlet(RestServlet): def register_servlets(hs, http_server): EmailPasswordRequestTokenRestServlet(hs).register(http_server) MsisdnPasswordRequestTokenRestServlet(hs).register(http_server) + PasswordResetSubmitTokenServlet(hs).register(http_server) PasswordRestServlet(hs).register(http_server) DeactivateAccountRestServlet(hs).register(http_server) EmailThreepidRequestTokenRestServlet(hs).register(http_server) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 52891bb9eb..ae891aa332 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -588,6 +588,10 @@ class SQLBaseStore(object): Args: table : string giving the table name values : dict of new column names and values for them + or_ignore : bool stating whether an exception should be raised + when a conflicting row already exists. If True, False will be + returned by the function instead + desc : string giving a description of the transaction Returns: bool: Whether the row was inserted or not. 
Only useful when @@ -1228,8 +1232,8 @@ class SQLBaseStore(object): ) txn.execute(select_sql, list(keyvalues.values())) - row = txn.fetchone() + if not row: if allow_none: return None diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index c1711bc8bd..23a4baa484 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -25,7 +25,7 @@ logger = logging.getLogger(__name__) # Remember to update this number every time a change is made to database # schema files, so the users will be informed on server restarts. -SCHEMA_VERSION = 54 +SCHEMA_VERSION = 55 dir_path = os.path.abspath(os.path.dirname(__file__)) diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 4cf159ba81..9b41cbd757 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -17,17 +17,20 @@ import re +from six import iterkeys from six.moves import range from twisted.internet import defer from synapse.api.constants import UserTypes -from synapse.api.errors import Codes, StoreError +from synapse.api.errors import Codes, StoreError, ThreepidValidationError from synapse.storage import background_updates from synapse.storage._base import SQLBaseStore from synapse.types import UserID from synapse.util.caches.descriptors import cached, cachedInlineCallbacks +THIRTY_MINUTES_IN_MS = 30 * 60 * 1000 + class RegistrationWorkerStore(SQLBaseStore): def __init__(self, db_conn, hs): @@ -422,7 +425,7 @@ class RegistrationWorkerStore(SQLBaseStore): defer.returnValue(None) @defer.inlineCallbacks - def get_user_id_by_threepid(self, medium, address): + def get_user_id_by_threepid(self, medium, address, require_verified=False): """Returns user id from threepid Args: @@ -595,6 +598,11 @@ class RegistrationStore( "user_threepids_grandfather", self._bg_user_threepids_grandfather, ) + # Create a background job for culling expired 3PID validity tokens + hs.get_clock().looping_call( + self.cull_expired_threepid_validation_tokens, THIRTY_MINUTES_IN_MS, + ) + @defer.inlineCallbacks def add_access_token_to_user(self, user_id, token, device_id=None): """Adds an access token for the given user. @@ -963,7 +971,6 @@ class RegistrationStore( We do this by grandfathering in existing user threepids assuming that they used one of the server configured trusted identity servers. """ - id_servers = set(self.config.trusted_third_party_id_servers) def _bg_user_threepids_grandfather_txn(txn): @@ -984,3 +991,280 @@ class RegistrationStore( yield self._end_background_update("user_threepids_grandfather") defer.returnValue(1) + + def get_threepid_validation_session( + self, + medium, + client_secret, + address=None, + sid=None, + validated=None, + ): + """Gets a session_id and last_send_attempt (if available) for a + client_secret/medium/(address|session_id) combo + + Args: + medium (str|None): The medium of the 3PID + address (str|None): The address of the 3PID + sid (str|None): The ID of the validation session + client_secret (str|None): A unique string provided by the client to + help identify this validation attempt + validated (bool|None): Whether sessions should be filtered by + whether they have been validated already or not. None to + perform no filtering + + Returns: + deferred {str, int}|None: A dict containing the + latest session_id and send_attempt count for this 3PID. 
+ Otherwise None if there hasn't been a previous attempt + """ + keyvalues = { + "medium": medium, + "client_secret": client_secret, + } + if address: + keyvalues["address"] = address + if sid: + keyvalues["session_id"] = sid + + assert(address or sid) + + def get_threepid_validation_session_txn(txn): + sql = """ + SELECT address, session_id, medium, client_secret, + last_send_attempt, validated_at + FROM threepid_validation_session WHERE %s + """ % (" AND ".join("%s = ?" % k for k in iterkeys(keyvalues)),) + + if validated is not None: + sql += " AND validated_at IS " + ("NOT NULL" if validated else "NULL") + + sql += " LIMIT 1" + + txn.execute(sql, list(keyvalues.values())) + rows = self.cursor_to_dict(txn) + if not rows: + return None + + return rows[0] + + return self.runInteraction( + "get_threepid_validation_session", + get_threepid_validation_session_txn, + ) + + def validate_threepid_session( + self, + session_id, + client_secret, + token, + current_ts, + ): + """Attempt to validate a threepid session using a token + + Args: + session_id (str): The id of a validation session + client_secret (str): A unique string provided by the client to + help identify this validation attempt + token (str): A validation token + current_ts (int): The current unix time in milliseconds. Used for + checking token expiry status + + Returns: + deferred str|None: A str representing a link to redirect the user + to if there is one. + """ + # Insert everything into a transaction in order to run atomically + def validate_threepid_session_txn(txn): + row = self._simple_select_one_txn( + txn, + table="threepid_validation_session", + keyvalues={"session_id": session_id}, + retcols=["client_secret", "validated_at"], + allow_none=True, + ) + + if not row: + raise ThreepidValidationError(400, "Unknown session_id") + retrieved_client_secret = row["client_secret"] + validated_at = row["validated_at"] + + if retrieved_client_secret != client_secret: + raise ThreepidValidationError( + 400, "This client_secret does not match the provided session_id", + ) + + row = self._simple_select_one_txn( + txn, + table="threepid_validation_token", + keyvalues={"session_id": session_id, "token": token}, + retcols=["expires", "next_link"], + allow_none=True, + ) + + if not row: + raise ThreepidValidationError( + 400, "Validation token not found or has expired", + ) + expires = row["expires"] + next_link = row["next_link"] + + # If the session is already validated, no need to revalidate + if validated_at: + return next_link + + if expires <= current_ts: + raise ThreepidValidationError( + 400, "This token has expired. Please request a new one", + ) + + # Looks good. 
Validate the session + self._simple_update_txn( + txn, + table="threepid_validation_session", + keyvalues={"session_id": session_id}, + updatevalues={"validated_at": self.clock.time_msec()}, + ) + + return next_link + + # Return next_link if it exists + return self.runInteraction( + "validate_threepid_session_txn", + validate_threepid_session_txn, + ) + + def upsert_threepid_validation_session( + self, + medium, + address, + client_secret, + send_attempt, + session_id, + validated_at=None, + ): + """Upsert a threepid validation session + Args: + medium (str): The medium of the 3PID + address (str): The address of the 3PID + client_secret (str): A unique string provided by the client to + help identify this validation attempt + send_attempt (int): The latest send_attempt on this session + session_id (str): The id of this validation session + validated_at (int|None): The unix timestamp in milliseconds of + when the session was marked as valid + """ + insertion_values = { + "medium": medium, + "address": address, + "client_secret": client_secret, + } + + if validated_at: + insertion_values["validated_at"] = validated_at + + return self._simple_upsert( + table="threepid_validation_session", + keyvalues={"session_id": session_id}, + values={"last_send_attempt": send_attempt}, + insertion_values=insertion_values, + desc="upsert_threepid_validation_session", + ) + + def start_or_continue_validation_session( + self, + medium, + address, + session_id, + client_secret, + send_attempt, + next_link, + token, + token_expires, + ): + """Creates a new threepid validation session if it does not already + exist and associates a new validation token with it + + Args: + medium (str): The medium of the 3PID + address (str): The address of the 3PID + session_id (str): The id of this validation session + client_secret (str): A unique string provided by the client to + help identify this validation attempt + send_attempt (int): The latest send_attempt on this session + next_link (str|None): The link to redirect the user to upon + successful validation + token (str): The validation token + token_expires (int): The timestamp for which after the token + will no longer be valid + """ + def start_or_continue_validation_session_txn(txn): + # Create or update a validation session + self._simple_upsert_txn( + txn, + table="threepid_validation_session", + keyvalues={"session_id": session_id}, + values={"last_send_attempt": send_attempt}, + insertion_values={ + "medium": medium, + "address": address, + "client_secret": client_secret, + }, + ) + + # Create a new validation token with this session ID + self._simple_insert_txn( + txn, + table="threepid_validation_token", + values={ + "session_id": session_id, + "token": token, + "next_link": next_link, + "expires": token_expires, + }, + ) + + return self.runInteraction( + "start_or_continue_validation_session", + start_or_continue_validation_session_txn, + ) + + def cull_expired_threepid_validation_tokens(self): + """Remove threepid validation tokens with expiry dates that have passed""" + def cull_expired_threepid_validation_tokens_txn(txn, ts): + sql = """ + DELETE FROM threepid_validation_token WHERE + expires < ? + """ + return txn.execute(sql, (ts,)) + + return self.runInteraction( + "cull_expired_threepid_validation_tokens", + cull_expired_threepid_validation_tokens_txn, + self.clock.time_msec(), + ) + + def delete_threepid_session(self, session_id): + """Removes a threepid validation session from the database. 
This can + be done after validation has been performed and whatever action was + waiting on it has been carried out + + Args: + session_id (str): The ID of the session to delete + """ + def delete_threepid_session_txn(txn): + self._simple_delete_txn( + txn, + table="threepid_validation_token", + keyvalues={"session_id": session_id}, + ) + self._simple_delete_txn( + txn, + table="threepid_validation_session", + keyvalues={"session_id": session_id}, + ) + + return self.runInteraction( + "delete_threepid_session", + delete_threepid_session_txn, + ) diff --git a/synapse/storage/schema/delta/55/track_threepid_validations.sql b/synapse/storage/schema/delta/55/track_threepid_validations.sql new file mode 100644 index 0000000000..a8eced2e0a --- /dev/null +++ b/synapse/storage/schema/delta/55/track_threepid_validations.sql @@ -0,0 +1,31 @@ +/* Copyright 2019 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +CREATE TABLE IF NOT EXISTS threepid_validation_session ( + session_id TEXT PRIMARY KEY, + medium TEXT NOT NULL, + address TEXT NOT NULL, + client_secret TEXT NOT NULL, + last_send_attempt BIGINT NOT NULL, + validated_at BIGINT +); + +CREATE TABLE IF NOT EXISTS threepid_validation_token ( + token TEXT PRIMARY KEY, + session_id TEXT NOT NULL, + next_link TEXT, + expires BIGINT NOT NULL +); + +CREATE INDEX threepid_validation_token_session_id ON threepid_validation_token(session_id); diff --git a/tests/utils.py b/tests/utils.py index 200c1ceabe..b2817cf22c 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -131,7 +131,6 @@ def default_config(name, parse=False): "password_providers": [], "worker_replication_url": "", "worker_app": None, - "email_enable_notifs": False, "block_non_admin_invites": False, "federation_domain_whitelist": None, "filter_timeline_limit": 5000, From ed872db8df61f5ee019bdfdb68e1e83e9e2b7298 Mon Sep 17 00:00:00 2001 From: "Amber H. 
Brown" Date: Fri, 7 Jun 2019 02:53:47 +1000 Subject: [PATCH 351/429] fix maybe --- .../full_schemas/54/stream_positions.sql | 22 ++----------------- 1 file changed, 2 insertions(+), 20 deletions(-) diff --git a/synapse/storage/schema/full_schemas/54/stream_positions.sql b/synapse/storage/schema/full_schemas/54/stream_positions.sql index 575ab6b354..c265fd20e2 100644 --- a/synapse/storage/schema/full_schemas/54/stream_positions.sql +++ b/synapse/storage/schema/full_schemas/54/stream_positions.sql @@ -2,24 +2,6 @@ INSERT INTO appservice_stream_position (stream_ordering) SELECT COALESCE(MAX(stream_ordering), 0) FROM events; INSERT INTO federation_stream_position (type, stream_id) VALUES ('federation', -1); INSERT INTO federation_stream_position (type, stream_id) SELECT 'events', coalesce(max(stream_ordering), -1) FROM events; -INSERT INTO user_directory_stream_pos (stream_id) VALUES (null); -INSERT INTO stats_stream_pos (stream_id) VALUES (null); +INSERT INTO user_directory_stream_pos (stream_id) VALUES (0); +INSERT INTO stats_stream_pos (stream_id) VALUES (0); INSERT INTO event_push_summary_stream_ordering (stream_ordering) VALUES (0); - ---- User dir population - --- Set up staging tables -INSERT INTO background_updates (update_name, progress_json) VALUES - ('populate_user_directory_createtables', '{}'); - --- Run through each room and update the user directory according to who is in it -INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES - ('populate_user_directory_process_rooms', '{}', 'populate_user_directory_createtables'); - --- Insert all users, if search_all_users is on -INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES - ('populate_user_directory_process_users', '{}', 'populate_user_directory_process_rooms'); - --- Clean up staging tables -INSERT INTO background_updates (update_name, progress_json, depends_on) VALUES - ('populate_user_directory_cleanup', '{}', 'populate_user_directory_process_users'); From 8acde3dc4749be714beb79a61298f430d7c4e701 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 6 Jun 2019 18:00:06 +0100 Subject: [PATCH 352/429] remove bloat --- INSTALL.md | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/INSTALL.md b/INSTALL.md index d3a450f40f..a1ff91a98e 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -406,20 +406,11 @@ this is required to support the 'password reset' feature. To configure an SMTP server for Synapse, modify the configuration section headed ``email``, and be sure to have at least the ``smtp_host``, ``smtp_port`` and ``notif_from`` fields filled out. You may also need to set ``smtp_user``, -``smtp_pass``, and ``require_transport_security``.. +``smtp_pass``, and ``require_transport_security``. If Synapse is not configured with an SMTP server, password reset via email will be disabled by default. -Alternatively it is possible delegate the sending of email to the server's -identity server. Doing so is convenient but not recommended, since a malicious -or compromised identity server could theoretically hijack a given user's -account by redirecting mail. - -If you are absolutely certain that you wish to use the server's identity server -for password resets, set ``trust_identity_server_for_password_resets`` to -``true`` under the ``email:`` configuration section. 
- ## Registering a user You will need at least one user on your server in order to use a Matrix From a11865016e4fc8f691ce94ec25e8f40290df8329 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 6 Jun 2019 20:13:47 +0100 Subject: [PATCH 353/429] Set default room version to v4. (#5379) Set default room version to v4. --- changelog.d/5379.feature | 1 + docs/sample_config.yaml | 2 +- synapse/config/server.py | 2 +- tests/storage/test_cleanup_extrems.py | 6 ++++++ tests/utils.py | 3 ++- 5 files changed, 11 insertions(+), 3 deletions(-) create mode 100644 changelog.d/5379.feature diff --git a/changelog.d/5379.feature b/changelog.d/5379.feature new file mode 100644 index 0000000000..7b64786fe6 --- /dev/null +++ b/changelog.d/5379.feature @@ -0,0 +1 @@ +Set default room version to v4. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index ea73306fb9..4d7e6f3eb5 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -91,7 +91,7 @@ pid_file: DATADIR/homeserver.pid # For example, for room version 1, default_room_version should be set # to "1". # -#default_room_version: "1" +#default_room_version: "4" # The GC threshold parameters to pass to `gc.set_threshold`, if defined # diff --git a/synapse/config/server.py b/synapse/config/server.py index 334921d421..7d56e2d141 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -36,7 +36,7 @@ logger = logging.Logger(__name__) # in the list. DEFAULT_BIND_ADDRESSES = ['::', '0.0.0.0'] -DEFAULT_ROOM_VERSION = "1" +DEFAULT_ROOM_VERSION = "4" class ServerConfig(Config): diff --git a/tests/storage/test_cleanup_extrems.py b/tests/storage/test_cleanup_extrems.py index 6dda66ecd3..6aa8b8b3c6 100644 --- a/tests/storage/test_cleanup_extrems.py +++ b/tests/storage/test_cleanup_extrems.py @@ -25,6 +25,11 @@ from tests.unittest import HomeserverTestCase class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase): """Test the background update to clean forward extremities table. """ + def make_homeserver(self, reactor, clock): + # Hack until we understand why test_forked_graph_cleanup fails with v4 + config = self.default_config() + config['default_room_version'] = '1' + return self.setup_test_homeserver(config=config) def prepare(self, reactor, clock, homeserver): self.store = homeserver.get_datastore() @@ -220,6 +225,7 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase): Where SF* are soft failed, and with them A, B and C marked as extremities. This should resolve to B and C being marked as extremity. """ + # Create the room graph event_id_a = self.create_and_send_event() event_id_b = self.create_and_send_event() diff --git a/tests/utils.py b/tests/utils.py index b2817cf22c..f8c7ad2604 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -31,6 +31,7 @@ from synapse.api.constants import EventTypes from synapse.api.errors import CodeMessageException, cs_error from synapse.api.room_versions import RoomVersions from synapse.config.homeserver import HomeServerConfig +from synapse.config.server import DEFAULT_ROOM_VERSION from synapse.federation.transport import server as federation_server from synapse.http.server import HttpServer from synapse.server import HomeServer @@ -173,7 +174,7 @@ def default_config(name, parse=False): "use_frozen_dicts": False, # We need a sane default_room_version, otherwise attempts to create # rooms will fail. 
- "default_room_version": "1", + "default_room_version": DEFAULT_ROOM_VERSION, # disable user directory updates, because they get done in the # background, which upsets the test runner. "update_user_directory": False, From 2d1d7b7e6f2bec3b96b0d23993369ce46aad4f32 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 6 Jun 2019 23:54:00 +0100 Subject: [PATCH 354/429] Prevent multiple device list updates from breaking a batch send (#5156) fixes #5153 --- changelog.d/5156.bugfix | 1 + .../sender/per_destination_queue.py | 5 +- synapse/storage/devices.py | 156 ++++++++++++++---- tests/storage/test_devices.py | 69 ++++++++ 4 files changed, 198 insertions(+), 33 deletions(-) create mode 100644 changelog.d/5156.bugfix diff --git a/changelog.d/5156.bugfix b/changelog.d/5156.bugfix new file mode 100644 index 0000000000..e8aa7d8241 --- /dev/null +++ b/changelog.d/5156.bugfix @@ -0,0 +1 @@ +Prevent federation device list updates breaking when processing multiple updates at once. \ No newline at end of file diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index fae8bea392..564c57203d 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -349,9 +349,10 @@ class PerDestinationQueue(object): @defer.inlineCallbacks def _get_new_device_messages(self, limit): last_device_list = self._last_device_list_stream_id - # Will return at most 20 entries + + # Retrieve list of new device updates to send to the destination now_stream_id, results = yield self._store.get_devices_by_remote( - self._destination, last_device_list + self._destination, last_device_list, limit=limit, ) edus = [ Edu( diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py index fd869b934c..d102e07372 100644 --- a/synapse/storage/devices.py +++ b/synapse/storage/devices.py @@ -14,7 +14,7 @@ # limitations under the License. import logging -from six import iteritems, itervalues +from six import iteritems from canonicaljson import json @@ -72,11 +72,14 @@ class DeviceWorkerStore(SQLBaseStore): defer.returnValue({d["device_id"]: d for d in devices}) - def get_devices_by_remote(self, destination, from_stream_id): + @defer.inlineCallbacks + def get_devices_by_remote(self, destination, from_stream_id, limit): """Get stream of updates to send to remote servers Returns: - (int, list[dict]): current stream id and list of updates + Deferred[tuple[int, list[dict]]]: + current stream id (ie, the stream id of the last update included in the + response), and the list of updates """ now_stream_id = self._device_list_id_gen.get_current_token() @@ -84,55 +87,131 @@ class DeviceWorkerStore(SQLBaseStore): destination, int(from_stream_id) ) if not has_changed: - return (now_stream_id, []) + defer.returnValue((now_stream_id, [])) - return self.runInteraction( + # We retrieve n+1 devices from the list of outbound pokes where n is + # our outbound device update limit. We then check if the very last + # device has the same stream_id as the second-to-last device. If so, + # then we ignore all devices with that stream_id and only send the + # devices with a lower stream_id. + # + # If when culling the list we end up with no devices afterwards, we + # consider the device update to be too large, and simply skip the + # stream_id; the rationale being that such a large device list update + # is likely an error. 
+ updates = yield self.runInteraction( "get_devices_by_remote", self._get_devices_by_remote_txn, destination, from_stream_id, now_stream_id, + limit + 1, ) - def _get_devices_by_remote_txn( - self, txn, destination, from_stream_id, now_stream_id - ): - sql = """ - SELECT user_id, device_id, max(stream_id) FROM device_lists_outbound_pokes - WHERE destination = ? AND ? < stream_id AND stream_id <= ? AND sent = ? - GROUP BY user_id, device_id - LIMIT 20 - """ - txn.execute(sql, (destination, from_stream_id, now_stream_id, False)) + # Return an empty list if there are no updates + if not updates: + defer.returnValue((now_stream_id, [])) + # if we have exceeded the limit, we need to exclude any results with the + # same stream_id as the last row. + if len(updates) > limit: + stream_id_cutoff = updates[-1][2] + now_stream_id = stream_id_cutoff - 1 + else: + stream_id_cutoff = None + + # Perform the equivalent of a GROUP BY + # + # Iterate through the updates list and copy non-duplicate + # (user_id, device_id) entries into a map, with the value being + # the max stream_id across each set of duplicate entries + # # maps (user_id, device_id) -> stream_id - query_map = {(r[0], r[1]): r[2] for r in txn} + # as long as their stream_id does not match that of the last row + query_map = {} + for update in updates: + if stream_id_cutoff is not None and update[2] >= stream_id_cutoff: + # Stop processing updates + break + + key = (update[0], update[1]) + query_map[key] = max(query_map.get(key, 0), update[2]) + + # If we didn't find any updates with a stream_id lower than the cutoff, it + # means that there are more than limit updates all of which have the same + # steam_id. + + # That should only happen if a client is spamming the server with new + # devices, in which case E2E isn't going to work well anyway. We'll just + # skip that stream_id and return an empty list, and continue with the next + # stream_id next time. if not query_map: - return (now_stream_id, []) + defer.returnValue((stream_id_cutoff, [])) - if len(query_map) >= 20: - now_stream_id = max(stream_id for stream_id in itervalues(query_map)) + results = yield self._get_device_update_edus_by_remote( + destination, + from_stream_id, + query_map, + ) - devices = self._get_e2e_device_keys_txn( - txn, + defer.returnValue((now_stream_id, results)) + + def _get_devices_by_remote_txn( + self, txn, destination, from_stream_id, now_stream_id, limit + ): + """Return device update information for a given remote destination + + Args: + txn (LoggingTransaction): The transaction to execute + destination (str): The host the device updates are intended for + from_stream_id (int): The minimum stream_id to filter updates by, exclusive + now_stream_id (int): The maximum stream_id to filter updates by, inclusive + limit (int): Maximum number of device updates to return + + Returns: + List: List of device updates + """ + sql = """ + SELECT user_id, device_id, stream_id FROM device_lists_outbound_pokes + WHERE destination = ? AND ? < stream_id AND stream_id <= ? AND sent = ? + ORDER BY stream_id + LIMIT ? 
+ """ + txn.execute(sql, (destination, from_stream_id, now_stream_id, False, limit)) + + return list(txn) + + @defer.inlineCallbacks + def _get_device_update_edus_by_remote( + self, destination, from_stream_id, query_map, + ): + """Returns a list of device update EDUs as well as E2EE keys + + Args: + destination (str): The host the device updates are intended for + from_stream_id (int): The minimum stream_id to filter updates by, exclusive + query_map (Dict[(str, str): int]): Dictionary mapping + user_id/device_id to update stream_id + + Returns: + List[Dict]: List of objects representing an device update EDU + + """ + devices = yield self.runInteraction( + "_get_e2e_device_keys_txn", + self._get_e2e_device_keys_txn, query_map.keys(), include_all_devices=True, include_deleted_devices=True, ) - prev_sent_id_sql = """ - SELECT coalesce(max(stream_id), 0) as stream_id - FROM device_lists_outbound_last_success - WHERE destination = ? AND user_id = ? AND stream_id <= ? - """ - results = [] for user_id, user_devices in iteritems(devices): # The prev_id for the first row is always the last row before # `from_stream_id` - txn.execute(prev_sent_id_sql, (destination, user_id, from_stream_id)) - rows = txn.fetchall() - prev_id = rows[0][0] + prev_id = yield self._get_last_device_update_for_remote_user( + destination, user_id, from_stream_id, + ) for device_id, device in iteritems(user_devices): stream_id = query_map[(user_id, device_id)] result = { @@ -156,7 +235,22 @@ class DeviceWorkerStore(SQLBaseStore): results.append(result) - return (now_stream_id, results) + defer.returnValue(results) + + def _get_last_device_update_for_remote_user( + self, destination, user_id, from_stream_id, + ): + def f(txn): + prev_sent_id_sql = """ + SELECT coalesce(max(stream_id), 0) as stream_id + FROM device_lists_outbound_last_success + WHERE destination = ? AND user_id = ? AND stream_id <= ? + """ + txn.execute(prev_sent_id_sql, (destination, user_id, from_stream_id)) + rows = txn.fetchall() + return rows[0][0] + + return self.runInteraction("get_last_device_update_for_remote_user", f) def mark_as_sent_devices_by_remote(self, destination, stream_id): """Mark that updates have successfully been sent to the destination. 
diff --git a/tests/storage/test_devices.py b/tests/storage/test_devices.py index aef4dfaf57..6396ccddb5 100644 --- a/tests/storage/test_devices.py +++ b/tests/storage/test_devices.py @@ -71,6 +71,75 @@ class DeviceStoreTestCase(tests.unittest.TestCase): res["device2"], ) + @defer.inlineCallbacks + def test_get_devices_by_remote(self): + device_ids = ["device_id1", "device_id2"] + + # Add two device updates with a single stream_id + yield self.store.add_device_change_to_streams( + "user_id", device_ids, ["somehost"], + ) + + # Get all device updates ever meant for this remote + now_stream_id, device_updates = yield self.store.get_devices_by_remote( + "somehost", -1, limit=100, + ) + + # Check original device_ids are contained within these updates + self._check_devices_in_updates(device_ids, device_updates) + + @defer.inlineCallbacks + def test_get_devices_by_remote_limited(self): + # Test breaking the update limit in 1, 101, and 1 device_id segments + + # first add one device + device_ids1 = ["device_id0"] + yield self.store.add_device_change_to_streams( + "user_id", device_ids1, ["someotherhost"], + ) + + # then add 101 + device_ids2 = ["device_id" + str(i + 1) for i in range(101)] + yield self.store.add_device_change_to_streams( + "user_id", device_ids2, ["someotherhost"], + ) + + # then one more + device_ids3 = ["newdevice"] + yield self.store.add_device_change_to_streams( + "user_id", device_ids3, ["someotherhost"], + ) + + # + # now read them back. + # + + # first we should get a single update + now_stream_id, device_updates = yield self.store.get_devices_by_remote( + "someotherhost", -1, limit=100, + ) + self._check_devices_in_updates(device_ids1, device_updates) + + # Then we should get an empty list back as the 101 devices broke the limit + now_stream_id, device_updates = yield self.store.get_devices_by_remote( + "someotherhost", now_stream_id, limit=100, + ) + self.assertEqual(len(device_updates), 0) + + # The 101 devices should've been cleared, so we should now just get one device + # update + now_stream_id, device_updates = yield self.store.get_devices_by_remote( + "someotherhost", now_stream_id, limit=100, + ) + self._check_devices_in_updates(device_ids3, device_updates) + + def _check_devices_in_updates(self, expected_device_ids, device_updates): + """Check that an specific device ids exist in a list of device update EDUs""" + self.assertEqual(len(device_updates), len(expected_device_ids)) + + received_device_ids = {update["device_id"] for update in device_updates} + self.assertEqual(received_device_ids, set(expected_device_ids)) + @defer.inlineCallbacks def test_update_device(self): yield self.store.store_device("user_id", "device_id", "display_name 1") From 4f581faa98b162b9949030a167e9a71f81e5e915 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 7 Jun 2019 00:20:17 +0100 Subject: [PATCH 355/429] Automatically retry builds when a buildkite agent is lost (#5380) Sometimes the build agents get lost or die (error codes -1 and 2). Retry automatically a maximum of 2 times if this happens. 
Error code reference: * -1: Agent was lost * 0: Build successful * 1: There was an error in your code * 2: The build stopped abruptly * 255: The build was cancelled --- .buildkite/pipeline.yml | 66 +++++++++++++++++++++++++++++++++++++++++ changelog.d/5380.misc | 1 + 2 files changed, 67 insertions(+) create mode 100644 changelog.d/5380.misc diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index b805b2d839..719f22b4e1 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -56,6 +56,12 @@ steps: - docker#v3.0.1: image: "python:2.7" propagate-environment: true + retry: + automatic: + - exit_status: -1 + limit: 2 + - exit_status: 2 + limit: 2 - command: - "python -m pip install tox" @@ -67,6 +73,12 @@ steps: - docker#v3.0.1: image: "python:3.5" propagate-environment: true + retry: + automatic: + - exit_status: -1 + limit: 2 + - exit_status: 2 + limit: 2 - command: - "python -m pip install tox" @@ -78,6 +90,12 @@ steps: - docker#v3.0.1: image: "python:3.6" propagate-environment: true + retry: + automatic: + - exit_status: -1 + limit: 2 + - exit_status: 2 + limit: 2 - command: - "python -m pip install tox" @@ -89,6 +107,12 @@ steps: - docker#v3.0.1: image: "python:3.7" propagate-environment: true + retry: + automatic: + - exit_status: -1 + limit: 2 + - exit_status: 2 + limit: 2 - command: - "python -m pip install tox" @@ -100,6 +124,12 @@ steps: - docker#v3.0.1: image: "python:2.7" propagate-environment: true + retry: + automatic: + - exit_status: -1 + limit: 2 + - exit_status: 2 + limit: 2 - label: ":python: 2.7 / :postgres: 9.4" env: @@ -111,6 +141,12 @@ steps: run: testenv config: - .buildkite/docker-compose.py27.pg94.yaml + retry: + automatic: + - exit_status: -1 + limit: 2 + - exit_status: 2 + limit: 2 - label: ":python: 2.7 / :postgres: 9.5" env: @@ -122,6 +158,12 @@ steps: run: testenv config: - .buildkite/docker-compose.py27.pg95.yaml + retry: + automatic: + - exit_status: -1 + limit: 2 + - exit_status: 2 + limit: 2 - label: ":python: 3.5 / :postgres: 9.4" env: @@ -133,6 +175,12 @@ steps: run: testenv config: - .buildkite/docker-compose.py35.pg94.yaml + retry: + automatic: + - exit_status: -1 + limit: 2 + - exit_status: 2 + limit: 2 - label: ":python: 3.5 / :postgres: 9.5" env: @@ -144,6 +192,12 @@ steps: run: testenv config: - .buildkite/docker-compose.py35.pg95.yaml + retry: + automatic: + - exit_status: -1 + limit: 2 + - exit_status: 2 + limit: 2 - label: ":python: 3.7 / :postgres: 9.5" env: @@ -155,6 +209,12 @@ steps: run: testenv config: - .buildkite/docker-compose.py37.pg95.yaml + retry: + automatic: + - exit_status: -1 + limit: 2 + - exit_status: 2 + limit: 2 - label: ":python: 3.7 / :postgres: 11" env: @@ -166,3 +226,9 @@ steps: run: testenv config: - .buildkite/docker-compose.py37.pg11.yaml + retry: + automatic: + - exit_status: -1 + limit: 2 + - exit_status: 2 + limit: 2 diff --git a/changelog.d/5380.misc b/changelog.d/5380.misc new file mode 100644 index 0000000000..099bba414c --- /dev/null +++ b/changelog.d/5380.misc @@ -0,0 +1 @@ +Automatically retry buildkite builds (max twice) when an agent is lost. 
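For readers following the device-list change in [PATCH 354/429] above, the batching rule documented in the comments of get_devices_by_remote -- fetch limit + 1 rows ordered by stream_id, then drop every row that shares the final row's stream_id so a single stream position is never split across two batches -- can be sketched in isolation. The following is a minimal standalone sketch, not Synapse code; the function name, the tuple format and the sample values are invented for illustration.

def batch_device_updates(updates, limit):
    """Group (user_id, device_id, stream_id) rows into one outbound batch.

    `updates` is assumed to be the result of fetching `limit + 1` rows
    ordered by stream_id, as in the query added by the patch above.

    Returns (new_stream_token, {(user_id, device_id): max_stream_id}).
    """
    if len(updates) > limit:
        # Over the limit: every row sharing the last row's stream_id is
        # deferred to the next batch, and the token stops just before it.
        stream_id_cutoff = updates[-1][2]
        new_token = stream_id_cutoff - 1
    else:
        stream_id_cutoff = None
        new_token = updates[-1][2] if updates else 0

    query_map = {}
    for user_id, device_id, stream_id in updates:
        if stream_id_cutoff is not None and stream_id >= stream_id_cutoff:
            # Stop before the stream_id that spilled over the limit.
            break
        key = (user_id, device_id)
        query_map[key] = max(query_map.get(key, 0), stream_id)

    return new_token, query_map


if __name__ == "__main__":
    # Three rows fetched with limit=2 (i.e. limit + 1 = 3): the two rows at
    # stream_id 11 are held back so the batch never splits a stream position.
    rows = [
        ("@u:example.org", "dev1", 10),
        ("@u:example.org", "dev2", 11),
        ("@u:example.org", "dev3", 11),
    ]
    print(batch_device_updates(rows, limit=2))
    # -> (10, {('@u:example.org', 'dev1'): 10})

The patch itself additionally special-cases the situation where every fetched row shares the same stream_id: that stream_id is skipped entirely, on the grounds that such an oversized single update is almost certainly an error.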
From 7c455a86bc6827687ec1c232a8c1621e528a6d78 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Fri, 7 Jun 2019 10:29:32 +0100 Subject: [PATCH 356/429] 1.0.0rc1 --- CHANGES.md | 83 ++++++++++++++++++++++++++++++++++++++++ changelog.d/4338.feature | 1 - changelog.d/5089.bugfix | 1 - changelog.d/5156.bugfix | 1 - changelog.d/5200.bugfix | 1 - changelog.d/5216.misc | 1 - changelog.d/5220.feature | 1 - changelog.d/5221.bugfix | 1 - changelog.d/5223.feature | 1 - changelog.d/5226.misc | 1 - changelog.d/5227.misc | 1 - changelog.d/5230.misc | 1 - changelog.d/5232.misc | 1 - changelog.d/5233.bugfix | 1 - changelog.d/5234.misc | 1 - changelog.d/5235.misc | 1 - changelog.d/5236.misc | 1 - changelog.d/5237.misc | 1 - changelog.d/5244.misc | 1 - changelog.d/5249.feature | 1 - changelog.d/5250.misc | 1 - changelog.d/5251.bugfix | 1 - changelog.d/5256.bugfix | 1 - changelog.d/5257.bugfix | 1 - changelog.d/5258.bugfix | 1 - changelog.d/5260.feature | 1 - changelog.d/5268.bugfix | 1 - changelog.d/5274.bugfix | 1 - changelog.d/5275.bugfix | 1 - changelog.d/5276.feature | 1 - changelog.d/5277.bugfix | 1 - changelog.d/5278.bugfix | 1 - changelog.d/5282.doc | 1 - changelog.d/5283.misc | 1 - changelog.d/5284.misc | 1 - changelog.d/5286.feature | 1 - changelog.d/5287.misc | 1 - changelog.d/5288.misc | 1 - changelog.d/5291.bugfix | 1 - changelog.d/5293.bugfix | 1 - changelog.d/5294.bugfix | 1 - changelog.d/5296.misc | 1 - changelog.d/5299.misc | 1 - changelog.d/5300.bugfix | 1 - changelog.d/5303.misc | 1 - changelog.d/5307.bugfix | 1 - changelog.d/5309.bugfix | 1 - changelog.d/5317.bugfix | 1 - changelog.d/5320.misc | 1 - changelog.d/5321.bugfix | 1 - changelog.d/5324.feature | 1 - changelog.d/5328.misc | 1 - changelog.d/5332.misc | 1 - changelog.d/5333.bugfix | 1 - changelog.d/5334.bugfix | 1 - changelog.d/5335.bugfix | 1 - changelog.d/5340.bugfix | 2 - changelog.d/5341.bugfix | 1 - changelog.d/5342.bugfix | 1 - changelog.d/5343.misc | 1 - changelog.d/5344.misc | 1 - changelog.d/5347.misc | 1 - changelog.d/5348.bugfix | 1 - changelog.d/5352.bugfix | 1 - changelog.d/5353.misc | 2 - changelog.d/5354.bugfix | 2 - changelog.d/5355.bugfix | 1 - changelog.d/5356.misc | 1 - changelog.d/5357.doc | 1 - changelog.d/5359.feature | 1 - changelog.d/5360.feature | 1 - changelog.d/5361.feature | 1 - changelog.d/5362.bugfix | 1 - changelog.d/5369.bugfix | 1 - changelog.d/5370.misc | 1 - changelog.d/5371.feature | 1 - changelog.d/5374.feature | 1 - changelog.d/5377.feature | 1 - changelog.d/5379.feature | 1 - changelog.d/5380.misc | 1 - synapse/__init__.py | 2 +- 81 files changed, 84 insertions(+), 83 deletions(-) delete mode 100644 changelog.d/4338.feature delete mode 100644 changelog.d/5089.bugfix delete mode 100644 changelog.d/5156.bugfix delete mode 100644 changelog.d/5200.bugfix delete mode 100644 changelog.d/5216.misc delete mode 100644 changelog.d/5220.feature delete mode 100644 changelog.d/5221.bugfix delete mode 100644 changelog.d/5223.feature delete mode 100644 changelog.d/5226.misc delete mode 100644 changelog.d/5227.misc delete mode 100644 changelog.d/5230.misc delete mode 100644 changelog.d/5232.misc delete mode 100644 changelog.d/5233.bugfix delete mode 100644 changelog.d/5234.misc delete mode 100644 changelog.d/5235.misc delete mode 100644 changelog.d/5236.misc delete mode 100644 changelog.d/5237.misc delete mode 100644 changelog.d/5244.misc delete mode 100644 changelog.d/5249.feature delete mode 100644 changelog.d/5250.misc delete mode 100644 changelog.d/5251.bugfix delete mode 100644 changelog.d/5256.bugfix 
delete mode 100644 changelog.d/5257.bugfix delete mode 100644 changelog.d/5258.bugfix delete mode 100644 changelog.d/5260.feature delete mode 100644 changelog.d/5268.bugfix delete mode 100644 changelog.d/5274.bugfix delete mode 100644 changelog.d/5275.bugfix delete mode 100644 changelog.d/5276.feature delete mode 100644 changelog.d/5277.bugfix delete mode 100644 changelog.d/5278.bugfix delete mode 100644 changelog.d/5282.doc delete mode 100644 changelog.d/5283.misc delete mode 100644 changelog.d/5284.misc delete mode 100644 changelog.d/5286.feature delete mode 100644 changelog.d/5287.misc delete mode 100644 changelog.d/5288.misc delete mode 100644 changelog.d/5291.bugfix delete mode 100644 changelog.d/5293.bugfix delete mode 100644 changelog.d/5294.bugfix delete mode 100644 changelog.d/5296.misc delete mode 100644 changelog.d/5299.misc delete mode 100644 changelog.d/5300.bugfix delete mode 100644 changelog.d/5303.misc delete mode 100644 changelog.d/5307.bugfix delete mode 100644 changelog.d/5309.bugfix delete mode 100644 changelog.d/5317.bugfix delete mode 100644 changelog.d/5320.misc delete mode 100644 changelog.d/5321.bugfix delete mode 100644 changelog.d/5324.feature delete mode 100644 changelog.d/5328.misc delete mode 100644 changelog.d/5332.misc delete mode 100644 changelog.d/5333.bugfix delete mode 100644 changelog.d/5334.bugfix delete mode 100644 changelog.d/5335.bugfix delete mode 100644 changelog.d/5340.bugfix delete mode 100644 changelog.d/5341.bugfix delete mode 100644 changelog.d/5342.bugfix delete mode 100644 changelog.d/5343.misc delete mode 100644 changelog.d/5344.misc delete mode 100644 changelog.d/5347.misc delete mode 100644 changelog.d/5348.bugfix delete mode 100644 changelog.d/5352.bugfix delete mode 100644 changelog.d/5353.misc delete mode 100644 changelog.d/5354.bugfix delete mode 100644 changelog.d/5355.bugfix delete mode 100644 changelog.d/5356.misc delete mode 100644 changelog.d/5357.doc delete mode 100644 changelog.d/5359.feature delete mode 100644 changelog.d/5360.feature delete mode 100644 changelog.d/5361.feature delete mode 100644 changelog.d/5362.bugfix delete mode 100644 changelog.d/5369.bugfix delete mode 100644 changelog.d/5370.misc delete mode 100644 changelog.d/5371.feature delete mode 100644 changelog.d/5374.feature delete mode 100644 changelog.d/5377.feature delete mode 100644 changelog.d/5379.feature delete mode 100644 changelog.d/5380.misc diff --git a/CHANGES.md b/CHANGES.md index 0ffdf1aaef..4dea0f6319 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,86 @@ +Synapse 1.0.0rc1 (2019-06-07) +============================= + +Features +-------- + +- Synapse now more efficiently collates room statistics. ([\#4338](https://github.com/matrix-org/synapse/issues/4338), [\#5260](https://github.com/matrix-org/synapse/issues/5260), [\#5324](https://github.com/matrix-org/synapse/issues/5324)) +- Add experimental support for relations (aka reactions and edits). ([\#5220](https://github.com/matrix-org/synapse/issues/5220)) +- Ability to configure default room version. ([\#5223](https://github.com/matrix-org/synapse/issues/5223), [\#5249](https://github.com/matrix-org/synapse/issues/5249)) +- Allow configuring a range for the account validity startup job. ([\#5276](https://github.com/matrix-org/synapse/issues/5276)) +- CAS login will now hit the r0 API, not the deprecated v1 one. 
([\#5286](https://github.com/matrix-org/synapse/issues/5286)) +- Validate federation server TLS certificates by default (implements [MSC1711](https://github.com/matrix-org/matrix-doc/blob/master/proposals/1711-x509-for-federation.md)). ([\#5359](https://github.com/matrix-org/synapse/issues/5359)) +- Update /_matrix/client/versions to reference support for r0.5.0. ([\#5360](https://github.com/matrix-org/synapse/issues/5360)) +- Add a script to generate new signing-key files. ([\#5361](https://github.com/matrix-org/synapse/issues/5361)) +- Update upgrade and installation guides ahead of 1.0. ([\#5371](https://github.com/matrix-org/synapse/issues/5371)) +- Replace the `perspectives` configuration section with `trusted_key_servers`, and make validating the signatures on responses optional (since TLS will do this job for us). ([\#5374](https://github.com/matrix-org/synapse/issues/5374)) +- Add ability to perform password reset via email without trusting the identity server. ([\#5377](https://github.com/matrix-org/synapse/issues/5377)) +- Set default room version to v4. ([\#5379](https://github.com/matrix-org/synapse/issues/5379)) + + +Bugfixes +-------- + +- Fixes client-server API not sending "m.heroes" to lazy-load /sync requests when a rooms name or its canonical alias are empty. Thanks to @dnaf for this work! ([\#5089](https://github.com/matrix-org/synapse/issues/5089)) +- Prevent federation device list updates breaking when processing multiple updates at once. ([\#5156](https://github.com/matrix-org/synapse/issues/5156)) +- Fix worker registration bug caused by ClientReaderSlavedStore being unable to see get_profileinfo. ([\#5200](https://github.com/matrix-org/synapse/issues/5200)) +- Fix race when backfilling in rooms with worker mode. ([\#5221](https://github.com/matrix-org/synapse/issues/5221)) +- Fix appservice timestamp massaging. ([\#5233](https://github.com/matrix-org/synapse/issues/5233)) +- Ensure that server_keys fetched via a notary server are correctly signed. ([\#5251](https://github.com/matrix-org/synapse/issues/5251)) +- Show the correct error when logging out and access token is missing. ([\#5256](https://github.com/matrix-org/synapse/issues/5256)) +- Fix error code when there is an invalid parameter on /_matrix/client/r0/publicRooms ([\#5257](https://github.com/matrix-org/synapse/issues/5257)) +- Fix error when downloading thumbnail with missing width/height parameter. ([\#5258](https://github.com/matrix-org/synapse/issues/5258)) +- Fix schema update for account validity. ([\#5268](https://github.com/matrix-org/synapse/issues/5268)) +- Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. ([\#5274](https://github.com/matrix-org/synapse/issues/5274), [\#5278](https://github.com/matrix-org/synapse/issues/5278), [\#5291](https://github.com/matrix-org/synapse/issues/5291)) +- Fix "db txn 'update_presence' from sentinel context" log messages. ([\#5275](https://github.com/matrix-org/synapse/issues/5275)) +- Fix dropped logcontexts during high outbound traffic. ([\#5277](https://github.com/matrix-org/synapse/issues/5277)) +- Fix a bug where it is not possible to get events in the federation format with the request `GET /_matrix/client/r0/rooms/{roomId}/messages`. ([\#5293](https://github.com/matrix-org/synapse/issues/5293)) +- Fix performance problems with the rooms stats background update. ([\#5294](https://github.com/matrix-org/synapse/issues/5294)) +- Fix noisy 'no key for server' logs. 
([\#5300](https://github.com/matrix-org/synapse/issues/5300)) +- Fix bug where a notary server would sometimes forget old keys. ([\#5307](https://github.com/matrix-org/synapse/issues/5307)) +- Prevent users from setting huge displaynames and avatar URLs. ([\#5309](https://github.com/matrix-org/synapse/issues/5309)) +- Fix handling of failures when processing incoming events where calling `/event_auth` on remote server fails. ([\#5317](https://github.com/matrix-org/synapse/issues/5317)) +- Ensure that we have an up-to-date copy of the signing key when validating incoming federation requests. ([\#5321](https://github.com/matrix-org/synapse/issues/5321)) +- Fix various problems which made the signing-key notary server time out for some requests. ([\#5333](https://github.com/matrix-org/synapse/issues/5333)) +- Fix bug which would make certain operations (such as room joins) block for 20 minutes while attemoting to fetch verification keys. ([\#5334](https://github.com/matrix-org/synapse/issues/5334)) +- Fix a bug where we could rapidly mark a server as unreachable even though it was only down for a few minutes. ([\#5335](https://github.com/matrix-org/synapse/issues/5335), [\#5340](https://github.com/matrix-org/synapse/issues/5340)) +- Fix a bug where account validity renewal emails could only be sent when email notifs were enabled. ([\#5341](https://github.com/matrix-org/synapse/issues/5341)) +- Fix failure when fetching batches of events during backfill, etc. ([\#5342](https://github.com/matrix-org/synapse/issues/5342)) +- Add a new room version where the timestamps on events are checked against the validity periods on signing keys. ([\#5348](https://github.com/matrix-org/synapse/issues/5348), [\#5354](https://github.com/matrix-org/synapse/issues/5354)) +- Fix room stats and presence background updates to correctly handle missing events. ([\#5352](https://github.com/matrix-org/synapse/issues/5352)) +- Include left members in room summaries' heroes. ([\#5355](https://github.com/matrix-org/synapse/issues/5355)) +- Fix `federation_custom_ca_list` configuration option. ([\#5362](https://github.com/matrix-org/synapse/issues/5362)) +- Fix missing logcontext warnings on shutdown. ([\#5369](https://github.com/matrix-org/synapse/issues/5369)) + + +Improved Documentation +---------------------- + +- Fix docs on resetting the user directory. ([\#5282](https://github.com/matrix-org/synapse/issues/5282)) +- Fix notes about ACME in the MSC1711 faq. ([\#5357](https://github.com/matrix-org/synapse/issues/5357)) + + +Internal Changes +---------------- + +- Synapse will now serve the experimental "room complexity" API endpoint. ([\#5216](https://github.com/matrix-org/synapse/issues/5216)) +- The base classes for the v1 and v2_alpha REST APIs have been unified. ([\#5226](https://github.com/matrix-org/synapse/issues/5226), [\#5328](https://github.com/matrix-org/synapse/issues/5328)) +- Simplifications and comments in do_auth. ([\#5227](https://github.com/matrix-org/synapse/issues/5227)) +- Remove urllib3 pin as requests 2.22.0 has been released supporting urllib3 1.25.2. ([\#5230](https://github.com/matrix-org/synapse/issues/5230)) +- Preparatory work for key-validity features. 
([\#5232](https://github.com/matrix-org/synapse/issues/5232), [\#5234](https://github.com/matrix-org/synapse/issues/5234), [\#5235](https://github.com/matrix-org/synapse/issues/5235), [\#5236](https://github.com/matrix-org/synapse/issues/5236), [\#5237](https://github.com/matrix-org/synapse/issues/5237), [\#5244](https://github.com/matrix-org/synapse/issues/5244), [\#5250](https://github.com/matrix-org/synapse/issues/5250), [\#5296](https://github.com/matrix-org/synapse/issues/5296), [\#5299](https://github.com/matrix-org/synapse/issues/5299), [\#5343](https://github.com/matrix-org/synapse/issues/5343), [\#5347](https://github.com/matrix-org/synapse/issues/5347), [\#5356](https://github.com/matrix-org/synapse/issues/5356)) +- Specify the type of reCAPTCHA key to use. ([\#5283](https://github.com/matrix-org/synapse/issues/5283)) +- Improve sample config for monthly active user blocking. ([\#5284](https://github.com/matrix-org/synapse/issues/5284)) +- Remove spurious debug from MatrixFederationHttpClient.get_json. ([\#5287](https://github.com/matrix-org/synapse/issues/5287)) +- Improve logging for logcontext leaks. ([\#5288](https://github.com/matrix-org/synapse/issues/5288)) +- Clarify that the admin change password API logs the user out. ([\#5303](https://github.com/matrix-org/synapse/issues/5303)) +- New installs will now use the v54 full schema, rather than the full schema v14 and applying incremental updates to v54. ([\#5320](https://github.com/matrix-org/synapse/issues/5320)) +- Improve docstrings on MatrixFederationClient. ([\#5332](https://github.com/matrix-org/synapse/issues/5332)) +- Clean up FederationClient.get_events for clarity. ([\#5344](https://github.com/matrix-org/synapse/issues/5344)) +- Various improvements to debug logging. ([\#5353](https://github.com/matrix-org/synapse/issues/5353)) +- Don't run CI build checks until sample config check has passed. ([\#5370](https://github.com/matrix-org/synapse/issues/5370)) +- Automatically retry buildkite builds (max twice) when an agent is lost. ([\#5380](https://github.com/matrix-org/synapse/issues/5380)) + + Synapse 0.99.5.2 (2019-05-30) ============================= diff --git a/changelog.d/4338.feature b/changelog.d/4338.feature deleted file mode 100644 index 01285e965c..0000000000 --- a/changelog.d/4338.feature +++ /dev/null @@ -1 +0,0 @@ -Synapse now more efficiently collates room statistics. diff --git a/changelog.d/5089.bugfix b/changelog.d/5089.bugfix deleted file mode 100644 index 68643cebb7..0000000000 --- a/changelog.d/5089.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fixes client-server API not sending "m.heroes" to lazy-load /sync requests when a rooms name or its canonical alias are empty. Thanks to @dnaf for this work! diff --git a/changelog.d/5156.bugfix b/changelog.d/5156.bugfix deleted file mode 100644 index e8aa7d8241..0000000000 --- a/changelog.d/5156.bugfix +++ /dev/null @@ -1 +0,0 @@ -Prevent federation device list updates breaking when processing multiple updates at once. \ No newline at end of file diff --git a/changelog.d/5200.bugfix b/changelog.d/5200.bugfix deleted file mode 100644 index f346c7b0cc..0000000000 --- a/changelog.d/5200.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix worker registration bug caused by ClientReaderSlavedStore being unable to see get_profileinfo. diff --git a/changelog.d/5216.misc b/changelog.d/5216.misc deleted file mode 100644 index dbfa29475f..0000000000 --- a/changelog.d/5216.misc +++ /dev/null @@ -1 +0,0 @@ -Synapse will now serve the experimental "room complexity" API endpoint. 
diff --git a/changelog.d/5220.feature b/changelog.d/5220.feature deleted file mode 100644 index 747098c166..0000000000 --- a/changelog.d/5220.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental support for relations (aka reactions and edits). diff --git a/changelog.d/5221.bugfix b/changelog.d/5221.bugfix deleted file mode 100644 index 03aa363d15..0000000000 --- a/changelog.d/5221.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix race when backfilling in rooms with worker mode. diff --git a/changelog.d/5223.feature b/changelog.d/5223.feature deleted file mode 100644 index cfdf1ad41b..0000000000 --- a/changelog.d/5223.feature +++ /dev/null @@ -1 +0,0 @@ -Ability to configure default room version. diff --git a/changelog.d/5226.misc b/changelog.d/5226.misc deleted file mode 100644 index e1b9dc58a3..0000000000 --- a/changelog.d/5226.misc +++ /dev/null @@ -1 +0,0 @@ -The base classes for the v1 and v2_alpha REST APIs have been unified. diff --git a/changelog.d/5227.misc b/changelog.d/5227.misc deleted file mode 100644 index 32bd7b6009..0000000000 --- a/changelog.d/5227.misc +++ /dev/null @@ -1 +0,0 @@ -Simplifications and comments in do_auth. diff --git a/changelog.d/5230.misc b/changelog.d/5230.misc deleted file mode 100644 index c681bc9748..0000000000 --- a/changelog.d/5230.misc +++ /dev/null @@ -1 +0,0 @@ -Remove urllib3 pin as requests 2.22.0 has been released supporting urllib3 1.25.2. diff --git a/changelog.d/5232.misc b/changelog.d/5232.misc deleted file mode 100644 index 8336bc55dc..0000000000 --- a/changelog.d/5232.misc +++ /dev/null @@ -1 +0,0 @@ -Preparatory work for key-validity features. diff --git a/changelog.d/5233.bugfix b/changelog.d/5233.bugfix deleted file mode 100644 index d71b962160..0000000000 --- a/changelog.d/5233.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix appservice timestamp massaging. diff --git a/changelog.d/5234.misc b/changelog.d/5234.misc deleted file mode 100644 index 8336bc55dc..0000000000 --- a/changelog.d/5234.misc +++ /dev/null @@ -1 +0,0 @@ -Preparatory work for key-validity features. diff --git a/changelog.d/5235.misc b/changelog.d/5235.misc deleted file mode 100644 index 8336bc55dc..0000000000 --- a/changelog.d/5235.misc +++ /dev/null @@ -1 +0,0 @@ -Preparatory work for key-validity features. diff --git a/changelog.d/5236.misc b/changelog.d/5236.misc deleted file mode 100644 index 8336bc55dc..0000000000 --- a/changelog.d/5236.misc +++ /dev/null @@ -1 +0,0 @@ -Preparatory work for key-validity features. diff --git a/changelog.d/5237.misc b/changelog.d/5237.misc deleted file mode 100644 index 8336bc55dc..0000000000 --- a/changelog.d/5237.misc +++ /dev/null @@ -1 +0,0 @@ -Preparatory work for key-validity features. diff --git a/changelog.d/5244.misc b/changelog.d/5244.misc deleted file mode 100644 index 8336bc55dc..0000000000 --- a/changelog.d/5244.misc +++ /dev/null @@ -1 +0,0 @@ -Preparatory work for key-validity features. diff --git a/changelog.d/5249.feature b/changelog.d/5249.feature deleted file mode 100644 index cfdf1ad41b..0000000000 --- a/changelog.d/5249.feature +++ /dev/null @@ -1 +0,0 @@ -Ability to configure default room version. diff --git a/changelog.d/5250.misc b/changelog.d/5250.misc deleted file mode 100644 index 8336bc55dc..0000000000 --- a/changelog.d/5250.misc +++ /dev/null @@ -1 +0,0 @@ -Preparatory work for key-validity features. 
diff --git a/changelog.d/5251.bugfix b/changelog.d/5251.bugfix deleted file mode 100644 index 9a053204b6..0000000000 --- a/changelog.d/5251.bugfix +++ /dev/null @@ -1 +0,0 @@ -Ensure that server_keys fetched via a notary server are correctly signed. \ No newline at end of file diff --git a/changelog.d/5256.bugfix b/changelog.d/5256.bugfix deleted file mode 100644 index 86316ab5dd..0000000000 --- a/changelog.d/5256.bugfix +++ /dev/null @@ -1 +0,0 @@ -Show the correct error when logging out and access token is missing. diff --git a/changelog.d/5257.bugfix b/changelog.d/5257.bugfix deleted file mode 100644 index 8334af9b99..0000000000 --- a/changelog.d/5257.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix error code when there is an invalid parameter on /_matrix/client/r0/publicRooms diff --git a/changelog.d/5258.bugfix b/changelog.d/5258.bugfix deleted file mode 100644 index fb5d44aedb..0000000000 --- a/changelog.d/5258.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix error when downloading thumbnail with missing width/height parameter. diff --git a/changelog.d/5260.feature b/changelog.d/5260.feature deleted file mode 100644 index 01285e965c..0000000000 --- a/changelog.d/5260.feature +++ /dev/null @@ -1 +0,0 @@ -Synapse now more efficiently collates room statistics. diff --git a/changelog.d/5268.bugfix b/changelog.d/5268.bugfix deleted file mode 100644 index 1a5a03bf0a..0000000000 --- a/changelog.d/5268.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix schema update for account validity. diff --git a/changelog.d/5274.bugfix b/changelog.d/5274.bugfix deleted file mode 100644 index 9e14d20289..0000000000 --- a/changelog.d/5274.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. diff --git a/changelog.d/5275.bugfix b/changelog.d/5275.bugfix deleted file mode 100644 index 45a554642a..0000000000 --- a/changelog.d/5275.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix "db txn 'update_presence' from sentinel context" log messages. diff --git a/changelog.d/5276.feature b/changelog.d/5276.feature deleted file mode 100644 index 403dee0862..0000000000 --- a/changelog.d/5276.feature +++ /dev/null @@ -1 +0,0 @@ -Allow configuring a range for the account validity startup job. diff --git a/changelog.d/5277.bugfix b/changelog.d/5277.bugfix deleted file mode 100644 index 371aa2e7fb..0000000000 --- a/changelog.d/5277.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix dropped logcontexts during high outbound traffic. diff --git a/changelog.d/5278.bugfix b/changelog.d/5278.bugfix deleted file mode 100644 index 9e14d20289..0000000000 --- a/changelog.d/5278.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. diff --git a/changelog.d/5282.doc b/changelog.d/5282.doc deleted file mode 100644 index 350e15bc03..0000000000 --- a/changelog.d/5282.doc +++ /dev/null @@ -1 +0,0 @@ -Fix docs on resetting the user directory. diff --git a/changelog.d/5283.misc b/changelog.d/5283.misc deleted file mode 100644 index 002721e566..0000000000 --- a/changelog.d/5283.misc +++ /dev/null @@ -1 +0,0 @@ -Specify the type of reCAPTCHA key to use. diff --git a/changelog.d/5284.misc b/changelog.d/5284.misc deleted file mode 100644 index c4d42ca3d9..0000000000 --- a/changelog.d/5284.misc +++ /dev/null @@ -1 +0,0 @@ -Improve sample config for monthly active user blocking. 
diff --git a/changelog.d/5286.feature b/changelog.d/5286.feature deleted file mode 100644 index 81860279a3..0000000000 --- a/changelog.d/5286.feature +++ /dev/null @@ -1 +0,0 @@ -CAS login will now hit the r0 API, not the deprecated v1 one. diff --git a/changelog.d/5287.misc b/changelog.d/5287.misc deleted file mode 100644 index 1286f1dd08..0000000000 --- a/changelog.d/5287.misc +++ /dev/null @@ -1 +0,0 @@ -Remove spurious debug from MatrixFederationHttpClient.get_json. diff --git a/changelog.d/5288.misc b/changelog.d/5288.misc deleted file mode 100644 index fbf049ba6a..0000000000 --- a/changelog.d/5288.misc +++ /dev/null @@ -1 +0,0 @@ -Improve logging for logcontext leaks. diff --git a/changelog.d/5291.bugfix b/changelog.d/5291.bugfix deleted file mode 100644 index 9e14d20289..0000000000 --- a/changelog.d/5291.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where we leaked extremities when we soft failed events, leading to performance degradation. diff --git a/changelog.d/5293.bugfix b/changelog.d/5293.bugfix deleted file mode 100644 index aa519a8433..0000000000 --- a/changelog.d/5293.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug where it is not possible to get events in the federation format with the request `GET /_matrix/client/r0/rooms/{roomId}/messages`. diff --git a/changelog.d/5294.bugfix b/changelog.d/5294.bugfix deleted file mode 100644 index 5924bda319..0000000000 --- a/changelog.d/5294.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix performance problems with the rooms stats background update. diff --git a/changelog.d/5296.misc b/changelog.d/5296.misc deleted file mode 100644 index 8336bc55dc..0000000000 --- a/changelog.d/5296.misc +++ /dev/null @@ -1 +0,0 @@ -Preparatory work for key-validity features. diff --git a/changelog.d/5299.misc b/changelog.d/5299.misc deleted file mode 100644 index 8336bc55dc..0000000000 --- a/changelog.d/5299.misc +++ /dev/null @@ -1 +0,0 @@ -Preparatory work for key-validity features. diff --git a/changelog.d/5300.bugfix b/changelog.d/5300.bugfix deleted file mode 100644 index 049e93cd5a..0000000000 --- a/changelog.d/5300.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix noisy 'no key for server' logs. diff --git a/changelog.d/5303.misc b/changelog.d/5303.misc deleted file mode 100644 index f6a7f1f8e3..0000000000 --- a/changelog.d/5303.misc +++ /dev/null @@ -1 +0,0 @@ -Clarify that the admin change password API logs the user out. diff --git a/changelog.d/5307.bugfix b/changelog.d/5307.bugfix deleted file mode 100644 index 6b152f4854..0000000000 --- a/changelog.d/5307.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where a notary server would sometimes forget old keys. diff --git a/changelog.d/5309.bugfix b/changelog.d/5309.bugfix deleted file mode 100644 index 97b3527266..0000000000 --- a/changelog.d/5309.bugfix +++ /dev/null @@ -1 +0,0 @@ -Prevent users from setting huge displaynames and avatar URLs. diff --git a/changelog.d/5317.bugfix b/changelog.d/5317.bugfix deleted file mode 100644 index 2709375214..0000000000 --- a/changelog.d/5317.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix handling of failures when processing incoming events where calling `/event_auth` on remote server fails. diff --git a/changelog.d/5320.misc b/changelog.d/5320.misc deleted file mode 100644 index 5b4bf05303..0000000000 --- a/changelog.d/5320.misc +++ /dev/null @@ -1 +0,0 @@ -New installs will now use the v54 full schema, rather than the full schema v14 and applying incremental updates to v54. 
diff --git a/changelog.d/5321.bugfix b/changelog.d/5321.bugfix deleted file mode 100644 index 943a61956d..0000000000 --- a/changelog.d/5321.bugfix +++ /dev/null @@ -1 +0,0 @@ -Ensure that we have an up-to-date copy of the signing key when validating incoming federation requests. diff --git a/changelog.d/5324.feature b/changelog.d/5324.feature deleted file mode 100644 index 01285e965c..0000000000 --- a/changelog.d/5324.feature +++ /dev/null @@ -1 +0,0 @@ -Synapse now more efficiently collates room statistics. diff --git a/changelog.d/5328.misc b/changelog.d/5328.misc deleted file mode 100644 index e1b9dc58a3..0000000000 --- a/changelog.d/5328.misc +++ /dev/null @@ -1 +0,0 @@ -The base classes for the v1 and v2_alpha REST APIs have been unified. diff --git a/changelog.d/5332.misc b/changelog.d/5332.misc deleted file mode 100644 index dcfac4eac9..0000000000 --- a/changelog.d/5332.misc +++ /dev/null @@ -1 +0,0 @@ -Improve docstrings on MatrixFederationClient. diff --git a/changelog.d/5333.bugfix b/changelog.d/5333.bugfix deleted file mode 100644 index cb05a6dd63..0000000000 --- a/changelog.d/5333.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix various problems which made the signing-key notary server time out for some requests. \ No newline at end of file diff --git a/changelog.d/5334.bugfix b/changelog.d/5334.bugfix deleted file mode 100644 index ed141e0918..0000000000 --- a/changelog.d/5334.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug which would make certain operations (such as room joins) block for 20 minutes while attemoting to fetch verification keys. diff --git a/changelog.d/5335.bugfix b/changelog.d/5335.bugfix deleted file mode 100644 index 7318cbe35e..0000000000 --- a/changelog.d/5335.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug where we could rapidly mark a server as unreachable even though it was only down for a few minutes. diff --git a/changelog.d/5340.bugfix b/changelog.d/5340.bugfix deleted file mode 100644 index 931ee904e1..0000000000 --- a/changelog.d/5340.bugfix +++ /dev/null @@ -1,2 +0,0 @@ -Fix a bug where we could rapidly mark a server as unreachable even though it was only down for a few minutes. - diff --git a/changelog.d/5341.bugfix b/changelog.d/5341.bugfix deleted file mode 100644 index a7aaa95f39..0000000000 --- a/changelog.d/5341.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug where account validity renewal emails could only be sent when email notifs were enabled. diff --git a/changelog.d/5342.bugfix b/changelog.d/5342.bugfix deleted file mode 100644 index 66a3076292..0000000000 --- a/changelog.d/5342.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix failure when fetching batches of events during backfill, etc. diff --git a/changelog.d/5343.misc b/changelog.d/5343.misc deleted file mode 100644 index 8336bc55dc..0000000000 --- a/changelog.d/5343.misc +++ /dev/null @@ -1 +0,0 @@ -Preparatory work for key-validity features. diff --git a/changelog.d/5344.misc b/changelog.d/5344.misc deleted file mode 100644 index a20c563bf1..0000000000 --- a/changelog.d/5344.misc +++ /dev/null @@ -1 +0,0 @@ -Clean up FederationClient.get_events for clarity. diff --git a/changelog.d/5347.misc b/changelog.d/5347.misc deleted file mode 100644 index 8336bc55dc..0000000000 --- a/changelog.d/5347.misc +++ /dev/null @@ -1 +0,0 @@ -Preparatory work for key-validity features. 
diff --git a/changelog.d/5348.bugfix b/changelog.d/5348.bugfix deleted file mode 100644 index 8d396c7990..0000000000 --- a/changelog.d/5348.bugfix +++ /dev/null @@ -1 +0,0 @@ -Add a new room version where the timestamps on events are checked against the validity periods on signing keys. \ No newline at end of file diff --git a/changelog.d/5352.bugfix b/changelog.d/5352.bugfix deleted file mode 100644 index 2ffefe5a68..0000000000 --- a/changelog.d/5352.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix room stats and presence background updates to correctly handle missing events. diff --git a/changelog.d/5353.misc b/changelog.d/5353.misc deleted file mode 100644 index 436245fb11..0000000000 --- a/changelog.d/5353.misc +++ /dev/null @@ -1,2 +0,0 @@ -Various improvements to debug logging. - diff --git a/changelog.d/5354.bugfix b/changelog.d/5354.bugfix deleted file mode 100644 index 0c56032b30..0000000000 --- a/changelog.d/5354.bugfix +++ /dev/null @@ -1,2 +0,0 @@ -Add a new room version where the timestamps on events are checked against the validity periods on signing keys. - diff --git a/changelog.d/5355.bugfix b/changelog.d/5355.bugfix deleted file mode 100644 index e1955a7403..0000000000 --- a/changelog.d/5355.bugfix +++ /dev/null @@ -1 +0,0 @@ -Include left members in room summaries' heroes. diff --git a/changelog.d/5356.misc b/changelog.d/5356.misc deleted file mode 100644 index 8336bc55dc..0000000000 --- a/changelog.d/5356.misc +++ /dev/null @@ -1 +0,0 @@ -Preparatory work for key-validity features. diff --git a/changelog.d/5357.doc b/changelog.d/5357.doc deleted file mode 100644 index 27cba49641..0000000000 --- a/changelog.d/5357.doc +++ /dev/null @@ -1 +0,0 @@ -Fix notes about ACME in the MSC1711 faq. diff --git a/changelog.d/5359.feature b/changelog.d/5359.feature deleted file mode 100644 index 2a03939834..0000000000 --- a/changelog.d/5359.feature +++ /dev/null @@ -1 +0,0 @@ -Validate federation server TLS certificates by default (implements [MSC1711](https://github.com/matrix-org/matrix-doc/blob/master/proposals/1711-x509-for-federation.md)). diff --git a/changelog.d/5360.feature b/changelog.d/5360.feature deleted file mode 100644 index 01fbb3b06d..0000000000 --- a/changelog.d/5360.feature +++ /dev/null @@ -1 +0,0 @@ -Update /_matrix/client/versions to reference support for r0.5.0. diff --git a/changelog.d/5361.feature b/changelog.d/5361.feature deleted file mode 100644 index 10768cdad3..0000000000 --- a/changelog.d/5361.feature +++ /dev/null @@ -1 +0,0 @@ -Add a script to generate new signing-key files. diff --git a/changelog.d/5362.bugfix b/changelog.d/5362.bugfix deleted file mode 100644 index 1c8b19182c..0000000000 --- a/changelog.d/5362.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix `federation_custom_ca_list` configuration option. diff --git a/changelog.d/5369.bugfix b/changelog.d/5369.bugfix deleted file mode 100644 index cc61618f39..0000000000 --- a/changelog.d/5369.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix missing logcontext warnings on shutdown. diff --git a/changelog.d/5370.misc b/changelog.d/5370.misc deleted file mode 100644 index b0473ef280..0000000000 --- a/changelog.d/5370.misc +++ /dev/null @@ -1 +0,0 @@ -Don't run CI build checks until sample config check has passed. diff --git a/changelog.d/5371.feature b/changelog.d/5371.feature deleted file mode 100644 index 7f960630e0..0000000000 --- a/changelog.d/5371.feature +++ /dev/null @@ -1 +0,0 @@ -Update upgrade and installation guides ahead of 1.0. 
diff --git a/changelog.d/5374.feature b/changelog.d/5374.feature deleted file mode 100644 index 17937637ab..0000000000 --- a/changelog.d/5374.feature +++ /dev/null @@ -1 +0,0 @@ -Replace the `perspectives` configuration section with `trusted_key_servers`, and make validating the signatures on responses optional (since TLS will do this job for us). diff --git a/changelog.d/5377.feature b/changelog.d/5377.feature deleted file mode 100644 index 6aae41847a..0000000000 --- a/changelog.d/5377.feature +++ /dev/null @@ -1 +0,0 @@ -Add ability to perform password reset via email without trusting the identity server. diff --git a/changelog.d/5379.feature b/changelog.d/5379.feature deleted file mode 100644 index 7b64786fe6..0000000000 --- a/changelog.d/5379.feature +++ /dev/null @@ -1 +0,0 @@ -Set default room version to v4. diff --git a/changelog.d/5380.misc b/changelog.d/5380.misc deleted file mode 100644 index 099bba414c..0000000000 --- a/changelog.d/5380.misc +++ /dev/null @@ -1 +0,0 @@ -Automatically retry buildkite builds (max twice) when an agent is lost. diff --git a/synapse/__init__.py b/synapse/__init__.py index d0e8d7c21b..77a4cfc3a5 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "0.99.5.2" +__version__ = "1.0.0rc1" From a46ef1e3a4eea55919d86da322628a0713e6ba2d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 7 Jun 2019 10:29:35 +0100 Subject: [PATCH 357/429] Handle HttpResponseException when using federation client. Otherwise we just log exceptions everywhere. --- synapse/groups/attestations.py | 4 ++-- synapse/handlers/groups_local.py | 4 +--- synapse/handlers/profile.py | 29 ++++++++++++++++------------- 3 files changed, 19 insertions(+), 18 deletions(-) diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index e5dda1975f..cacc6026fa 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -42,7 +42,7 @@ from signedjson.sign import sign_json from twisted.internet import defer -from synapse.api.errors import RequestSendFailed, SynapseError +from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError from synapse.metrics.background_process_metrics import run_as_background_process from synapse.types import get_domain_from_id from synapse.util.logcontext import run_in_background @@ -194,7 +194,7 @@ class GroupAttestionRenewer(object): yield self.store.update_attestation_renewal( group_id, user_id, attestation ) - except RequestSendFailed as e: + except (RequestSendFailed, HttpResponseException) as e: logger.warning( "Failed to renew attestation of %r in %r: %s", user_id, group_id, e, diff --git a/synapse/handlers/groups_local.py b/synapse/handlers/groups_local.py index 02c508acec..f60ace02e8 100644 --- a/synapse/handlers/groups_local.py +++ b/synapse/handlers/groups_local.py @@ -49,9 +49,7 @@ def _create_rerouter(func_name): def http_response_errback(failure): failure.trap(HttpResponseException) e = failure.value - if e.code == 403: - raise e.to_synapse_error() - return failure + raise e.to_synapse_error() def request_failed_errback(failure): failure.trap(RequestSendFailed) diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index a5fc6c5dbf..3e04233394 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -15,12 +15,15 @@ import logging +from six import raise_from + from twisted.internet import defer from synapse.api.errors import ( AuthError, - CodeMessageException, Codes, + 
HttpResponseException, + RequestSendFailed, StoreError, SynapseError, ) @@ -85,10 +88,10 @@ class BaseProfileHandler(BaseHandler): ignore_backoff=True, ) defer.returnValue(result) - except CodeMessageException as e: - if e.code != 404: - logger.exception("Failed to get displayname") - raise + except RequestSendFailed as e: + raise_from(SynapseError(502, "Failed to fetch profile"), e) + except HttpResponseException as e: + raise e.to_synapse_error() @defer.inlineCallbacks def get_profile_from_cache(self, user_id): @@ -142,10 +145,10 @@ class BaseProfileHandler(BaseHandler): }, ignore_backoff=True, ) - except CodeMessageException as e: - if e.code != 404: - logger.exception("Failed to get displayname") - raise + except RequestSendFailed as e: + raise_from(SynapseError(502, "Failed to fetch profile"), e) + except HttpResponseException as e: + raise e.to_synapse_error() defer.returnValue(result["displayname"]) @@ -208,10 +211,10 @@ class BaseProfileHandler(BaseHandler): }, ignore_backoff=True, ) - except CodeMessageException as e: - if e.code != 404: - logger.exception("Failed to get avatar_url") - raise + except RequestSendFailed as e: + raise_from(SynapseError(502, "Failed to fetch profile"), e) + except HttpResponseException as e: + raise e.to_synapse_error() defer.returnValue(result["avatar_url"]) From 8e0cee90d291790a088fb88e4120f84bf3420a65 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 7 Jun 2019 10:31:48 +0100 Subject: [PATCH 358/429] Add a sponsor button (#5382) Add a sponsor button with links to matrixdotorg's patreon and liberapay accounts. --- .github/FUNDING.yml | 3 +++ changelog.d/5382.misc | 1 + 2 files changed, 4 insertions(+) create mode 100644 .github/FUNDING.yml create mode 100644 changelog.d/5382.misc diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 0000000000..c21d66665e --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1,3 @@ +# One username per supported platform and one custom link +patreon: matrixdotorg +custom: https://liberapay.com/matrixdotorg diff --git a/changelog.d/5382.misc b/changelog.d/5382.misc new file mode 100644 index 0000000000..060cbba2a9 --- /dev/null +++ b/changelog.d/5382.misc @@ -0,0 +1 @@ +Add a sponsor button to the repo. From a2419b27fe9df598e9c4f3236fed6c7600fc7c86 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 7 Jun 2019 10:31:53 +0100 Subject: [PATCH 359/429] Newsfile --- changelog.d/5383.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5383.misc diff --git a/changelog.d/5383.misc b/changelog.d/5383.misc new file mode 100644 index 0000000000..9dd5d1df93 --- /dev/null +++ b/changelog.d/5383.misc @@ -0,0 +1 @@ +Don't log non-200 responses from federation queries as exceptions. 
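The profile and group-attestation changes above converge on a single pattern for failures of outbound federation requests: transport-level failures (`RequestSendFailed`) become a generic 502, while HTTP-level errors (`HttpResponseException`) are translated into the matching client-facing error rather than logged as stack traces. A minimal sketch of that pattern follows; only the exception classes and the `raise_from`/`to_synapse_error` calls are taken from the diffs above, while the wrapper function and its `make_request` argument are illustrative:

    from six import raise_from
    from twisted.internet import defer

    from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError

    @defer.inlineCallbacks
    def query_remote(make_request):
        """Run `make_request` (a zero-argument callable returning a Deferred) and
        translate federation failures into client-facing errors."""
        try:
            result = yield make_request()
        except RequestSendFailed as e:
            # no usable response at all (connection refused, timeout, ...):
            # report a 502 and keep the original exception chained for debugging
            raise_from(SynapseError(502, "Failed to contact remote server"), e)
        except HttpResponseException as e:
            # the remote server answered with a non-200: forward its error code
            # instead of logging a stack trace locally
            raise e.to_synapse_error()
        defer.returnValue(result)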
From 6745b7de6d05ff99d70b2065a99a72efac10a5e7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 7 Jun 2019 10:47:31 +0100 Subject: [PATCH 360/429] Handle failing to talk to master over replication --- synapse/replication/http/_base.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index e81456ab2b..0a432a16fa 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -17,11 +17,17 @@ import abc import logging import re +from six import raise_from from six.moves import urllib from twisted.internet import defer -from synapse.api.errors import CodeMessageException, HttpResponseException +from synapse.api.errors import ( + CodeMessageException, + HttpResponseException, + RequestSendFailed, + SynapseError, +) from synapse.util.caches.response_cache import ResponseCache from synapse.util.stringutils import random_string @@ -175,6 +181,8 @@ class ReplicationEndpoint(object): # on the master process that we should send to the client. (And # importantly, not stack traces everywhere) raise e.to_synapse_error() + except RequestSendFailed as e: + raise_from(SynapseError(502, "Failed to talk to master"), e) defer.returnValue(result) From 928d1ccd73ddce5af99539ad800987d2f5bd2942 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 7 Jun 2019 10:57:39 +0100 Subject: [PATCH 361/429] Fix email notifications for large unnamed rooms. When we try and calculate a description for a room for with no name but multiple other users we threw an exception (due to trying to subscript result of `dict.values()`). --- synapse/push/presentable_names.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/synapse/push/presentable_names.py b/synapse/push/presentable_names.py index eef6e18c2e..0c66702325 100644 --- a/synapse/push/presentable_names.py +++ b/synapse/push/presentable_names.py @@ -162,6 +162,17 @@ def calculate_room_name(store, room_state_ids, user_id, fallback_to_members=True def descriptor_from_member_events(member_events): + """Get a description of the room based on the member events. 
+ + Args: + member_events (Iterable[FrozenEvent]) + + Returns: + str + """ + + member_events = list(member_events) + if len(member_events) == 0: return "nobody" elif len(member_events) == 1: From 8182a1cfb523fb1e8d328716111e98be3a1c5c35 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 7 Jun 2019 11:09:08 +0100 Subject: [PATCH 362/429] Refactor email tests --- tests/push/test_email.py | 64 ++++++++++++++++++++++++++-------------- 1 file changed, 42 insertions(+), 22 deletions(-) diff --git a/tests/push/test_email.py b/tests/push/test_email.py index 9cdde1a9bd..62b3c2a99d 100644 --- a/tests/push/test_email.py +++ b/tests/push/test_email.py @@ -15,6 +15,7 @@ import os +import attr import pkg_resources from twisted.internet.defer import Deferred @@ -30,6 +31,13 @@ except Exception: load_jinja2_templates = None +@attr.s +class _User(object): + "Helper wrapper for user ID and access token" + id = attr.ib() + token = attr.ib() + + class EmailPusherTests(HomeserverTestCase): skip = "No Jinja installed" if not load_jinja2_templates else None @@ -77,25 +85,32 @@ class EmailPusherTests(HomeserverTestCase): return hs - def test_sends_email(self): - + def prepare(self, reactor, clock, hs): # Register the user who gets notified - user_id = self.register_user("user", "pass") - access_token = self.login("user", "pass") + self.user_id = self.register_user("user", "pass") + self.access_token = self.login("user", "pass") - # Register the user who sends the message - other_user_id = self.register_user("otheruser", "pass") - other_access_token = self.login("otheruser", "pass") + # Register other users + self.others = [ + _User( + id=self.register_user("otheruser1", "pass"), + token=self.login("otheruser1", "pass"), + ), + _User( + id=self.register_user("otheruser2", "pass"), + token=self.login("otheruser2", "pass"), + ), + ] # Register the pusher user_tuple = self.get_success( - self.hs.get_datastore().get_user_by_access_token(access_token) + self.hs.get_datastore().get_user_by_access_token(self.access_token) ) token_id = user_tuple["token_id"] self.get_success( self.hs.get_pusherpool().add_pusher( - user_id=user_id, + user_id=self.user_id, access_token=token_id, kind="email", app_id="m.email", @@ -107,22 +122,27 @@ class EmailPusherTests(HomeserverTestCase): ) ) - # Create a room - room = self.helper.create_room_as(user_id, tok=access_token) - - # Invite the other person - self.helper.invite(room=room, src=user_id, tok=access_token, targ=other_user_id) - - # The other user joins - self.helper.join(room=room, user=other_user_id, tok=other_access_token) + def test_simple_sends_email(self): + # Create a simple room with two users + room = self.helper.create_room_as(self.user_id, tok=self.access_token) + self.helper.invite( + room=room, src=self.user_id, tok=self.access_token, targ=self.others[0].id, + ) + self.helper.join(room=room, user=self.others[0].id, tok=self.others[0].token) # The other user sends some messages - self.helper.send(room, body="Hi!", tok=other_access_token) - self.helper.send(room, body="There!", tok=other_access_token) + self.helper.send(room, body="Hi!", tok=self.others[0].token) + self.helper.send(room, body="There!", tok=self.others[0].token) + + # We should get emailed about that message + self._check_for_mail() + + def _check_for_mail(self): + "Check that the user receives an email notification" # Get the stream ordering before it gets sent pushers = self.get_success( - self.hs.get_datastore().get_pushers_by(dict(user_name=user_id)) + 
self.hs.get_datastore().get_pushers_by(dict(user_name=self.user_id)) ) self.assertEqual(len(pushers), 1) last_stream_ordering = pushers[0]["last_stream_ordering"] @@ -132,7 +152,7 @@ class EmailPusherTests(HomeserverTestCase): # It hasn't succeeded yet, so the stream ordering shouldn't have moved pushers = self.get_success( - self.hs.get_datastore().get_pushers_by(dict(user_name=user_id)) + self.hs.get_datastore().get_pushers_by(dict(user_name=self.user_id)) ) self.assertEqual(len(pushers), 1) self.assertEqual(last_stream_ordering, pushers[0]["last_stream_ordering"]) @@ -149,7 +169,7 @@ class EmailPusherTests(HomeserverTestCase): # The stream ordering has increased pushers = self.get_success( - self.hs.get_datastore().get_pushers_by(dict(user_name=user_id)) + self.hs.get_datastore().get_pushers_by(dict(user_name=self.user_id)) ) self.assertEqual(len(pushers), 1) self.assertTrue(pushers[0]["last_stream_ordering"] > last_stream_ordering) From 2ebeda48b2e6ba522fe049ee7ef13450f6839e1b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 7 Jun 2019 12:10:23 +0100 Subject: [PATCH 363/429] Add test --- synapse/push/emailpusher.py | 19 +++++++++++++++++++ synapse/push/pusherpool.py | 30 +++++++++++++++++++++++------- tests/push/test_email.py | 29 ++++++++++++++++++++++++++++- 3 files changed, 70 insertions(+), 8 deletions(-) diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index e8ee67401f..c89a8438a9 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -114,6 +114,21 @@ class EmailPusher(object): run_as_background_process("emailpush.process", self._process) + def _pause_processing(self): + """Used by tests to temporarily pause processing of events. + + Asserts that its not currently processing. + """ + assert not self._is_processing + self._is_processing = True + + def _resume_processing(self): + """Used by tests to resume processing of events after pausing. 
+ """ + assert self._is_processing + self._is_processing = False + self._start_processing() + @defer.inlineCallbacks def _process(self): # we should never get here if we are already processing @@ -215,6 +230,10 @@ class EmailPusher(object): @defer.inlineCallbacks def save_last_stream_ordering_and_success(self, last_stream_ordering): + if last_stream_ordering is None: + # This happens if we haven't yet processed anything + return + self.last_stream_ordering = last_stream_ordering yield self.store.update_pusher_last_stream_ordering_and_success( self.app_id, self.email, self.user_id, diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 40a7709c09..63c583565f 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -60,6 +60,11 @@ class PusherPool: def add_pusher(self, user_id, access_token, kind, app_id, app_display_name, device_display_name, pushkey, lang, data, profile_tag=""): + """Creates a new pusher and adds it to the pool + + Returns: + Deferred[EmailPusher|HttpPusher] + """ time_now_msec = self.clock.time_msec() # we try to create the pusher just to validate the config: it @@ -103,7 +108,9 @@ class PusherPool: last_stream_ordering=last_stream_ordering, profile_tag=profile_tag, ) - yield self.start_pusher_by_id(app_id, pushkey, user_id) + pusher = yield self.start_pusher_by_id(app_id, pushkey, user_id) + + defer.returnValue(pusher) @defer.inlineCallbacks def remove_pushers_by_app_id_and_pushkey_not_user(self, app_id, pushkey, @@ -184,7 +191,11 @@ class PusherPool: @defer.inlineCallbacks def start_pusher_by_id(self, app_id, pushkey, user_id): - """Look up the details for the given pusher, and start it""" + """Look up the details for the given pusher, and start it + + Returns: + Deferred[EmailPusher|HttpPusher|None]: The pusher started, if any + """ if not self._should_start_pushers: return @@ -192,13 +203,16 @@ class PusherPool: app_id, pushkey ) - p = None + pusher_dict = None for r in resultlist: if r['user_name'] == user_id: - p = r + pusher_dict = r - if p: - yield self._start_pusher(p) + pusher = None + if pusher_dict: + pusher = yield self._start_pusher(pusher_dict) + + defer.returnValue(pusher) @defer.inlineCallbacks def _start_pushers(self): @@ -224,7 +238,7 @@ class PusherPool: pusherdict (dict): Returns: - None + Deferred[EmailPusher|HttpPusher] """ try: p = self.pusher_factory.create_pusher(pusherdict) @@ -270,6 +284,8 @@ class PusherPool: p.on_started(have_notifs) + defer.returnValue(p) + @defer.inlineCallbacks def remove_pusher(self, app_id, pushkey, user_id): appid_pushkey = "%s:%s" % (app_id, pushkey) diff --git a/tests/push/test_email.py b/tests/push/test_email.py index 62b3c2a99d..c10b65d4b8 100644 --- a/tests/push/test_email.py +++ b/tests/push/test_email.py @@ -108,7 +108,7 @@ class EmailPusherTests(HomeserverTestCase): ) token_id = user_tuple["token_id"] - self.get_success( + self.pusher = self.get_success( self.hs.get_pusherpool().add_pusher( user_id=self.user_id, access_token=token_id, @@ -137,6 +137,33 @@ class EmailPusherTests(HomeserverTestCase): # We should get emailed about that message self._check_for_mail() + def test_multiple_members_email(self): + # We want to test multiple notifications, so we pause processing of push + # while we send messages. 
+ self.pusher._pause_processing() + + # Create a simple room with multiple other users + room = self.helper.create_room_as(self.user_id, tok=self.access_token) + + for other in self.others: + self.helper.invite( + room=room, src=self.user_id, tok=self.access_token, targ=other.id, + ) + self.helper.join(room=room, user=other.id, tok=other.token) + + # The other users send some messages + self.helper.send(room, body="Hi!", tok=self.others[0].token) + self.helper.send(room, body="There!", tok=self.others[1].token) + self.helper.send(room, body="There!", tok=self.others[1].token) + + # Nothing should have happened yet, as we're paused. + assert not self.email_attempts + + self.pusher._resume_processing() + + # We should get emailed about those messages + self._check_for_mail() + def _check_for_mail(self): "Check that the user receives an email notification" From a099926fcc1737f5465a6a112496e8da68293fc9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 7 Jun 2019 12:13:14 +0100 Subject: [PATCH 364/429] Newsfile --- changelog.d/5388.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5388.bugfix diff --git a/changelog.d/5388.bugfix b/changelog.d/5388.bugfix new file mode 100644 index 0000000000..503e830915 --- /dev/null +++ b/changelog.d/5388.bugfix @@ -0,0 +1 @@ +Fix email notifications for unnamed rooms with multiple people. From 837340bdce7d0175a8f5fae2c4ed34ac1334d431 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 7 Jun 2019 12:24:07 +0100 Subject: [PATCH 365/429] Only start background group attestation renewals on master --- synapse/groups/attestations.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/synapse/groups/attestations.py b/synapse/groups/attestations.py index e5dda1975f..469ab8ac7b 100644 --- a/synapse/groups/attestations.py +++ b/synapse/groups/attestations.py @@ -132,9 +132,10 @@ class GroupAttestionRenewer(object): self.is_mine_id = hs.is_mine_id self.attestations = hs.get_groups_attestation_signing() - self._renew_attestations_loop = self.clock.looping_call( - self._start_renew_attestations, 30 * 60 * 1000, - ) + if not hs.config.worker_app: + self._renew_attestations_loop = self.clock.looping_call( + self._start_renew_attestations, 30 * 60 * 1000, + ) @defer.inlineCallbacks def on_renew_attestation(self, group_id, user_id, content): From 2cca90dd40a5411a3b01b57b5f74b472b2de2cfc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 7 Jun 2019 12:26:59 +0100 Subject: [PATCH 366/429] Newsfile --- changelog.d/5389.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5389.bugfix diff --git a/changelog.d/5389.bugfix b/changelog.d/5389.bugfix new file mode 100644 index 0000000000..dd648e26c8 --- /dev/null +++ b/changelog.d/5389.bugfix @@ -0,0 +1 @@ +Fix exceptions in federation reader worker caused by attempting to renew attestations, which should only happen on master worker. From 95d38afe96bfb38e02de9767603b2655c07a7e0f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 7 Jun 2019 12:34:52 +0100 Subject: [PATCH 367/429] Don't log exception when failing to fetch remote content. In particular, let's not log stack traces when we stop processing becuase the response body was too large. 
--- synapse/http/client.py | 13 +++++++++---- synapse/rest/media/v1/media_repository.py | 6 ++++-- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/synapse/http/client.py b/synapse/http/client.py index 77fe68818b..5c073fff07 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -17,7 +17,7 @@ import logging from io import BytesIO -from six import text_type +from six import raise_from, text_type from six.moves import urllib import treq @@ -542,10 +542,15 @@ class SimpleHttpClient(object): length = yield make_deferred_yieldable( _readBodyToFile(response, output_stream, max_size) ) + except SynapseError: + # This can happen e.g. because the body is too large. + raise except Exception as e: - logger.exception("Failed to download body") - raise SynapseError( - 502, ("Failed to download remote body: %s" % e), Codes.UNKNOWN + raise_from( + SynapseError( + 502, ("Failed to download remote body: %s" % e), + ), + e ) defer.returnValue( diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/rest/media/v1/media_repository.py index 8569677355..a4929dd5db 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/rest/media/v1/media_repository.py @@ -386,8 +386,10 @@ class MediaRepository(object): raise SynapseError(502, "Failed to fetch remote media") except SynapseError: - logger.exception("Failed to fetch remote media %s/%s", - server_name, media_id) + logger.warn( + "Failed to fetch remote media %s/%s", + server_name, media_id, + ) raise except NotRetryingDestination: logger.warn("Not retrying destination %r", server_name) From 5009d988da81c328d4f13fc8ceb89f85364a44bc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 7 Jun 2019 12:37:38 +0100 Subject: [PATCH 368/429] Newsfile --- changelog.d/5390.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5390.bugfix diff --git a/changelog.d/5390.bugfix b/changelog.d/5390.bugfix new file mode 100644 index 0000000000..e7b7483cf2 --- /dev/null +++ b/changelog.d/5390.bugfix @@ -0,0 +1 @@ +Fix handling of failures fetching remote content to not log failures as exceptions. From 2decc92e2f1f42323475efe54be9f672388fc713 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Sun, 9 Jun 2019 02:20:23 +0100 Subject: [PATCH 369/429] Liberapay is now officially recognised, update FUNDING.yml (#5386) --- .github/FUNDING.yml | 3 ++- changelog.d/5386.misc | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/5386.misc diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml index c21d66665e..1a57677a0e 100644 --- a/.github/FUNDING.yml +++ b/.github/FUNDING.yml @@ -1,3 +1,4 @@ # One username per supported platform and one custom link patreon: matrixdotorg -custom: https://liberapay.com/matrixdotorg +liberapay: matrixdotorg +custom: https://paypal.me/matrixdotorg diff --git a/changelog.d/5386.misc b/changelog.d/5386.misc new file mode 100644 index 0000000000..060cbba2a9 --- /dev/null +++ b/changelog.d/5386.misc @@ -0,0 +1 @@ +Add a sponsor button to the repo. From c2b6e945e1c455c10e09684a0e43e19937db604c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Sun, 9 Jun 2019 14:01:32 +0100 Subject: [PATCH 370/429] Share an SSL context object between SSL connections This involves changing how the info callbacks work. 
--- synapse/crypto/context_factory.py | 153 ++++++++++++++++++------------ 1 file changed, 91 insertions(+), 62 deletions(-) diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py index 59ea087e66..aa8b20fe7d 100644 --- a/synapse/crypto/context_factory.py +++ b/synapse/crypto/context_factory.py @@ -15,10 +15,12 @@ import logging +from service_identity import VerificationError +from service_identity.pyopenssl import verify_hostname from zope.interface import implementer from OpenSSL import SSL, crypto -from twisted.internet._sslverify import ClientTLSOptions, _defaultCurveName +from twisted.internet._sslverify import _defaultCurveName from twisted.internet.abstract import isIPAddress, isIPv6Address from twisted.internet.interfaces import IOpenSSLClientConnectionCreator from twisted.internet.ssl import CertificateOptions, ContextFactory, platformTrust @@ -70,65 +72,19 @@ def _idnaBytes(text): return idna.encode(text) -def _tolerateErrors(wrapped): - """ - Wrap up an info_callback for pyOpenSSL so that if something goes wrong - the error is immediately logged and the connection is dropped if possible. - This is a copy of twisted.internet._sslverify._tolerateErrors. For - documentation, see the twisted documentation. - """ - - def infoCallback(connection, where, ret): - try: - return wrapped(connection, where, ret) - except: # noqa: E722, taken from the twisted implementation - f = Failure() - logger.exception("Error during info_callback") - connection.get_app_data().failVerification(f) - - return infoCallback - - -@implementer(IOpenSSLClientConnectionCreator) -class ClientTLSOptionsNoVerify(object): - """ - Client creator for TLS without certificate identity verification. This is a - copy of twisted.internet._sslverify.ClientTLSOptions with the identity - verification left out. For documentation, see the twisted documentation. - """ - - def __init__(self, hostname, ctx): - self._ctx = ctx - - if isIPAddress(hostname) or isIPv6Address(hostname): - self._hostnameBytes = hostname.encode('ascii') - self._sendSNI = False - else: - self._hostnameBytes = _idnaBytes(hostname) - self._sendSNI = True - - ctx.set_info_callback(_tolerateErrors(self._identityVerifyingInfoCallback)) - - def clientConnectionForTLS(self, tlsProtocol): - context = self._ctx - connection = SSL.Connection(context, None) - connection.set_app_data(tlsProtocol) - return connection - - def _identityVerifyingInfoCallback(self, connection, where, ret): - # Literal IPv4 and IPv6 addresses are not permitted - # as host names according to the RFCs - if where & SSL.SSL_CB_HANDSHAKE_START and self._sendSNI: - connection.set_tlsext_host_name(self._hostnameBytes) - - class ClientTLSOptionsFactory(object): - """Factory for Twisted ClientTLSOptions that are used to make connections - to remote servers for federation.""" + """Factory for Twisted SSLClientConnectionCreators that are used to make connections + to remote servers for federation. + + Uses one of two OpenSSL context objects for all connections, depending on whether + we should do SSL certificate verification. + + get_options decides whether we should do SSL certificate verification and + constructs an SSLClientConnectionCreator factory accordingly. 
+ """ def __init__(self, config): self._config = config - self._options_noverify = CertificateOptions() # Check if we're using a custom list of a CA certificates trust_root = config.federation_ca_trust_root @@ -136,11 +92,13 @@ class ClientTLSOptionsFactory(object): # Use CA root certs provided by OpenSSL trust_root = platformTrust() - self._options_verify = CertificateOptions(trustRoot=trust_root) + self._verify_ssl_context = CertificateOptions(trustRoot=trust_root).getContext() + self._verify_ssl_context.set_info_callback(self._context_info_cb) + + self._no_verify_ssl_context = CertificateOptions().getContext() + self._no_verify_ssl_context.set_info_callback(self._context_info_cb) def get_options(self, host): - # Use _makeContext so that we get a fresh OpenSSL CTX each time. - # Check if certificate verification has been enabled should_verify = self._config.federation_verify_certificates @@ -151,6 +109,77 @@ class ClientTLSOptionsFactory(object): should_verify = False break - if should_verify: - return ClientTLSOptions(host, self._options_verify._makeContext()) - return ClientTLSOptionsNoVerify(host, self._options_noverify._makeContext()) + ssl_context = ( + self._verify_ssl_context if should_verify else self._no_verify_ssl_context + ) + + return SSLClientConnectionCreator(host, ssl_context, should_verify) + + @staticmethod + def _context_info_cb(ssl_connection, where, ret): + """The 'information callback' for our openssl context object.""" + # we assume that the app_data on the connection object has been set to + # a TLSMemoryBIOProtocol object. (This is done by SSLClientConnectionCreator) + tls_protocol = ssl_connection.get_app_data() + try: + # ... we further assume that SSLClientConnectionCreator has set the + # 'tls_verifier' attribute to a ConnectionVerifier object. + tls_protocol.tls_verifier.verify_context_info_cb(ssl_connection, where) + except: # noqa: E722, taken from the twisted implementation + logger.exception("Error during info_callback") + f = Failure() + tls_protocol.failVerification(f) + + +@implementer(IOpenSSLClientConnectionCreator) +class SSLClientConnectionCreator(object): + """Creates openssl connection objects for client connections. + + Replaces twisted.internet.ssl.ClientTLSOptions + """ + def __init__(self, hostname, ctx, verify_certs): + self._ctx = ctx + self._verifier = ConnectionVerifier(hostname, verify_certs) + + def clientConnectionForTLS(self, tls_protocol): + context = self._ctx + connection = SSL.Connection(context, None) + + # as per twisted.internet.ssl.ClientTLSOptions, we set the application + # data to our TLSMemoryBIOProtocol... + connection.set_app_data(tls_protocol) + + # ... and we also gut-wrench a 'tls_verifier' attribute into the + # tls_protocol so that the SSL context's info callback has something to + # call to do the cert verification. + setattr(tls_protocol, "tls_verifier", self._verifier) + return connection + + +class ConnectionVerifier(object): + """Set the SNI, and do cert verification + + This is a thing which is attached to the TLSMemoryBIOProtocol, and is called by + the ssl context's info callback. 
+ """ + def __init__(self, hostname, verify_certs): + self._verify_certs = verify_certs + if isIPAddress(hostname) or isIPv6Address(hostname): + self._hostnameBytes = hostname.encode('ascii') + self._sendSNI = False + else: + self._hostnameBytes = _idnaBytes(hostname) + self._sendSNI = True + + self._hostnameASCII = self._hostnameBytes.decode("ascii") + + def verify_context_info_cb(self, ssl_connection, where): + if where & SSL.SSL_CB_HANDSHAKE_START and self._sendSNI: + ssl_connection.set_tlsext_host_name(self._hostnameBytes) + if where & SSL.SSL_CB_HANDSHAKE_DONE and self._verify_certs: + try: + verify_hostname(ssl_connection, self._hostnameASCII) + except VerificationError: + f = Failure() + tls_protocol = ssl_connection.get_app_data() + tls_protocol.failVerification(f) From 88d7182adaef8711bf3cc80ff604e566e517b6e6 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 10 Jun 2019 10:33:00 +0100 Subject: [PATCH 371/429] Improve startup checks for insecure notary configs (#5392) It's not really a problem to trust notary responses signed by the old key so long as we are also doing TLS validation. This commit adds a check to the config parsing code at startup to check that we do not have the insecure matrix.org key without tls validation, and refuses to start without it. This allows us to remove the rather alarming-looking warning which happens at runtime. --- changelog.d/5392.bugfix | 1 + synapse/config/key.py | 27 +++++++++++++++++++++++---- synapse/crypto/keyring.py | 7 ------- 3 files changed, 24 insertions(+), 11 deletions(-) create mode 100644 changelog.d/5392.bugfix diff --git a/changelog.d/5392.bugfix b/changelog.d/5392.bugfix new file mode 100644 index 0000000000..295a7cfce1 --- /dev/null +++ b/changelog.d/5392.bugfix @@ -0,0 +1 @@ +Remove redundant warning about key server response validation. diff --git a/synapse/config/key.py b/synapse/config/key.py index aba7092ccd..424875feae 100644 --- a/synapse/config/key.py +++ b/synapse/config/key.py @@ -41,6 +41,15 @@ validation or TLS certificate validation. This is likely to be very insecure. If you are *sure* you want to do this, set 'accept_keys_insecurely' on the keyserver configuration.""" +RELYING_ON_MATRIX_KEY_ERROR = """\ +Your server is configured to accept key server responses without TLS certificate +validation, and which are only signed by the old (possibly compromised) +matrix.org signing key 'ed25519:auto'. This likely isn't what you want to do, +and you should enable 'federation_verify_certificates' in your configuration. 
+ +If you are *sure* you want to do this, set 'accept_keys_insecurely' on the +trusted_key_server configuration.""" + logger = logging.getLogger(__name__) @@ -340,10 +349,20 @@ def _parse_key_servers(key_servers, federation_verify_certificates): result.verify_keys[key_id] = verify_key if ( - not verify_keys - and not server.get("accept_keys_insecurely") - and not federation_verify_certificates + not federation_verify_certificates and + not server.get("accept_keys_insecurely") ): - raise ConfigError(INSECURE_NOTARY_ERROR) + _assert_keyserver_has_verify_keys(result) yield result + + +def _assert_keyserver_has_verify_keys(trusted_key_server): + if not trusted_key_server.verify_keys: + raise ConfigError(INSECURE_NOTARY_ERROR) + + # also check that they are not blindly checking the old matrix.org key + if trusted_key_server.server_name == "matrix.org" and any( + key_id == "ed25519:auto" for key_id in trusted_key_server.verify_keys + ): + raise ConfigError(RELYING_ON_MATRIX_KEY_ERROR) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 96964b0d50..6f603f1961 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -750,13 +750,6 @@ class PerspectivesKeyFetcher(BaseV2KeyFetcher): verify_signed_json(response, perspective_name, perspective_keys[key_id]) verified = True - if perspective_name == "matrix.org" and key_id == "ed25519:auto": - logger.warning( - "Trusting trusted_key_server responses signed by the " - "compromised matrix.org signing key 'ed25519:auto'. " - "This is a placebo." - ) - if not verified: raise KeyLookupError( "Response not signed with a known key: signed with: %r, known keys: %r" From 4914a8882939337cc04d7e3e3162a9401489a437 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 10 Jun 2019 11:34:45 +0100 Subject: [PATCH 372/429] Doc --- synapse/api/auth.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/synapse/api/auth.py b/synapse/api/auth.py index e24d942553..a04be32890 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -195,6 +195,11 @@ class Auth(object): Args: request - An HTTP request with an access_token query parameter. + allow_expired - Whether to allow the request through even if the account is + expired. If true, Synapse will still require the access token to be + provided but won't check if the account it belongs to has expired. This + works thanks to /login delivering access tokens regardless of accounts' + expiration. Returns: defer.Deferred: resolves to a ``synapse.types.Requester`` object Raises: From 028f674cd323cc12f2e03e5c734c77bb4095f457 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 10 Jun 2019 11:35:54 +0100 Subject: [PATCH 373/429] Better wording --- synapse/api/auth.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/api/auth.py b/synapse/api/auth.py index a04be32890..79e2808dc5 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -196,7 +196,7 @@ class Auth(object): Args: request - An HTTP request with an access_token query parameter. allow_expired - Whether to allow the request through even if the account is - expired. If true, Synapse will still require the access token to be + expired. If true, Synapse will still require an access token to be provided but won't check if the account it belongs to has expired. This works thanks to /login delivering access tokens regardless of accounts' expiration. 
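The effect of the new notary startup check is easiest to see against concrete configurations. The dicts below sketch, in the shape that `_parse_key_servers` receives after YAML parsing, which `trusted_key_servers` entries are rejected or accepted when `federation_verify_certificates` is disabled and `accept_keys_insecurely` is not set; the key ids and base64 values are placeholders:

    # rejected with INSECURE_NOTARY_ERROR: no verify_keys, so there is nothing
    # to authenticate the notary's responses with
    no_keys = {"server_name": "matrix.org"}

    # rejected with RELYING_ON_MATRIX_KEY_ERROR: only pins the old (possibly
    # compromised) 'ed25519:auto' matrix.org key
    old_key_only = {
        "server_name": "matrix.org",
        "verify_keys": {"ed25519:auto": "<base64 verify key>"},
    }

    # accepted: the admin has pinned a current key of their own choosing
    pinned = {
        "server_name": "matrix.org",
        "verify_keys": {"ed25519:a_abcd": "<base64 verify key>"},
    }

With `federation_verify_certificates` left on (the default), none of these checks apply, since TLS validation already authenticates the notary's responses.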
From ab157e61a27c48b5f90a0c9168d534d760c0c80c Mon Sep 17 00:00:00 2001 From: sohamg Date: Mon, 10 Jun 2019 17:31:56 +0530 Subject: [PATCH 374/429] - Fix https://github.com/matrix-org/synapse/issues/4130 - Add parser argument "--no-daemonize" Signed-off-by: sohamg --- synctl | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/synctl b/synctl index 07a68e6d85..601ca41fc3 100755 --- a/synctl +++ b/synctl @@ -69,10 +69,14 @@ def abort(message, colour=RED, stream=sys.stderr): sys.exit(1) -def start(configfile): +def start(configfile, daemonize = True): write("Starting ...") args = SYNAPSE - args.extend(["--daemonize", "-c", configfile]) + + if daemonize: + args.extend(["--daemonize", "-c", configfile]) + else: + args.extend(["-c", configfile]) try: subprocess.check_call(args) @@ -143,12 +147,20 @@ def main(): help="start or stop all the workers in the given directory" " and the main synapse process", ) + parser.add_argument( + "--no-daemonize", + action="store_false", + help="Run synapse in the foreground (for debugging)" + ) options = parser.parse_args() if options.worker and options.all_processes: write('Cannot use "--worker" with "--all-processes"', stream=sys.stderr) sys.exit(1) + if options.no_daemonize and options.all_processes: + write('Cannot use "--no-daemonize" with "--all-processes"', stream=sys.stderr) + sys.exit(1) configfile = options.configfile @@ -276,7 +288,7 @@ def main(): # Check if synapse is already running if os.path.exists(pidfile) and pid_running(int(open(pidfile).read())): abort("synapse.app.homeserver already running") - start(configfile) + start(configfile, bool(options.no_daemonize)) for worker in workers: env = os.environ.copy() From b56a224e22e20258fb72c5d8888be91a61bf9e11 Mon Sep 17 00:00:00 2001 From: sohamg Date: Mon, 10 Jun 2019 17:54:29 +0530 Subject: [PATCH 375/429] Added changelog file. --- changelog.d/5412.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5412.feature diff --git a/changelog.d/5412.feature b/changelog.d/5412.feature new file mode 100644 index 0000000000..ec1503860a --- /dev/null +++ b/changelog.d/5412.feature @@ -0,0 +1 @@ +Add --no-daemonize option to run synapse in the foreground, per issue #4130. Contributed by Soham Gumaste. \ No newline at end of file From 0afcbc65cbbceb78cd65ec21b13d729ba60e2f8c Mon Sep 17 00:00:00 2001 From: sohamg Date: Mon, 10 Jun 2019 18:28:20 +0530 Subject: [PATCH 376/429] Resolved pep8 extra spacing issue --- synctl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synctl b/synctl index 601ca41fc3..651cf396d7 100755 --- a/synctl +++ b/synctl @@ -69,7 +69,7 @@ def abort(message, colour=RED, stream=sys.stderr): sys.exit(1) -def start(configfile, daemonize = True): +def start(configfile, daemonize=True): write("Starting ...") args = SYNAPSE From 12f49b22ec23c9e7d6f1f3d0dce01304545958a1 Mon Sep 17 00:00:00 2001 From: sohamg Date: Mon, 10 Jun 2019 18:47:35 +0530 Subject: [PATCH 377/429] Edited description to note that the arg will not work with daemonize set in the config. 
--- synctl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/synctl b/synctl index 651cf396d7..665eda5132 100755 --- a/synctl +++ b/synctl @@ -69,7 +69,7 @@ def abort(message, colour=RED, stream=sys.stderr): sys.exit(1) -def start(configfile, daemonize=True): +def start(configfile, daemonize = True): write("Starting ...") args = SYNAPSE @@ -150,7 +150,8 @@ def main(): parser.add_argument( "--no-daemonize", action="store_false", - help="Run synapse in the foreground (for debugging)" + help="Run synapse in the foreground for debugging. " + "Will work only if the daemonize option is not set in the config." ) options = parser.parse_args() From ca7abb129c7a50066cbb39e6cfd4a198e7022d3b Mon Sep 17 00:00:00 2001 From: sohamg Date: Mon, 10 Jun 2019 19:09:14 +0530 Subject: [PATCH 378/429] Accidentally reversed pep8 fixed before, fixed now --- synctl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synctl b/synctl index 665eda5132..30d751236f 100755 --- a/synctl +++ b/synctl @@ -69,7 +69,7 @@ def abort(message, colour=RED, stream=sys.stderr): sys.exit(1) -def start(configfile, daemonize = True): +def start(configfile, daemonize=True): write("Starting ...") args = SYNAPSE From 43badd2cd4315c3f3ed45b0092c4479a43a3eb52 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 10 Jun 2019 14:31:05 +0100 Subject: [PATCH 379/429] Fix key verification when key stored with null valid_until_ms Some keys are stored in the synapse database with a null valid_until_ms which caused an exception to be thrown when using that key. We fix this by treating nulls as zeroes, i.e. they keys will match verification requests with a minimum_valid_until_ms of zero (i.e. don't validate ts) but will not match requests with a non-zero minimum_valid_until_ms. Fixes #5391. --- synapse/storage/keys.py | 8 ++++++ tests/crypto/test_keyring.py | 50 +++++++++++++++++++++++++++++++++++- 2 files changed, 57 insertions(+), 1 deletion(-) diff --git a/synapse/storage/keys.py b/synapse/storage/keys.py index 5300720dbb..e3655ad8d7 100644 --- a/synapse/storage/keys.py +++ b/synapse/storage/keys.py @@ -80,6 +80,14 @@ class KeyStore(SQLBaseStore): for row in txn: server_name, key_id, key_bytes, ts_valid_until_ms = row + + if ts_valid_until_ms is None: + # Old keys may be stored with a ts_valid_until_ms of null, + # in which case we treat this as if it was set to `0`, i.e. + # it won't match key requests that define a minimum + # `ts_valid_until_ms`. 
+ ts_valid_until_ms = 0 + res = FetchKeyResult( verify_key=decode_verify_key_bytes(key_id, bytes(key_bytes)), valid_until_ts=ts_valid_until_ms, diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 4b1901ce31..5a355f00cc 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -25,7 +25,11 @@ from twisted.internet import defer from synapse.api.errors import SynapseError from synapse.crypto import keyring -from synapse.crypto.keyring import PerspectivesKeyFetcher, ServerKeyFetcher +from synapse.crypto.keyring import ( + PerspectivesKeyFetcher, + ServerKeyFetcher, + StoreKeyFetcher, +) from synapse.storage.keys import FetchKeyResult from synapse.util import logcontext from synapse.util.logcontext import LoggingContext @@ -219,6 +223,50 @@ class KeyringTestCase(unittest.HomeserverTestCase): # self.assertFalse(d.called) self.get_success(d) + def test_verify_json_for_server_with_null_valid_until_ms(self): + """Tests that we correctly handle key requests for keys we've stored + with a null `ts_valid_until_ms` + """ + mock_fetcher = keyring.KeyFetcher() + mock_fetcher.get_keys = Mock(return_value=defer.succeed({})) + + kr = keyring.Keyring( + self.hs, key_fetchers=(StoreKeyFetcher(self.hs), mock_fetcher) + ) + + key1 = signedjson.key.generate_signing_key(1) + r = self.hs.datastore.store_server_verify_keys( + "server9", + time.time() * 1000, + [("server9", get_key_id(key1), FetchKeyResult(get_verify_key(key1), None))], + ) + self.get_success(r) + + json1 = {} + signedjson.sign.sign_json(json1, "server9", key1) + + # should fail immediately on an unsigned object + d = _verify_json_for_server(kr, "server9", {}, 0, "test unsigned") + self.failureResultOf(d, SynapseError) + + # should fail on a signed object with a non-zero minimum_valid_until_ms, + # as it tries to refetch the keys and fails. + d = _verify_json_for_server( + kr, "server9", json1, 500, "test signed non-zero min" + ) + self.get_failure(d, SynapseError) + + # We expect the keyring tried to refetch the key once. + mock_fetcher.get_keys.assert_called_once_with( + {"server9": {get_key_id(key1): 500}} + ) + + # should succeed on a signed object with a 0 minimum_valid_until_ms + d = _verify_json_for_server( + kr, "server9", json1, 0, "test signed with zero min" + ) + self.get_success(d) + def test_verify_json_dedupes_key_requests(self): """Two requests for the same key should be deduped.""" key1 = signedjson.key.generate_signing_key(1) From 9bc7768ad37c7b3aaed154b4cc54d6fb5eabe596 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 10 Jun 2019 14:41:00 +0100 Subject: [PATCH 380/429] Newsfile --- changelog.d/5415.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5415.bugfix diff --git a/changelog.d/5415.bugfix b/changelog.d/5415.bugfix new file mode 100644 index 0000000000..83629e193d --- /dev/null +++ b/changelog.d/5415.bugfix @@ -0,0 +1 @@ +Fix bug where old keys stored in the database with a null valid until timestamp caused all verification requests for that key to fail. 
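The rule the fix encodes can be written out in a few lines. The helper below is purely illustrative (it is not Synapse code): a stored key whose ts_valid_until_ms column is NULL is treated as expiring at 0, so it still satisfies lookups that do not insist on a minimum validity time, while any non-zero minimum_valid_until_ms forces a refetch, which is what the new test exercises.

    def stored_key_satisfies(ts_valid_until_ms, minimum_valid_until_ms):
        # Old rows may carry a NULL expiry; treat it as 0 rather than raising
        # when comparing against the requested minimum.
        if ts_valid_until_ms is None:
            ts_valid_until_ms = 0
        return ts_valid_until_ms >= minimum_valid_until_ms

    assert stored_key_satisfies(None, 0)        # minimum of 0: key is usable
    assert not stored_key_satisfies(None, 500)  # non-zero minimum: refetch needed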
From d11c634ced532ed5ecdbefb45f0b5ae5cb2f9826 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 10 Jun 2019 15:55:12 +0100 Subject: [PATCH 381/429] clean up impl, and import idna directly --- synapse/crypto/context_factory.py | 26 +++++++++++--------------- synapse/python_dependencies.py | 1 + 2 files changed, 12 insertions(+), 15 deletions(-) diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py index aa8b20fe7d..2f23782630 100644 --- a/synapse/crypto/context_factory.py +++ b/synapse/crypto/context_factory.py @@ -15,6 +15,7 @@ import logging +import idna from service_identity import VerificationError from service_identity.pyopenssl import verify_hostname from zope.interface import implementer @@ -58,20 +59,6 @@ class ServerContextFactory(ContextFactory): return self._context -def _idnaBytes(text): - """ - Convert some text typed by a human into some ASCII bytes. This is a - copy of twisted.internet._idna._idnaBytes. For documentation, see the - twisted documentation. - """ - try: - import idna - except ImportError: - return text.encode("idna") - else: - return idna.encode(text) - - class ClientTLSOptionsFactory(object): """Factory for Twisted SSLClientConnectionCreators that are used to make connections to remote servers for federation. @@ -162,13 +149,21 @@ class ConnectionVerifier(object): This is a thing which is attached to the TLSMemoryBIOProtocol, and is called by the ssl context's info callback. """ + # This code is based on twisted.internet.ssl.ClientTLSOptions. + def __init__(self, hostname, verify_certs): self._verify_certs = verify_certs + if isIPAddress(hostname) or isIPv6Address(hostname): self._hostnameBytes = hostname.encode('ascii') self._sendSNI = False else: - self._hostnameBytes = _idnaBytes(hostname) + # twisted's ClientTLSOptions falls back to the stdlib impl here if + # idna is not installed, but points out that lacks support for + # IDNA2008 (http://bugs.python.org/issue17305). + # + # We can rely on having idna. + self._hostnameBytes = idna.encode(hostname) self._sendSNI = True self._hostnameASCII = self._hostnameBytes.decode("ascii") @@ -176,6 +171,7 @@ class ConnectionVerifier(object): def verify_context_info_cb(self, ssl_connection, where): if where & SSL.SSL_CB_HANDSHAKE_START and self._sendSNI: ssl_connection.set_tlsext_host_name(self._hostnameBytes) + if where & SSL.SSL_CB_HANDSHAKE_DONE and self._verify_certs: try: verify_hostname(ssl_connection, self._hostnameASCII) diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index c78f2cb15e..db09ff285f 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -44,6 +44,7 @@ REQUIREMENTS = [ "canonicaljson>=1.1.3", "signedjson>=1.0.0", "pynacl>=1.2.1", + "idna>=2", "service_identity>=16.0.0", # our logcontext handling relies on the ability to cancel inlineCallbacks From efe7b3176ecfe81cb7eb94a6882228ba5682278d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 10 Jun 2019 15:58:35 +0100 Subject: [PATCH 382/429] Fix federation connections to literal IP addresses turns out we need a shiny version of service_identity to enforce this correctly. 
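The point of the version bump is that service_identity checks hostnames against DNS subjectAltName entries via verify_hostname, whereas literal IPs have to be checked against iPAddress entries via verify_ip_address, which only appeared in service_identity 18.1. A minimal standalone sketch of that dispatch (the wrapper function is illustrative; the real change is in the diff below):

    from service_identity.pyopenssl import verify_hostname, verify_ip_address
    from twisted.internet.abstract import isIPAddress, isIPv6Address

    def verify_peer_identity(ssl_connection, host):
        # Hostnames are matched against DNS SAN entries; IP literals must be
        # matched against iPAddress SAN entries instead.
        if isIPAddress(host) or isIPv6Address(host):
            verify_ip_address(ssl_connection, host)
        else:
            verify_hostname(ssl_connection, host)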
--- synapse/crypto/context_factory.py | 13 ++++++++----- synapse/python_dependencies.py | 4 +++- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py index 2f23782630..0639c228cb 100644 --- a/synapse/crypto/context_factory.py +++ b/synapse/crypto/context_factory.py @@ -17,7 +17,7 @@ import logging import idna from service_identity import VerificationError -from service_identity.pyopenssl import verify_hostname +from service_identity.pyopenssl import verify_hostname, verify_ip_address from zope.interface import implementer from OpenSSL import SSL, crypto @@ -156,7 +156,7 @@ class ConnectionVerifier(object): if isIPAddress(hostname) or isIPv6Address(hostname): self._hostnameBytes = hostname.encode('ascii') - self._sendSNI = False + self._is_ip_address = True else: # twisted's ClientTLSOptions falls back to the stdlib impl here if # idna is not installed, but points out that lacks support for @@ -164,17 +164,20 @@ class ConnectionVerifier(object): # # We can rely on having idna. self._hostnameBytes = idna.encode(hostname) - self._sendSNI = True + self._is_ip_address = False self._hostnameASCII = self._hostnameBytes.decode("ascii") def verify_context_info_cb(self, ssl_connection, where): - if where & SSL.SSL_CB_HANDSHAKE_START and self._sendSNI: + if where & SSL.SSL_CB_HANDSHAKE_START and not self._is_ip_address: ssl_connection.set_tlsext_host_name(self._hostnameBytes) if where & SSL.SSL_CB_HANDSHAKE_DONE and self._verify_certs: try: - verify_hostname(ssl_connection, self._hostnameASCII) + if self._is_ip_address: + verify_ip_address(ssl_connection, self._hostnameASCII) + else: + verify_hostname(ssl_connection, self._hostnameASCII) except VerificationError: f = Failure() tls_protocol = ssl_connection.get_app_data() diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index db09ff285f..6efd81f204 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -45,7 +45,9 @@ REQUIREMENTS = [ "signedjson>=1.0.0", "pynacl>=1.2.1", "idna>=2", - "service_identity>=16.0.0", + + # validating SSL certs for IP addresses requires service_identity 18.1. + "service_identity>=18.1.0", # our logcontext handling relies on the ability to cancel inlineCallbacks # (https://twistedmatrix.com/trac/ticket/4632) which landed in Twisted 18.7. From e01668122126a4b6b7d45e2e24f591bb8546623b Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 10 Jun 2019 16:06:25 +0100 Subject: [PATCH 383/429] Tests for SSL certs for federation connections Add some tests for bad certificates for federation and .well-known connections --- tests/http/__init__.py | 126 +++++++++++-- tests/http/ca.crt | 19 ++ tests/http/ca.key | 27 +++ .../test_matrix_federation_agent.py | 169 ++++++++++++++++-- tests/http/server.key | 27 +++ tests/http/server.pem | 81 --------- 6 files changed, 343 insertions(+), 106 deletions(-) create mode 100644 tests/http/ca.crt create mode 100644 tests/http/ca.key create mode 100644 tests/http/server.key delete mode 100644 tests/http/server.pem diff --git a/tests/http/__init__.py b/tests/http/__init__.py index 851fc0eb33..b03fff0945 100644 --- a/tests/http/__init__.py +++ b/tests/http/__init__.py @@ -13,28 +13,124 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import os.path +import subprocess + +from zope.interface import implementer from OpenSSL import SSL +from OpenSSL.SSL import Connection +from twisted.internet.interfaces import IOpenSSLServerConnectionCreator -def get_test_cert_file(): - """get the path to the test cert""" +def get_test_ca_cert_file(): + """Get the path to the test CA cert - # the cert file itself is made with: - # - # openssl req -x509 -newkey rsa:4096 -keyout server.pem -out server.pem -days 36500 \ - # -nodes -subj '/CN=testserv' - return os.path.join(os.path.dirname(__file__), 'server.pem') + The keypair is generated with: + + openssl genrsa -out ca.key 2048 + openssl req -new -x509 -key ca.key -days 3650 -out ca.crt \ + -subj '/CN=synapse test CA' + """ + return os.path.join(os.path.dirname(__file__), "ca.crt") -class ServerTLSContext(object): - """A TLS Context which presents our test cert.""" +def get_test_key_file(): + """get the path to the test key - def __init__(self): - self.filename = get_test_cert_file() + The key file is made with: - def getContext(self): + openssl genrsa -out server.key 2048 + """ + return os.path.join(os.path.dirname(__file__), "server.key") + + +cert_file_count = 0 + +CONFIG_TEMPLATE = b"""\ +[default] +basicConstraints = CA:FALSE +keyUsage=nonRepudiation, digitalSignature, keyEncipherment +subjectAltName = %(sanentries)b +""" + + +def create_test_cert_file(sanlist): + """build an x509 certificate file + + Args: + sanlist: list[bytes]: a list of subjectAltName values for the cert + + Returns: + str: the path to the file + """ + global cert_file_count + csr_filename = "server.csr" + cnf_filename = "server.%i.cnf" % (cert_file_count,) + cert_filename = "server.%i.crt" % (cert_file_count,) + cert_file_count += 1 + + # first build a CSR + subprocess.run( + [ + "openssl", + "req", + "-new", + "-key", + get_test_key_file(), + "-subj", + "/", + "-out", + csr_filename, + ], + check=True, + ) + + # now a config file describing the right SAN entries + sanentries = b",".join(sanlist) + with open(cnf_filename, "wb") as f: + f.write(CONFIG_TEMPLATE % {b"sanentries": sanentries}) + + # finally the cert + ca_key_filename = os.path.join(os.path.dirname(__file__), "ca.key") + ca_cert_filename = get_test_ca_cert_file() + subprocess.run( + [ + "openssl", + "x509", + "-req", + "-in", + csr_filename, + "-CA", + ca_cert_filename, + "-CAkey", + ca_key_filename, + "-set_serial", + "1", + "-extfile", + cnf_filename, + "-out", + cert_filename, + ], + check=True, + ) + + return cert_filename + + +@implementer(IOpenSSLServerConnectionCreator) +class TestServerTLSConnectionFactory(object): + """An SSL connection creator which returns connections which present a certificate + signed by our test CA.""" + + def __init__(self, sanlist): + """ + Args: + sanlist: list[bytes]: a list of subjectAltName values for the cert + """ + self._cert_file = create_test_cert_file(sanlist) + + def serverConnectionForTLS(self, tlsProtocol): ctx = SSL.Context(SSL.TLSv1_METHOD) - ctx.use_certificate_file(self.filename) - ctx.use_privatekey_file(self.filename) - return ctx + ctx.use_certificate_file(self._cert_file) + ctx.use_privatekey_file(get_test_key_file()) + return Connection(ctx, None) diff --git a/tests/http/ca.crt b/tests/http/ca.crt new file mode 100644 index 0000000000..730f81e99c --- /dev/null +++ b/tests/http/ca.crt @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDCjCCAfKgAwIBAgIJAPwHIHgH/jtjMA0GCSqGSIb3DQEBCwUAMBoxGDAWBgNV +BAMMD3N5bmFwc2UgdGVzdCBDQTAeFw0xOTA2MTAxMTI2NDdaFw0yOTA2MDcxMTI2 
+NDdaMBoxGDAWBgNVBAMMD3N5bmFwc2UgdGVzdCBDQTCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAOZOXCKuylf9jHzJXpU2nS+XEKrnGPgs2SAhQKrzBxg3 +/d8KT2Zsfsj1i3G7oGu7B0ZKO6qG5AxOPCmSMf9/aiSHFilfSh+r8rCpJyWMev2c +/w/xmhoFHgn+H90NnqlXvWb5y1YZCE3gWaituQSaa93GPKacRqXCgIrzjPUuhfeT +uwFQt4iyUhMNBYEy3aw4IuIHdyBqi4noUhR2ZeuflLJ6PswdJ8mEiAvxCbBGPerq +idhWcZwlo0fKu4u1uu5B8TnTsMg2fJgL6c5olBG90Urt22gA6anfP5W/U1ZdVhmB +T3Rv5SJMkGyMGE6sEUetLFyb2GJpgGD7ePkUCZr+IMMCAwEAAaNTMFEwHQYDVR0O +BBYEFLg7nTCYsvQXWTyS6upLc0YTlIwRMB8GA1UdIwQYMBaAFLg7nTCYsvQXWTyS +6upLc0YTlIwRMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBADqx +GX4Ul5OGQlcG+xTt4u3vMCeqGo8mh1AnJ7zQbyRmwjJiNxJVX+/EcqFSTsmkBNoe +xdYITI7Z6dyoiKw99yCZDE7gALcyACEU7r0XY7VY/hebAaX6uLaw1sZKKAIC04lD +KgCu82tG85n60Qyud5SiZZF0q1XVq7lbvOYVdzVZ7k8Vssy5p9XnaLJLMggYeOiX +psHIQjvYGnTTEBZZHzWOrc0WGThd69wxTOOkAbCsoTPEwZL8BGUsdtLWtvhp452O +npvaUBzKg39R5X3KTdhB68XptiQfzbQkd3FtrwNuYPUywlsg55Bxkv85n57+xDO3 +D9YkgUqEp0RGUXQgCsQ= +-----END CERTIFICATE----- diff --git a/tests/http/ca.key b/tests/http/ca.key new file mode 100644 index 0000000000..5c99cae186 --- /dev/null +++ b/tests/http/ca.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpgIBAAKCAQEA5k5cIq7KV/2MfMlelTadL5cQqucY+CzZICFAqvMHGDf93wpP +Zmx+yPWLcbuga7sHRko7qobkDE48KZIx/39qJIcWKV9KH6vysKknJYx6/Zz/D/Ga +GgUeCf4f3Q2eqVe9ZvnLVhkITeBZqK25BJpr3cY8ppxGpcKAivOM9S6F95O7AVC3 +iLJSEw0FgTLdrDgi4gd3IGqLiehSFHZl65+Usno+zB0nyYSIC/EJsEY96uqJ2FZx +nCWjR8q7i7W67kHxOdOwyDZ8mAvpzmiUEb3RSu3baADpqd8/lb9TVl1WGYFPdG/l +IkyQbIwYTqwRR60sXJvYYmmAYPt4+RQJmv4gwwIDAQABAoIBAQCFuFG+wYYy+MCt +Y65LLN6vVyMSWAQjdMbM5QHLQDiKU1hQPIhFjBFBVXCVpL9MTde3dDqYlKGsk3BT +ItNs6eoTM2wmsXE0Wn4bHNvh7WMsBhACjeFP4lDCtI6DpvjMkmkidT8eyoIL1Yu5 +aMTYa2Dd79AfXPWYIQrJowfhBBY83KuW5fmYnKKDVLqkT9nf2dgmmQz85RgtNiZC +zFkIsNmPqH1zRbcw0wORfOBrLFvsMc4Tt8EY5Wz3NnH8Zfgf8Q3MgARH1yspz3Vp +B+EYHbsK17xZ+P59KPiX3yefvyYWEUjFF7ymVsVnDxLugYl4pXwWUpm19GxeDvFk +cgBUD5OBAoGBAP7lBdCp6lx6fYtxdxUm3n4MMQmYcac4qZdeBIrvpFMnvOBBuixl +eavcfFmFdwgAr8HyVYiu9ynac504IYvmtYlcpUmiRBbmMHbvLQEYHl7FYFKNz9ej +2ue4oJE3RsPdLsD3xIlc+xN8oT1j0knyorwsHdj0Sv77eZzZS9XZZfJzAoGBAOdO +CibYmoNqK/mqDHkp6PgsnbQGD5/CvPF/BLUWV1QpHxLzUQQeoBOQW5FatHe1H5zi +mbq3emBefVmsCLrRIJ4GQu4vsTMfjcpGLwviWmaK6pHbGPt8IYeEQ2MNyv59EtA2 +pQy4dX7/Oe6NLAR1UEQjXmCuXf+rxnxF3VJd1nRxAoGBANb9eusl9fusgSnVOTjJ +AQ7V36KVRv9hZoG6liBNwo80zDVmms4JhRd1MBkd3mkMkzIF4SkZUnWlwLBSANGM +dX/3eZ5i1AVwgF5Am/f5TNxopDbdT/o1RVT/P8dcFT7s1xuBn+6wU0F7dFBgWqVu +lt4aY85zNrJcj5XBHhqwdDGLAoGBAIksPNUAy9F3m5C6ih8o/aKAQx5KIeXrBUZq +v43tK+kbYfRJHBjHWMOBbuxq0G/VmGPf9q9GtGqGXuxZG+w+rYtJx1OeMQZShjIZ +ITl5CYeahrXtK4mo+fF2PMh3m5UE861LWuKKWhPwpJiWXC5grDNcjlHj1pcTdeip +PjHkuJPhAoGBAIh35DptqqdicOd3dr/+/m2YQywY8aSpMrR0bC06aAkscD7oq4tt +s/jwl0UlHIrEm/aMN7OnGIbpfkVdExfGKYaa5NRlgOwQpShwLufIo/c8fErd2zb8 +K3ptlwBxMrayMXpS3DP78r83Z0B8/FSK2guelzdRJ3ftipZ9io1Gss1C +-----END RSA PRIVATE KEY----- diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index 05880a1048..ecce473b01 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -17,12 +17,14 @@ import logging from mock import Mock import treq +from service_identity import VerificationError from zope.interface import implementer from twisted.internet import defer from twisted.internet._sslverify import ClientTLSOptions, OpenSSLCertificateOptions from twisted.internet.protocol import Factory from twisted.protocols.tls import TLSMemoryBIOFactory +from twisted.web._newclient import ResponseNeverReceived from 
twisted.web.http import HTTPChannel from twisted.web.http_headers import Headers from twisted.web.iweb import IPolicyForHTTPS @@ -37,13 +39,29 @@ from synapse.http.federation.srv_resolver import Server from synapse.util.caches.ttlcache import TTLCache from synapse.util.logcontext import LoggingContext -from tests.http import ServerTLSContext +from tests.http import TestServerTLSConnectionFactory, get_test_ca_cert_file from tests.server import FakeTransport, ThreadedMemoryReactorClock from tests.unittest import TestCase from tests.utils import default_config logger = logging.getLogger(__name__) +test_server_connection_factory = None + + +def get_connection_factory(): + # this needs to happen once, but not until we are ready to run the first test + global test_server_connection_factory + if test_server_connection_factory is None: + test_server_connection_factory = TestServerTLSConnectionFactory(sanlist=[ + b'DNS:testserv', + b'DNS:target-server', + b'DNS:xn--bcher-kva.com', + b'IP:1.2.3.4', + b'IP:::1', + ]) + return test_server_connection_factory + class MatrixFederationAgentTests(TestCase): def setUp(self): @@ -53,12 +71,11 @@ class MatrixFederationAgentTests(TestCase): self.well_known_cache = TTLCache("test_cache", timer=self.reactor.seconds) - # for now, we disable cert verification for the test, since the cert we - # present will not be trusted. We should do better here, though. config_dict = default_config("test", parse=False) - config_dict["federation_verify_certificates"] = False - config_dict["trusted_key_servers"] = [] - config = HomeServerConfig() + config_dict["federation_custom_ca_list"] = [get_test_ca_cert_file()] + # config_dict["trusted_key_servers"] = [] + + self._config = config = HomeServerConfig() config.parse_config_dict(config_dict) self.agent = MatrixFederationAgent( @@ -77,7 +94,7 @@ class MatrixFederationAgentTests(TestCase): """ # build the test server - server_tls_protocol = _build_test_server() + server_tls_protocol = _build_test_server(get_connection_factory()) # now, tell the client protocol factory to build the client protocol (it will be a # _WrappingProtocol, around a TLSMemoryBIOProtocol, around an @@ -328,6 +345,88 @@ class MatrixFederationAgentTests(TestCase): self.reactor.pump((0.1,)) self.successResultOf(test_d) + def test_get_hostname_bad_cert(self): + """ + Test the behaviour when the certificate on the server doesn't match the hostname + """ + self.mock_resolver.resolve_service.side_effect = lambda _: [] + self.reactor.lookups["testserv1"] = "1.2.3.4" + + test_d = self._make_get_request(b"matrix://testserv1/foo/bar") + + # Nothing happened yet + self.assertNoResult(test_d) + + # No SRV record lookup yet + self.mock_resolver.resolve_service.assert_not_called() + + # there should be an attempt to connect on port 443 for the .well-known + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + (host, port, client_factory, _timeout, _bindAddress) = clients[0] + self.assertEqual(host, '1.2.3.4') + self.assertEqual(port, 443) + + # fonx the connection + client_factory.clientConnectionFailed(None, Exception("nope")) + + # attemptdelay on the hostnameendpoint is 0.3, so takes that long before the + # .well-known request fails. 
+ self.reactor.pump((0.4,)) + + # now there should be a SRV lookup + self.mock_resolver.resolve_service.assert_called_once_with( + b"_matrix._tcp.testserv1" + ) + + # we should fall back to a direct connection + self.assertEqual(len(clients), 2) + (host, port, client_factory, _timeout, _bindAddress) = clients[1] + self.assertEqual(host, '1.2.3.4') + self.assertEqual(port, 8448) + + # make a test server, and wire up the client + http_server = self._make_connection(client_factory, expected_sni=b'testserv1') + + # there should be no requests + self.assertEqual(len(http_server.requests), 0) + + # ... and the request should have failed + e = self.failureResultOf(test_d, ResponseNeverReceived) + failure_reason = e.value.reasons[0] + self.assertIsInstance(failure_reason.value, VerificationError) + + def test_get_ip_address_bad_cert(self): + """ + Test the behaviour when the server name contains an explicit IP, but + the server cert doesn't cover it + """ + # there will be a getaddrinfo on the IP + self.reactor.lookups["1.2.3.5"] = "1.2.3.5" + + test_d = self._make_get_request(b"matrix://1.2.3.5/foo/bar") + + # Nothing happened yet + self.assertNoResult(test_d) + + # Make sure treq is trying to connect + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + (host, port, client_factory, _timeout, _bindAddress) = clients[0] + self.assertEqual(host, '1.2.3.5') + self.assertEqual(port, 8448) + + # make a test server, and wire up the client + http_server = self._make_connection(client_factory, expected_sni=None) + + # there should be no requests + self.assertEqual(len(http_server.requests), 0) + + # ... and the request should have failed + e = self.failureResultOf(test_d, ResponseNeverReceived) + failure_reason = e.value.reasons[0] + self.assertIsInstance(failure_reason.value, VerificationError) + def test_get_no_srv_no_well_known(self): """ Test the behaviour when the server name has no port, no SRV, and no well-known @@ -585,6 +684,49 @@ class MatrixFederationAgentTests(TestCase): self.reactor.pump((0.1,)) self.successResultOf(test_d) + def test_get_well_known_unsigned_cert(self): + """Test the behaviour when the .well-known server presents a cert + not signed by a CA + """ + + # we use the same test server as the other tests, but use an agent + # with _well_known_tls_policy left to the default, which will not + # trust it (since the presented cert is signed by a test CA) + + self.mock_resolver.resolve_service.side_effect = lambda _: [] + self.reactor.lookups["testserv"] = "1.2.3.4" + + agent = MatrixFederationAgent( + reactor=self.reactor, + tls_client_options_factory=ClientTLSOptionsFactory(self._config), + _srv_resolver=self.mock_resolver, + _well_known_cache=self.well_known_cache, + ) + + test_d = agent.request(b"GET", b"matrix://testserv/foo/bar") + + # Nothing happened yet + self.assertNoResult(test_d) + + # there should be an attempt to connect on port 443 for the .well-known + clients = self.reactor.tcpClients + self.assertEqual(len(clients), 1) + (host, port, client_factory, _timeout, _bindAddress) = clients[0] + self.assertEqual(host, '1.2.3.4') + self.assertEqual(port, 443) + + http_proto = self._make_connection( + client_factory, expected_sni=b"testserv", + ) + + # there should be no requests + self.assertEqual(len(http_proto.requests), 0) + + # and there should be a SRV lookup instead + self.mock_resolver.resolve_service.assert_called_once_with( + b"_matrix._tcp.testserv" + ) + def test_get_hostname_srv(self): """ Test the behaviour when there is a single SRV record @@ 
-918,11 +1060,17 @@ def _check_logcontext(context): raise AssertionError("Expected logcontext %s but was %s" % (context, current)) -def _build_test_server(): +def _build_test_server(connection_creator): """Construct a test server This builds an HTTP channel, wrapped with a TLSMemoryBIOProtocol + Args: + connection_creator (IOpenSSLServerConnectionCreator): thing to build + SSL connections + sanlist (list[bytes]): list of the SAN entries for the cert returned + by the server + Returns: TLSMemoryBIOProtocol """ @@ -931,7 +1079,7 @@ def _build_test_server(): server_factory.log = _log_request server_tls_factory = TLSMemoryBIOFactory( - ServerTLSContext(), isClient=False, wrappedFactory=server_factory + connection_creator, isClient=False, wrappedFactory=server_factory ) return server_tls_factory.buildProtocol(None) @@ -944,7 +1092,8 @@ def _log_request(request): @implementer(IPolicyForHTTPS) class TrustingTLSPolicyForHTTPS(object): - """An IPolicyForHTTPS which doesn't do any certificate verification""" + """An IPolicyForHTTPS which checks that the certificate belongs to the + right server, but doesn't check the certificate chain.""" def creatorForNetloc(self, hostname, port): certificateOptions = OpenSSLCertificateOptions() diff --git a/tests/http/server.key b/tests/http/server.key new file mode 100644 index 0000000000..c53ee02b21 --- /dev/null +++ b/tests/http/server.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAvUAWLOE6TEp3FYSfEnJMwYtJg3KIW5BjiAOOvFVOVQfJ5eEa +vzyJ1Z+8DUgLznFnUkAeD9GjPvP7awl3NPJKLQSMkV5Tp+ea4YyV+Aa4R7flROEa +zCGvmleydZw0VqN1atVZ0ikEoglM/APJQd70ec7KSR3QoxaV2/VNCHmyAPdP+0WI +llV54VXX1CZrWSHaCSn1gzo3WjnGbxTOCQE5Z4k5hqJAwLWWhxDv+FX/jD38Sq3H +gMFNpXJv6FYwwaKU8awghHdSY/qlBPE/1rU83vIBFJ3jW6I1WnQDfCQ69of5vshK +N4v4hok56ScwdUnk8lw6xvJx1Uav/XQB9qGh4QIDAQABAoIBAQCHLO5p8hotAgdb +JFZm26N9nxrMPBOvq0ucjEX4ucnwrFaGzynGrNwa7TRqHCrqs0/EjS2ryOacgbL0 +eldeRy26SASLlN+WD7UuI7e+6DXabDzj3RHB+tGuIbPDk+ZCeBDXVTsKBOhdQN1v +KNkpJrJjCtSsMxKiWvCBow353srJKqCDZcF5NIBYBeDBPMoMbfYn5dJ9JhEf+2h4 +0iwpnWDX1Vqf46pCRa0hwEyMXycGeV2CnfJSyV7z52ZHQrvkz8QspSnPpnlCnbOE +UAvc8kZ5e8oZE7W+JfkK38vHbEGM1FCrBmrC/46uUGMRpZfDferGs91RwQVq/F0n +JN9hLzsBAoGBAPh2pm9Xt7a4fWSkX0cDgjI7PT2BvLUjbRwKLV+459uDa7+qRoGE +sSwb2QBqmQ1kbr9JyTS+Ld8dyUTsGHZK+YbTieAxI3FBdKsuFtcYJO/REN0vik+6 +fMaBHPvDHSU2ioq7spZ4JBFskzqs38FvZ0lX7aa3fguMk8GMLnofQ8QxAoGBAML9 +o5sJLN9Tk9bv2aFgnERgfRfNjjV4Wd99TsktnCD04D1GrP2eDSLfpwFlCnguck6b +jxikqcolsNhZH4dgYHqRNj+IljSdl+sYZiygO6Ld0XU+dEFO86N3E9NzZhKcQ1at +85VdwNPCS7JM2fIxEvS9xfbVnsmK6/37ZZ5iI7yxAoGBALw2vRtJGmy60pojfd1A +hibhAyINnlKlFGkSOI7zdgeuRTf6l9BTIRclvTt4hJpFgzM6hMWEbyE94hJoupsZ +bm443o/LCWsox2VI05p6urhD6f9znNWKkiyY78izY+elqksvpjgfqEresaTYAeP5 +LQe9KNSK2VuMUP1j4G04M9BxAoGAWe8ITZJuytZOgrz/YIohqPvj1l2tcIYA1a6C +7xEFSMIIxtpZIWSLZIFJEsCakpHBkPX4iwIveZfmt/JrM1JFTWK6ZZVGyh/BmOIZ +Bg4lU1oBqJTUo+aZQtTCJS29b2n5OPpkNYkXTdP4e9UsVKNDvfPlYZJneUeEzxDr +bqCPIRECgYA544KMwrWxDQZg1dsKWgdVVKx80wEFZAiQr9+0KF6ch6Iu7lwGJHFY +iI6O85paX41qeC/Fo+feIWJVJU2GvG6eBsbO4bmq+KSg4NkABJSYxodgBp9ftNeD +jo1tfw+gudlNe5jXHu7oSX93tqGjR4Cnlgan/KtfkB96yHOumGmOhQ== +-----END RSA PRIVATE KEY----- diff --git a/tests/http/server.pem b/tests/http/server.pem deleted file mode 100644 index 0584cf1a80..0000000000 --- a/tests/http/server.pem +++ /dev/null @@ -1,81 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQCgF43/3lAgJ+p0 -x7Rn8UcL8a4fctvdkikvZrCngw96LkB34Evfq8YGWlOVjU+f9naUJLAKMatmAfEN -r+rMX4VOXmpTwuu6iLtqwreUrRFMESyrmvQxa15p+y85gkY0CFmXMblv6ORbxHTG 
-ncBGwST4WK4Poewcgt6jcISFCESTUKu1zc3cw1ANIDRyDLB5K44KwIe36dcKckyN -Kdtv4BJ+3fcIZIkPJH62zqCypgFF1oiFt40uJzClxgHdJZlKYpgkfnDTckw4Y/Mx -9k8BbE310KAzUNMV9H7I1eEolzrNr66FQj1eN64X/dqO8lTbwCqAd4diCT4sIUk0 -0SVsAUjNd3g8j651hx+Qb1t8fuOjrny8dmeMxtUgIBHoQcpcj76R55Fs7KZ9uar0 -8OFTyGIze51W1jG2K/7/5M1zxIqrA+7lsXu5OR81s7I+Ng/UUAhiHA/z+42/aiNa -qEuk6tqj3rHfLctnCbtZ+JrRNqSSwEi8F0lMA021ivEd2eJV+284OyJjhXOmKHrX -QADHrmS7Sh4syTZvRNm9n+qWID0KdDr2Sji/KnS3Enp44HDQ4xriT6/xhwEGsyuX -oH5aAkdLznulbWkHBbyx1SUQSTLpOqzaioF9m1vRrLsFvrkrY3D253mPJ5eU9HM/ -dilduFcUgj4rz+6cdXUAh+KK/v95zwIDAQABAoICAFG5tJPaOa0ws0/KYx5s3YgL -aIhFalhCNSQtmCDrlwsYcXDA3/rfBchYdDL0YKGYgBBAal3J3WXFt/j0xThvyu2m -5UC9UPl4s7RckrsjXqEmY1d3UxGnbhtMT19cUdpeKN42VCP9EBaIw9Rg07dLAkSF -gNYaIx6q8F0fI4eGIPvTQtUcqur4CfWpaxyNvckdovV6M85/YXfDwbCOnacPDGIX -jfSK3i0MxGMuOHr6o8uzKR6aBUh6WStHWcw7VXXTvzdiFNbckmx3Gb93rf1b/LBw -QFfx+tBKcC62gKroCOzXso/0sL9YTVeSD/DJZOiJwSiz3Dj/3u1IUMbVvfTU8wSi -CYS7Z+jHxwSOCSSNTXm1wO/MtDsNKbI1+R0cohr/J9pOMQvrVh1+2zSDOFvXAQ1S -yvjn+uqdmijRoV2VEGVHd+34C+ci7eJGAhL/f92PohuuFR2shUETgGWzpACZSJwg -j1d90Hs81hj07vWRb+xCeDh00vimQngz9AD8vYvv/S4mqRGQ6TZdfjLoUwSTg0JD -6sQgRXX026gQhLhn687vLKZfHwzQPZkpQdxOR0dTZ/ho/RyGGRJXH4kN4cA2tPr+ -AKYQ29YXGlEzGG7OqikaZcprNWG6UFgEpuXyBxCgp9r4ladZo3J+1Rhgus8ZYatd -uO98q3WEBmP6CZ2n32mBAoIBAQDS/c/ybFTos0YpGHakwdmSfj5OOQJto2y8ywfG -qDHwO0ebcpNnS1+MA+7XbKUQb/3Iq7iJljkkzJG2DIJ6rpKynYts1ViYpM7M/t0T -W3V1gvUcUL62iqkgws4pnpWmubFkqV31cPSHcfIIclnzeQ1aOEGsGHNAvhty0ciC -DnkJACbqApvopFLOR5f6UFTtKExE+hDH0WqgpsCAKJ1L4g6pBzZatI32/CN9JEVU -tDbxLV75hHlFFjUrG7nT1rPyr/gI8Ceh9/2xeXPfjJUR0PrG3U1nwLqUCZkvFzO6 -XpN2+A+/v4v5xqMjKDKDFy1oq6SCMomwv/viw6wl/84TMbolAoIBAQDCPiMecnR8 -REik6tqVzQO/uSe9ZHjz6J15t5xdwaI6HpSwLlIkQPkLTjyXtFpemK5DOYRxrJvQ -remfrZrN2qtLlb/DKpuGPWRsPOvWCrSuNEp48ivUehtclljrzxAFfy0sM+fWeJ48 -nTnR+td9KNhjNtZixzWdAy/mE+jdaMsXVnk66L73Uz+2WsnvVMW2R6cpCR0F2eP/ -B4zDWRqlT2w47sePAB81mFYSQLvPC6Xcgg1OqMubfiizJI49c8DO6Jt+FFYdsxhd -kG52Eqa/Net6rN3ueiS6yXL5TU3Y6g96bPA2KyNCypucGcddcBfqaiVx/o4AH6yT -NrdsrYtyvk/jAoIBAQDHUwKVeeRJJbvdbQAArCV4MI155n+1xhMe1AuXkCQFWGtQ -nlBE4D72jmyf1UKnIbW2Uwv15xY6/ouVWYIWlj9+QDmMaozVP7Uiko+WDuwLRNl8 -k4dn+dzHV2HejbPBG2JLv3lFOx23q1zEwArcaXrExaq9Ayg2fKJ/uVHcFAIiD6Oz -pR1XDY4w1A/uaN+iYFSVQUyDCQLbnEz1hej73CaPZoHh9Pq83vxD5/UbjVjuRTeZ -L55FNzKpc/r89rNvTPBcuUwnxplDhYKDKVNWzn9rSXwrzTY2Tk8J3rh+k4RqevSd -6D47jH1n5Dy7/TRn0ueKHGZZtTUnyEUkbOJo3ayFAoIBAHKDyZaQqaX9Z8p6fwWj -yVsFoK0ih8BcWkLBAdmwZ6DWGJjJpjmjaG/G3ygc9s4gO1R8m12dAnuDnGE8KzDD -gwtbrKM2Alyg4wyA2hTlWOH/CAzH0RlCJ9Fs/d1/xJVJBeuyajLiB3/6vXTS6qnq -I7BSSxAPG8eGcn21LSsjNeB7ZZtaTgNnu/8ZBUYo9yrgkWc67TZe3/ChldYxOOlO -qqHh/BqNWtjxB4VZTp/g4RbgQVInZ2ozdXEv0v/dt0UEk29ANAjsZif7F3RayJ2f -/0TilzCaJ/9K9pKNhaClVRy7Dt8QjYg6BIWCGSw4ApF7pLnQ9gySn95mersCkVzD -YDsCggEAb0E/TORjQhKfNQvahyLfQFm151e+HIoqBqa4WFyfFxe/IJUaLH/JSSFw -VohbQqPdCmaAeuQ8ERL564DdkcY5BgKcax79fLLCOYP5bT11aQx6uFpfl2Dcm6Z9 -QdCRI4jzPftsd5fxLNH1XtGyC4t6vTic4Pji2O71WgWzx0j5v4aeDY4sZQeFxqCV -/q7Ee8hem1Rn5RFHu14FV45RS4LAWl6wvf5pQtneSKzx8YL0GZIRRytOzdEfnGKr -FeUlAj5uL+5/p0ZEgM7gPsEBwdm8scF79qSUn8UWSoXNeIauF9D4BDg8RZcFFxka -KILVFsq3cQC+bEnoM4eVbjEQkGs1RQ== ------END PRIVATE KEY----- ------BEGIN CERTIFICATE----- -MIIE/jCCAuagAwIBAgIJANFtVaGvJWZlMA0GCSqGSIb3DQEBCwUAMBMxETAPBgNV -BAMMCHRlc3RzZXJ2MCAXDTE5MDEyNzIyMDIzNloYDzIxMTkwMTAzMjIwMjM2WjAT -MREwDwYDVQQDDAh0ZXN0c2VydjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoC -ggIBAKAXjf/eUCAn6nTHtGfxRwvxrh9y292SKS9msKeDD3ouQHfgS9+rxgZaU5WN -T5/2dpQksAoxq2YB8Q2v6sxfhU5ealPC67qIu2rCt5StEUwRLKua9DFrXmn7LzmC -RjQIWZcxuW/o5FvEdMadwEbBJPhYrg+h7ByC3qNwhIUIRJNQq7XNzdzDUA0gNHIM 
-sHkrjgrAh7fp1wpyTI0p22/gEn7d9whkiQ8kfrbOoLKmAUXWiIW3jS4nMKXGAd0l -mUpimCR+cNNyTDhj8zH2TwFsTfXQoDNQ0xX0fsjV4SiXOs2vroVCPV43rhf92o7y -VNvAKoB3h2IJPiwhSTTRJWwBSM13eDyPrnWHH5BvW3x+46OufLx2Z4zG1SAgEehB -ylyPvpHnkWzspn25qvTw4VPIYjN7nVbWMbYr/v/kzXPEiqsD7uWxe7k5HzWzsj42 -D9RQCGIcD/P7jb9qI1qoS6Tq2qPesd8ty2cJu1n4mtE2pJLASLwXSUwDTbWK8R3Z -4lX7bzg7ImOFc6YoetdAAMeuZLtKHizJNm9E2b2f6pYgPQp0OvZKOL8qdLcSenjg -cNDjGuJPr/GHAQazK5egfloCR0vOe6VtaQcFvLHVJRBJMuk6rNqKgX2bW9GsuwW+ -uStjcPbneY8nl5T0cz92KV24VxSCPivP7px1dQCH4or+/3nPAgMBAAGjUzBRMB0G -A1UdDgQWBBQcQZpzLzTk5KdS/Iz7sGCV7gTd/zAfBgNVHSMEGDAWgBQcQZpzLzTk -5KdS/Iz7sGCV7gTd/zAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IC -AQAr/Pgha57jqYsDDX1LyRrVdqoVBpLBeB7x/p9dKYm7S6tBTDFNMZ0SZyQP8VEG -7UoC9/OQ9nCdEMoR7ZKpQsmipwcIqpXHS6l4YOkf5EEq5jpMgvlEesHmBJJeJew/ -FEPDl1bl8d0tSrmWaL3qepmwzA+2lwAAouWk2n+rLiP8CZ3jZeoTXFqYYrUlEqO9 -fHMvuWqTV4KCSyNY+GWCrnHetulgKHlg+W2J1mZnrCKcBhWf9C2DesTJO+JldIeM -ornTFquSt21hZi+k3aySuMn2N3MWiNL8XsZVsAnPSs0zA+2fxjJkShls8Gc7cCvd -a6XrNC+PY6pONguo7rEU4HiwbvnawSTngFFglmH/ImdA/HkaAekW6o82aI8/UxFx -V9fFMO3iKDQdOrg77hI1bx9RlzKNZZinE2/Pu26fWd5d2zqDWCjl8ykGQRAfXgYN -H3BjgyXLl+ao5/pOUYYtzm3ruTXTgRcy5hhL6hVTYhSrf9vYh4LNIeXNKnZ78tyG -TX77/kU2qXhBGCFEUUMqUNV/+ITir2lmoxVjknt19M07aGr8C7SgYt6Rs+qDpMiy -JurgvRh8LpVq4pHx1efxzxCFmo58DMrG40I0+CF3y/niNpOb1gp2wAqByRiORkds -f0ytW6qZ0TpHbD6gOtQLYDnhx3ISuX+QYSekVwQUpffeWQ== ------END CERTIFICATE----- From c413540fb9e4b6ee2ec975a98676ea56d12249c8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 10 Jun 2019 16:21:42 +0100 Subject: [PATCH 384/429] Fix bug sending federation transactions with lots of EDUs If we try and send a transaction with lots of EDUs and we run out of space, we call get_new_device_msgs_for_remote with a limit of 0, which then failed. --- synapse/storage/deviceinbox.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/synapse/storage/deviceinbox.py b/synapse/storage/deviceinbox.py index 9b0a99cb49..4ea0deea4f 100644 --- a/synapse/storage/deviceinbox.py +++ b/synapse/storage/deviceinbox.py @@ -138,6 +138,10 @@ class DeviceInboxWorkerStore(SQLBaseStore): if not has_changed or last_stream_id == current_stream_id: return defer.succeed(([], current_stream_id)) + if limit <= 0: + # This can happen if we run out of room for EDUs in the transaction. + return defer.succeed(([], last_stream_id)) + def get_new_messages_for_remote_destination_txn(txn): sql = ( "SELECT stream_id, messages_json FROM device_federation_outbox" From 8d0bd9bb6054911589f82f71b5886bcfcade0de3 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 10 Jun 2019 16:23:07 +0100 Subject: [PATCH 385/429] fix build fails --- MANIFEST.in | 4 +++- changelog.d/5417.bugfix | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelog.d/5417.bugfix diff --git a/MANIFEST.in b/MANIFEST.in index ad1523e387..2c59c7bdc2 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -18,8 +18,10 @@ recursive-include docs * recursive-include scripts * recursive-include scripts-dev * recursive-include synapse *.pyi -recursive-include tests *.pem recursive-include tests *.py +include tests/http/ca.crt +include tests/http/ca.key +include tests/http/server.key recursive-include synapse/res * recursive-include synapse/static *.css diff --git a/changelog.d/5417.bugfix b/changelog.d/5417.bugfix new file mode 100644 index 0000000000..54be963a4e --- /dev/null +++ b/changelog.d/5417.bugfix @@ -0,0 +1 @@ +Fix excessive memory using with default `federation_verify_certificates: true` configuration. 
\ No newline at end of file From 1fb6f686165442c12f0c1a723de49884325e1486 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 10 Jun 2019 16:26:36 +0100 Subject: [PATCH 386/429] Newsfile --- changelog.d/5418.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5418.bugfix diff --git a/changelog.d/5418.bugfix b/changelog.d/5418.bugfix new file mode 100644 index 0000000000..018f0df2a6 --- /dev/null +++ b/changelog.d/5418.bugfix @@ -0,0 +1 @@ +Fix bug where attemptint to send transactions with large number of EDUs can fail. From 48748c00c416e2fd4fcf7dbc41ce72c02e6fdf6b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 10 Jun 2019 16:28:45 +0100 Subject: [PATCH 387/429] Update changelog.d/5418.bugfix Co-Authored-By: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- changelog.d/5418.bugfix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/5418.bugfix b/changelog.d/5418.bugfix index 018f0df2a6..3fd4d2a882 100644 --- a/changelog.d/5418.bugfix +++ b/changelog.d/5418.bugfix @@ -1 +1 @@ -Fix bug where attemptint to send transactions with large number of EDUs can fail. +Fix bug where attempting to send transactions with large number of EDUs can fail. From 19780a521ec1d200bbc1d25bf5041f8fc5691b40 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 10 Jun 2019 17:41:10 +0100 Subject: [PATCH 388/429] fix CI on python 2.7 --- tests/http/__init__.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/tests/http/__init__.py b/tests/http/__init__.py index b03fff0945..126826fd3f 100644 --- a/tests/http/__init__.py +++ b/tests/http/__init__.py @@ -70,7 +70,7 @@ def create_test_cert_file(sanlist): cert_file_count += 1 # first build a CSR - subprocess.run( + subprocess.check_call( [ "openssl", "req", @@ -81,8 +81,7 @@ def create_test_cert_file(sanlist): "/", "-out", csr_filename, - ], - check=True, + ] ) # now a config file describing the right SAN entries @@ -93,7 +92,7 @@ def create_test_cert_file(sanlist): # finally the cert ca_key_filename = os.path.join(os.path.dirname(__file__), "ca.key") ca_cert_filename = get_test_ca_cert_file() - subprocess.run( + subprocess.check_call( [ "openssl", "x509", @@ -110,8 +109,7 @@ def create_test_cert_file(sanlist): cnf_filename, "-out", cert_filename, - ], - check=True, + ] ) return cert_filename From 81b8fdedf2e2504555b76ebbedfac88521d3c93f Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 10 Jun 2019 17:51:11 +0100 Subject: [PATCH 389/429] rename gutwrenched attr --- synapse/crypto/context_factory.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/synapse/crypto/context_factory.py b/synapse/crypto/context_factory.py index 0639c228cb..2bc5cc3807 100644 --- a/synapse/crypto/context_factory.py +++ b/synapse/crypto/context_factory.py @@ -110,8 +110,10 @@ class ClientTLSOptionsFactory(object): tls_protocol = ssl_connection.get_app_data() try: # ... we further assume that SSLClientConnectionCreator has set the - # 'tls_verifier' attribute to a ConnectionVerifier object. - tls_protocol.tls_verifier.verify_context_info_cb(ssl_connection, where) + # '_synapse_tls_verifier' attribute to a ConnectionVerifier object. 
+ tls_protocol._synapse_tls_verifier.verify_context_info_cb( + ssl_connection, where + ) except: # noqa: E722, taken from the twisted implementation logger.exception("Error during info_callback") f = Failure() @@ -124,6 +126,7 @@ class SSLClientConnectionCreator(object): Replaces twisted.internet.ssl.ClientTLSOptions """ + def __init__(self, hostname, ctx, verify_certs): self._ctx = ctx self._verifier = ConnectionVerifier(hostname, verify_certs) @@ -136,10 +139,10 @@ class SSLClientConnectionCreator(object): # data to our TLSMemoryBIOProtocol... connection.set_app_data(tls_protocol) - # ... and we also gut-wrench a 'tls_verifier' attribute into the + # ... and we also gut-wrench a '_synapse_tls_verifier' attribute into the # tls_protocol so that the SSL context's info callback has something to # call to do the cert verification. - setattr(tls_protocol, "tls_verifier", self._verifier) + setattr(tls_protocol, "_synapse_tls_verifier", self._verifier) return connection @@ -149,13 +152,14 @@ class ConnectionVerifier(object): This is a thing which is attached to the TLSMemoryBIOProtocol, and is called by the ssl context's info callback. """ + # This code is based on twisted.internet.ssl.ClientTLSOptions. def __init__(self, hostname, verify_certs): self._verify_certs = verify_certs if isIPAddress(hostname) or isIPv6Address(hostname): - self._hostnameBytes = hostname.encode('ascii') + self._hostnameBytes = hostname.encode("ascii") self._is_ip_address = True else: # twisted's ClientTLSOptions falls back to the stdlib impl here if From db74c4fc6ce2982a4e563c98b3affca3169b3f18 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Mon, 10 Jun 2019 17:55:01 +0100 Subject: [PATCH 390/429] fix ci on py2, again --- tests/http/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/http/__init__.py b/tests/http/__init__.py index 126826fd3f..2d5dba6464 100644 --- a/tests/http/__init__.py +++ b/tests/http/__init__.py @@ -50,7 +50,7 @@ CONFIG_TEMPLATE = b"""\ [default] basicConstraints = CA:FALSE keyUsage=nonRepudiation, digitalSignature, keyEncipherment -subjectAltName = %(sanentries)b +subjectAltName = %(sanentries)s """ From 01674479651cd12c995581911cfb58e5d5493495 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 10 Jun 2019 18:17:43 +0100 Subject: [PATCH 391/429] 1.0.0rc2 --- CHANGES.md | 11 +++++++++++ changelog.d/5392.bugfix | 1 - changelog.d/5415.bugfix | 1 - changelog.d/5417.bugfix | 1 - synapse/__init__.py | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) delete mode 100644 changelog.d/5392.bugfix delete mode 100644 changelog.d/5415.bugfix delete mode 100644 changelog.d/5417.bugfix diff --git a/CHANGES.md b/CHANGES.md index 4dea0f6319..523cdb1153 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,14 @@ +Synapse 1.0.0rc2 (2019-06-10) +============================= + +Bugfixes +-------- + +- Remove redundant warning about key server response validation. ([\#5392](https://github.com/matrix-org/synapse/issues/5392)) +- Fix bug where old keys stored in the database with a null valid until timestamp caused all verification requests for that key to fail. ([\#5415](https://github.com/matrix-org/synapse/issues/5415)) +- Fix excessive memory using with default `federation_verify_certificates: true` configuration. 
([\#5417](https://github.com/matrix-org/synapse/issues/5417)) + + Synapse 1.0.0rc1 (2019-06-07) ============================= diff --git a/changelog.d/5392.bugfix b/changelog.d/5392.bugfix deleted file mode 100644 index 295a7cfce1..0000000000 --- a/changelog.d/5392.bugfix +++ /dev/null @@ -1 +0,0 @@ -Remove redundant warning about key server response validation. diff --git a/changelog.d/5415.bugfix b/changelog.d/5415.bugfix deleted file mode 100644 index 83629e193d..0000000000 --- a/changelog.d/5415.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where old keys stored in the database with a null valid until timestamp caused all verification requests for that key to fail. diff --git a/changelog.d/5417.bugfix b/changelog.d/5417.bugfix deleted file mode 100644 index 54be963a4e..0000000000 --- a/changelog.d/5417.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix excessive memory using with default `federation_verify_certificates: true` configuration. \ No newline at end of file diff --git a/synapse/__init__.py b/synapse/__init__.py index 77a4cfc3a5..8dc07fe73c 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "1.0.0rc1" +__version__ = "1.0.0rc2" From 49e01e5710fdbd9bb8da24844718eb2f5d6ee5c7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 10 Jun 2019 23:09:31 +0100 Subject: [PATCH 392/429] Fix defaults on checking threepids --- synapse/handlers/auth.py | 1 + synapse/storage/registration.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 7f8ddc99c6..a0cf37a9f9 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -479,6 +479,7 @@ class AuthHandler(BaseHandler): medium, threepid_creds["client_secret"], sid=threepid_creds["sid"], + validated=True, ) threepid = { diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 9b41cbd757..1dd1182e82 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -998,7 +998,7 @@ class RegistrationStore( client_secret, address=None, sid=None, - validated=None, + validated=True, ): """Gets a session_id and last_send_attempt (if available) for a client_secret/medium/(address|session_id) combo From 94dac0f3e55938916ec76a2183d6703af6ea4362 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Mon, 10 Jun 2019 23:33:59 +0100 Subject: [PATCH 393/429] add monthly active users to phonehome stats (#5252) * add monthly active users to phonehome stats --- changelog.d/5252.feature | 1 + synapse/app/homeserver.py | 1 + synapse/storage/__init__.py | 44 ++++++++++++++++++++++++------------- 3 files changed, 31 insertions(+), 15 deletions(-) create mode 100644 changelog.d/5252.feature diff --git a/changelog.d/5252.feature b/changelog.d/5252.feature new file mode 100644 index 0000000000..44115b0382 --- /dev/null +++ b/changelog.d/5252.feature @@ -0,0 +1 @@ +Add monthly active users to phonehome stats. 
diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index df524a23dd..811b547dd3 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -541,6 +541,7 @@ def run(hs): stats["total_room_count"] = room_count stats["daily_active_users"] = yield hs.get_datastore().count_daily_users() + stats["monthly_active_users"] = yield hs.get_datastore().count_monthly_users() stats["daily_active_rooms"] = yield hs.get_datastore().count_daily_active_rooms() stats["daily_messages"] = yield hs.get_datastore().count_daily_messages() diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index 71316f7d09..0ca6f6121f 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -279,23 +279,37 @@ class DataStore( """ Counts the number of users who used this homeserver in the last 24 hours. """ + yesterday = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24) + return self.runInteraction("count_daily_users", self._count_users, yesterday,) - def _count_users(txn): - yesterday = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24) + def count_monthly_users(self): + """ + Counts the number of users who used this homeserver in the last 30 days. + Note this method is intended for phonehome metrics only and is different + from the mau figure in synapse.storage.monthly_active_users which, + amongst other things, includes a 3 day grace period before a user counts. + """ + thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30) + return self.runInteraction( + "count_monthly_users", + self._count_users, + thirty_days_ago, + ) - sql = """ - SELECT COALESCE(count(*), 0) FROM ( - SELECT user_id FROM user_ips - WHERE last_seen > ? - GROUP BY user_id - ) u - """ - - txn.execute(sql, (yesterday,)) - count, = txn.fetchone() - return count - - return self.runInteraction("count_users", _count_users) + def _count_users(self, txn, time_from): + """ + Returns number of users seen in the past time_from period + """ + sql = """ + SELECT COALESCE(count(*), 0) FROM ( + SELECT user_id FROM user_ips + WHERE last_seen > ? + GROUP BY user_id + ) u + """ + txn.execute(sql, (time_from,)) + count, = txn.fetchone() + return count def count_r30_users(self): """ From 6bac9ca6d70fc5bf9a828379a7abbd6e9d064137 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 11 Jun 2019 00:06:39 +0100 Subject: [PATCH 394/429] 1.0.0rc3 --- CHANGES.md | 6 ++++++ synapse/__init__.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 523cdb1153..f4a3ab71ca 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,9 @@ +Synapse 1.0.0rc3 (2019-06-10) +============================= + +Security: Fix authentication bug introduced in 1.0.0rc1. Please upgrade to rc3 immediately + + Synapse 1.0.0rc2 (2019-06-10) ============================= diff --git a/synapse/__init__.py b/synapse/__init__.py index 8dc07fe73c..9c75a0a27f 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "1.0.0rc2" +__version__ = "1.0.0rc3" From 2ddc13577c93505b887880fa715def9addeafafe Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 11 Jun 2019 00:25:07 +0100 Subject: [PATCH 395/429] Don't warn user about password reset disabling through config code (#5387) Moves the warning about password resets being disabled to the point where a user actually tries to reset their password. Is this an appropriate place for it to happen? 
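The net effect is a lazy check: config parsing just records that email-based resets had to be switched off, and the warning plus error are only produced when a reset is actually attempted. A compressed sketch of that check (the wrapper function is illustrative; the attribute names and error text are the ones introduced in the diff below):

    from synapse.api.errors import SynapseError

    def assert_email_resets_enabled(config, logger):
        if config.email_password_reset_behaviour == "off":
            if config.password_resets_were_disabled_due_to_email_config:
                # Deferred warning: only complain once someone actually needs the feature.
                logger.warn(
                    "User password resets have been disabled due to lack of email config"
                )
            raise SynapseError(
                400, "Email-based password resets have been disabled on this server"
            )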
Also removed the disabling of msisdn password resets when you don't have an email config, as that just doesn't make sense. Also change the error a user receives upon disabled passwords to specify that only email-based password reset is disabled. --- changelog.d/5387.bugfix | 1 + synapse/config/emailconfig.py | 11 +++++------ synapse/rest/client/v2_alpha/account.py | 19 +++++++++++++++---- 3 files changed, 21 insertions(+), 10 deletions(-) create mode 100644 changelog.d/5387.bugfix diff --git a/changelog.d/5387.bugfix b/changelog.d/5387.bugfix new file mode 100644 index 0000000000..2c6c94efc4 --- /dev/null +++ b/changelog.d/5387.bugfix @@ -0,0 +1 @@ +Warn about disabling email-based password resets when a reset occurs, and remove warning when someone attempts a phone-based reset. diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index ae04252906..86018dfcce 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -19,15 +19,12 @@ from __future__ import print_function # This file can't be called email.py because if it is, we cannot: import email.utils -import logging import os import pkg_resources from ._base import Config, ConfigError -logger = logging.getLogger(__name__) - class EmailConfig(Config): def read_config(self, config): @@ -85,10 +82,12 @@ class EmailConfig(Config): self.email_password_reset_behaviour = ( "remote" if email_trust_identity_server_for_password_resets else "local" ) + self.password_resets_were_disabled_due_to_email_config = False if self.email_password_reset_behaviour == "local" and email_config == {}: - logger.warn( - "User password resets have been disabled due to lack of email config" - ) + # We cannot warn the user this has happened here + # Instead do so when a user attempts to reset their password + self.password_resets_were_disabled_due_to_email_config = True + self.email_password_reset_behaviour = "off" # Get lifetime of a validation token in milliseconds diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index e4c63b69b9..7cfd7ae7dc 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -68,7 +68,13 @@ class EmailPasswordRequestTokenRestServlet(RestServlet): @defer.inlineCallbacks def on_POST(self, request): if self.config.email_password_reset_behaviour == "off": - raise SynapseError(400, "Password resets have been disabled on this server") + if self.config.password_resets_were_disabled_due_to_email_config: + logger.warn( + "User password resets have been disabled due to lack of email config" + ) + raise SynapseError( + 400, "Email-based password resets have been disabled on this server", + ) body = parse_json_object_from_request(request) @@ -196,9 +202,6 @@ class MsisdnPasswordRequestTokenRestServlet(RestServlet): @defer.inlineCallbacks def on_POST(self, request): - if not self.config.email_password_reset_behaviour == "off": - raise SynapseError(400, "Password resets have been disabled on this server") - body = parse_json_object_from_request(request) assert_params_in_dict(body, [ @@ -251,6 +254,14 @@ class PasswordResetSubmitTokenServlet(RestServlet): 400, "This medium is currently not supported for password resets", ) + if self.config.email_password_reset_behaviour == "off": + if self.config.password_resets_were_disabled_due_to_email_config: + logger.warn( + "User password resets have been disabled due to lack of email config" + ) + raise SynapseError( + 400, "Email-based password resets have been disabled on this 
server", + ) sid = parse_string(request, "sid") client_secret = parse_string(request, "client_secret") From 10383e6e6fefe29b007d11220841c17ad9cfc3e1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 11 Jun 2019 11:31:12 +0100 Subject: [PATCH 396/429] Change password reset links to /_matrix. --- synapse/app/homeserver.py | 1 - synapse/push/mailer.py | 2 +- .../res/templates/password_reset_success.html | 2 +- synapse/rest/client/v2_alpha/account.py | 9 +- tests/rest/client/v2_alpha/test_account.py | 241 ++++++++++++++++++ tests/unittest.py | 12 + 6 files changed, 260 insertions(+), 7 deletions(-) create mode 100644 tests/rest/client/v2_alpha/test_account.py diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index df524a23dd..1045d28949 100755 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -176,7 +176,6 @@ class SynapseHomeServer(HomeServer): resources.update({ "/_matrix/client/api/v1": client_resource, - "/_synapse/password_reset": client_resource, "/_matrix/client/r0": client_resource, "/_matrix/client/unstable": client_resource, "/_matrix/client/v2_alpha": client_resource, diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 4bc9eb7313..099f9545ab 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -117,7 +117,7 @@ class Mailer(object): link = ( self.hs.config.public_baseurl + - "_synapse/password_reset/email/submit_token" + "_matrix/client/unstable/password_reset/email/submit_token" "?token=%s&client_secret=%s&sid=%s" % (token, client_secret, sid) ) diff --git a/synapse/res/templates/password_reset_success.html b/synapse/res/templates/password_reset_success.html index 7b6fa5e6f0..7324d66d1e 100644 --- a/synapse/res/templates/password_reset_success.html +++ b/synapse/res/templates/password_reset_success.html @@ -1,6 +1,6 @@ -

-<p>Your password was successfully reset. You may now close this window.</p>
+<p>Your email has now been validated, please return to your client to reset your password. You may now close this window.</p>
diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index e4c63b69b9..7025f486e1 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -15,7 +15,6 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -import re from six.moves import http_client @@ -228,9 +227,11 @@ class MsisdnPasswordRequestTokenRestServlet(RestServlet): class PasswordResetSubmitTokenServlet(RestServlet): """Handles 3PID validation token submission""" - PATTERNS = [ - re.compile("^/_synapse/password_reset/(?P[^/]*)/submit_token/*$"), - ] + PATTERNS = client_patterns( + "/password_reset/(?P[^/]*)/submit_token/*$", + releases=(), + unstable=True, + ) def __init__(self, hs): """ diff --git a/tests/rest/client/v2_alpha/test_account.py b/tests/rest/client/v2_alpha/test_account.py new file mode 100644 index 0000000000..0d1c0868ce --- /dev/null +++ b/tests/rest/client/v2_alpha/test_account.py @@ -0,0 +1,241 @@ +# -*- coding: utf-8 -*- +# Copyright 2015-2016 OpenMarket Ltd +# Copyright 2017-2018 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import re +from email.parser import Parser + +import pkg_resources + +import synapse.rest.admin +from synapse.api.constants import LoginType +from synapse.rest.client.v1 import login +from synapse.rest.client.v2_alpha import account, register + +from tests import unittest + + +class PasswordResetTestCase(unittest.HomeserverTestCase): + + servlets = [ + account.register_servlets, + synapse.rest.admin.register_servlets_for_client_rest_resource, + register.register_servlets, + login.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + config = self.default_config() + + # Email config. 
+ self.email_attempts = [] + + def sendmail(smtphost, from_addr, to_addrs, msg, **kwargs): + self.email_attempts.append(msg) + return + + config["email"] = { + "enable_notifs": False, + "template_dir": os.path.abspath( + pkg_resources.resource_filename("synapse", "res/templates") + ), + "smtp_host": "127.0.0.1", + "smtp_port": 20, + "require_transport_security": False, + "smtp_user": None, + "smtp_pass": None, + "notif_from": "test@example.com", + } + config["public_baseurl"] = "https://example.com" + + hs = self.setup_test_homeserver(config=config, sendmail=sendmail) + return hs + + def prepare(self, reactor, clock, hs): + self.store = hs.get_datastore() + + def test_basic_password_reset(self): + """Test basic password reset flow + """ + old_password = "monkey" + new_password = "kangeroo" + + user_id = self.register_user("kermit", old_password) + self.login("kermit", old_password) + + email = "test@example.com" + + # Add a threepid + self.get_success( + self.store.user_add_threepid( + user_id=user_id, + medium="email", + address=email, + validated_at=0, + added_at=0, + ) + ) + + client_secret = "foobar" + session_id = self._request_token(email, client_secret) + + self.assertEquals(len(self.email_attempts), 1) + link = self._get_link_from_email() + + self._validate_token(link) + + self._reset_password(new_password, session_id, client_secret) + + # Assert we can log in with the new password + self.login("kermit", new_password) + + # Assert we can't log in with the old password + self.attempt_wrong_password_login("kermit", old_password) + + def test_cant_reset_password_without_clicking_link(self): + """Test that we do actually need to click the link in the email + """ + old_password = "monkey" + new_password = "kangeroo" + + user_id = self.register_user("kermit", old_password) + self.login("kermit", old_password) + + email = "test@example.com" + + # Add a threepid + self.get_success( + self.store.user_add_threepid( + user_id=user_id, + medium="email", + address=email, + validated_at=0, + added_at=0, + ) + ) + + client_secret = "foobar" + session_id = self._request_token(email, client_secret) + + self.assertEquals(len(self.email_attempts), 1) + + # Attempt to reset password without clicking the link + self._reset_password( + new_password, session_id, client_secret, expected_code=401, + ) + + # Assert we can log in with the old password + self.login("kermit", old_password) + + # Assert we can't log in with the new password + self.attempt_wrong_password_login("kermit", new_password) + + def test_no_valid_token(self): + """Test that we do actually need to request a token and can't just + make a session up. 
+ """ + old_password = "monkey" + new_password = "kangeroo" + + user_id = self.register_user("kermit", old_password) + self.login("kermit", old_password) + + email = "test@example.com" + + # Add a threepid + self.get_success( + self.store.user_add_threepid( + user_id=user_id, + medium="email", + address=email, + validated_at=0, + added_at=0, + ) + ) + + client_secret = "foobar" + session_id = "weasle" + + # Attempt to reset password without even requesting an email + self._reset_password( + new_password, session_id, client_secret, expected_code=401, + ) + + # Assert we can log in with the old password + self.login("kermit", old_password) + + # Assert we can't log in with the new password + self.attempt_wrong_password_login("kermit", new_password) + + def _request_token(self, email, client_secret): + request, channel = self.make_request( + "POST", + b"account/password/email/requestToken", + {"client_secret": client_secret, "email": email, "send_attempt": 1}, + ) + self.render(request) + self.assertEquals(200, channel.code, channel.result) + + return channel.json_body["sid"] + + def _validate_token(self, link): + # Remove the host + path = link.replace("https://example.com", "") + + request, channel = self.make_request("GET", path, shorthand=False) + self.render(request) + self.assertEquals(200, channel.code, channel.result) + + def _get_link_from_email(self): + assert self.email_attempts, "No emails have been sent" + + raw_msg = self.email_attempts[-1].decode("UTF-8") + mail = Parser().parsestr(raw_msg) + + text = None + for part in mail.walk(): + if part.get_content_type() == "text/plain": + text = part.get_payload(decode=True).decode("UTF-8") + break + + if not text: + self.fail("Could not find text portion of email to parse") + + match = re.search(r"https://example.com\S+", text) + assert match, "Could not find link in email" + + return match.group(0) + + def _reset_password( + self, new_password, session_id, client_secret, expected_code=200 + ): + request, channel = self.make_request( + "POST", + b"account/password", + { + "new_password": new_password, + "auth": { + "type": LoginType.EMAIL_IDENTITY, + "threepid_creds": { + "client_secret": client_secret, + "sid": session_id, + }, + }, + }, + ) + self.render(request) + self.assertEquals(expected_code, channel.code, channel.result) diff --git a/tests/unittest.py b/tests/unittest.py index 26204470b1..7dbb64af59 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -441,3 +441,15 @@ class HomeserverTestCase(TestCase): access_token = channel.json_body["access_token"] return access_token + + def attempt_wrong_password_login(self, username, password): + """Attempts to login as the user with the given password, asserting + that the attempt *fails*. + """ + body = {"type": "m.login.password", "user": username, "password": password} + + request, channel = self.make_request( + "POST", "/_matrix/client/r0/login", json.dumps(body).encode('utf8') + ) + self.render(request) + self.assertEqual(channel.code, 403, channel.result) From 453aaaadc0579a715b779cf3e7eeb6872d054396 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 11 Jun 2019 11:33:14 +0100 Subject: [PATCH 397/429] Newsfile --- changelog.d/5424.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5424.misc diff --git a/changelog.d/5424.misc b/changelog.d/5424.misc new file mode 100644 index 0000000000..b92b50e317 --- /dev/null +++ b/changelog.d/5424.misc @@ -0,0 +1 @@ +Move password reset links to /_matrix/client/unstable namespace. 
From 426218323b8475a71b3c58d7d291f0046faa62ab Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Tue, 11 Jun 2019 12:17:43 +0100 Subject: [PATCH 398/429] Neilj/improve federation docs (#5419) Add FAQ questions to federate.md. Add a health warning making it clear that the 1711 upgrade FAQ is now out of date. --- INSTALL.md | 27 +++++------ changelog.d/5419.doc | 1 + docs/MSC1711_certificates_FAQ.md | 17 +++++++ docs/federate.md | 77 ++++++++++++++++++++++++++++++-- 4 files changed, 106 insertions(+), 16 deletions(-) create mode 100644 changelog.d/5419.doc diff --git a/INSTALL.md b/INSTALL.md index a1ff91a98e..2df686b19b 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -1,14 +1,14 @@ -* [Installing Synapse](#installing-synapse) - * [Installing from source](#installing-from-source) - * [Platform-Specific Instructions](#platform-specific-instructions) - * [Troubleshooting Installation](#troubleshooting-installation) - * [Prebuilt packages](#prebuilt-packages) -* [Setting up Synapse](#setting-up-synapse) - * [TLS certificates](#tls-certificates) - * [Email](#email) - * [Registering a user](#registering-a-user) - * [Setting up a TURN server](#setting-up-a-turn-server) - * [URL previews](#url-previews) +- [Installing Synapse](#installing-synapse) + - [Installing from source](#installing-from-source) + - [Platform-Specific Instructions](#platform-specific-instructions) + - [Troubleshooting Installation](#troubleshooting-installation) + - [Prebuilt packages](#prebuilt-packages) +- [Setting up Synapse](#setting-up-synapse) + - [TLS certificates](#tls-certificates) + - [Email](#email) + - [Registering a user](#registering-a-user) + - [Setting up a TURN server](#setting-up-a-turn-server) + - [URL previews](#url-previews) # Installing Synapse @@ -395,8 +395,9 @@ To configure Synapse to expose an HTTPS port, you will need to edit instance, if using certbot, use `fullchain.pem` as your certificate, not `cert.pem`). -For those of you upgrading your TLS certificate for Synapse 1.0 compliance, -please take a look at [our guide](docs/MSC1711_certificates_FAQ.md#configuring-certificates-for-compatibility-with-synapse-100). +For a more detailed guide to configuring your server for federation, see +[federate.md](docs/federate.md) + ## Email diff --git a/changelog.d/5419.doc b/changelog.d/5419.doc new file mode 100644 index 0000000000..74cf5eea8b --- /dev/null +++ b/changelog.d/5419.doc @@ -0,0 +1 @@ +Expand the federation guide to include relevant content from the MSC1711 FAQ diff --git a/docs/MSC1711_certificates_FAQ.md b/docs/MSC1711_certificates_FAQ.md index 599462bdcb..7f9a23ff31 100644 --- a/docs/MSC1711_certificates_FAQ.md +++ b/docs/MSC1711_certificates_FAQ.md @@ -1,5 +1,22 @@ # MSC1711 Certificates FAQ +## Historical Note +This document was originally written to guide server admins through the upgrade +path towards Synapse 1.0. Specifically, +[MSC1711](https://github.com/matrix-org/matrix-doc/blob/master/proposals/1711-x509-for-federation.md) +required that all servers present valid TLS certificates on their federation +API. Admins were encouraged to achieve compliance from version 0.99.0 (released +in February 2019) ahead of version 1.0 (released June 2019) enforcing the +certificate checks. + +Much of what follows is now outdated since most admins will have already +upgraded, however it may be of use to those with old installs returning to the +project. + +If you are setting up a server from scratch you almost certainly should look at +the [installation guide](INSTALL.md) instead. 
+ +## Introduction The goal of Synapse 0.99.0 is to act as a stepping stone to Synapse 1.0.0. It supports the r0.1 release of the server to server specification, but is compatible with both the legacy Matrix federation behaviour (pre-r0.1) as well diff --git a/docs/federate.md b/docs/federate.md index b7fc09661c..6d6bb85e15 100644 --- a/docs/federate.md +++ b/docs/federate.md @@ -14,9 +14,9 @@ up and will work provided you set the ``server_name`` to match your machine's public DNS hostname, and provide Synapse with a TLS certificate which is valid for your ``server_name``. -Once you have completed the steps necessary to federate, you should be able to -join a room via federation. (A good place to start is ``#synapse:matrix.org`` - a -room for Synapse admins.) +Once federation has been configured, you should be able to join a room over +federation. A good place to start is ``#synapse:matrix.org`` - a room for +Synapse admins. ## Delegation @@ -98,6 +98,77 @@ _matrix._tcp.``. In our example, we would expect this: Note that the target of a SRV record cannot be an alias (CNAME record): it has to point directly to the server hosting the synapse instance. +### Delegation FAQ +#### When do I need a SRV record or .well-known URI? + +If your homeserver listens on the default federation port (8448), and your +`server_name` points to the host that your homeserver runs on, you do not need an SRV +record or `.well-known/matrix/server` URI. + +For instance, if you registered `example.com` and pointed its DNS A record at a +fresh server, you could install Synapse on that host, +giving it a `server_name` of `example.com`, and once [ACME](acme.md) support is enabled, +it would automatically generate a valid TLS certificate for you via Let's Encrypt +and no SRV record or .well-known URI would be needed. + +This is the common case, although you can add an SRV record or +`.well-known/matrix/server` URI for completeness if you wish. + +**However**, if your server does not listen on port 8448, or if your `server_name` +does not point to the host that your homeserver runs on, you will need to let +other servers know how to find it. The way to do this is via .well-known or an +SRV record. + +#### I have created a .well-known URI. Do I still need an SRV record? + +As of Synapse 0.99, Synapse will first check for the existence of a .well-known +URI and follow any delegation it suggests. It will only then check for the +existence of an SRV record. + +That means that the SRV record will often be redundant. However, you should +remember that there may still be older versions of Synapse in the federation +which do not understand .well-known URIs, so if you removed your SRV record +you would no longer be able to federate with them. + +It is therefore best to leave the SRV record in place for now. Synapse 0.34 and +earlier will follow the SRV record (and not care about the invalid +certificate). Synapse 0.99 and later will follow the .well-known URI, with the +correct certificate chain. + +#### Can I manage my own certificates rather than having Synapse renew certificates itself? + +Yes, you are welcome to manage your certificates yourself. Synapse will only +attempt to obtain certificates from Let's Encrypt if you configure it to do +so.The only requirement is that there is a valid TLS cert present for +federation end points. + +#### Do you still recommend against using a reverse proxy on the federation port? + +We no longer actively recommend against using a reverse proxy. 
Many admins will +find it easier to direct federation traffic to a reverse proxy and manage their +own TLS certificates, and this is a supported configuration. + +See [reverse_proxy.rst](reverse_proxy.rst) for information on setting up a +reverse proxy. + +#### Do I still need to give my TLS certificates to Synapse if I am using a reverse proxy? + +Practically speaking, this is no longer necessary. + +If you are using a reverse proxy for all of your TLS traffic, then you can set +`no_tls: True` in the Synapse config. In that case, the only reason Synapse +needs the certificate is to populate a legacy `tls_fingerprints` field in the +federation API. This is ignored by Synapse 0.99.0 and later, and the only time +pre-0.99 Synapses will check it is when attempting to fetch the server keys - +and generally this is delegated via `matrix.org`, which will be running a modern +version of Synapse. + +#### Do I need the same certificate for the client and federation port? + +No. There is nothing stopping you from using different certificates, +particularly if you are using a reverse proxy. However, Synapse will use the +same certificate on any ports where TLS is configured. + ## Troubleshooting You can use the [federation tester]( From a766c41d258b48ca1690723c1aa51684baa05e6a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 11 Jun 2019 11:46:32 +0100 Subject: [PATCH 399/429] Bump bleach version so that tests can run on old deps. --- synapse/python_dependencies.py | 2 +- tests/push/test_email.py | 6 ------ tests/push/test_http.py | 6 ------ tests/rest/client/test_consent.py | 6 ------ tests/rest/client/v2_alpha/test_register.py | 6 ------ 5 files changed, 1 insertion(+), 25 deletions(-) diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 6efd81f204..7dfa78dadb 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -80,7 +80,7 @@ REQUIREMENTS = [ ] CONDITIONAL_REQUIREMENTS = { - "email": ["Jinja2>=2.9", "bleach>=1.4.2"], + "email": ["Jinja2>=2.9", "bleach>=1.4.3"], "matrix-synapse-ldap3": ["matrix-synapse-ldap3>=0.1"], # we use execute_batch, which arrived in psycopg 2.7. 
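The delegation FAQ added to docs/federate.md above boils down to a fixed lookup order: honour `.well-known/matrix/server` if it resolves, otherwise try a `_matrix._tcp` SRV record, otherwise connect to the server name on the default federation port 8448. A minimal sketch of that order, using the same `requests` and `srvlookup` libraries as the `federation_client` dev script (port suffixes and IP literals are ignored here for brevity):

```python
import requests
import srvlookup

def resolve_matrix_server(server_name):
    # 1. .well-known delegation takes precedence for Synapse 0.99 and later.
    try:
        resp = requests.get("https://%s/.well-known/matrix/server" % (server_name,))
        if resp.status_code == 200:
            delegated = resp.json().get("m.server")
            if delegated:
                server_name = delegated
    except Exception:
        pass  # no usable .well-known; carry on with the original name

    # 2. Otherwise (or after delegation), look for a SRV record...
    try:
        srv = srvlookup.lookup("matrix", "tcp", server_name)[0]
        return srv.host, srv.port
    except Exception:
        # 3. ...and finally fall back to the default federation port.
        return server_name, 8448

print(resolve_matrix_server("example.com"))
```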
diff --git a/tests/push/test_email.py b/tests/push/test_email.py index 9cdde1a9bd..9bc5f07de1 100644 --- a/tests/push/test_email.py +++ b/tests/push/test_email.py @@ -24,15 +24,9 @@ from synapse.rest.client.v1 import login, room from tests.unittest import HomeserverTestCase -try: - from synapse.push.mailer import load_jinja2_templates -except Exception: - load_jinja2_templates = None - class EmailPusherTests(HomeserverTestCase): - skip = "No Jinja installed" if not load_jinja2_templates else None servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, room.register_servlets, diff --git a/tests/push/test_http.py b/tests/push/test_http.py index aba618b2be..22c3f73ef3 100644 --- a/tests/push/test_http.py +++ b/tests/push/test_http.py @@ -23,15 +23,9 @@ from synapse.util.logcontext import make_deferred_yieldable from tests.unittest import HomeserverTestCase -try: - from synapse.push.mailer import load_jinja2_templates -except Exception: - load_jinja2_templates = None - class HTTPPusherTests(HomeserverTestCase): - skip = "No Jinja installed" if not load_jinja2_templates else None servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, room.register_servlets, diff --git a/tests/rest/client/test_consent.py b/tests/rest/client/test_consent.py index 88f8f1abdc..efc5a99db3 100644 --- a/tests/rest/client/test_consent.py +++ b/tests/rest/client/test_consent.py @@ -23,14 +23,8 @@ from synapse.rest.consent import consent_resource from tests import unittest from tests.server import render -try: - from synapse.push.mailer import load_jinja2_templates -except Exception: - load_jinja2_templates = None - class ConsentResourceTestCase(unittest.HomeserverTestCase): - skip = "No Jinja installed" if not load_jinja2_templates else None servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, room.register_servlets, diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 0cb6a363d6..e9d8f3c734 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -30,11 +30,6 @@ from synapse.rest.client.v2_alpha import account_validity, register, sync from tests import unittest -try: - from synapse.push.mailer import load_jinja2_templates -except ImportError: - load_jinja2_templates = None - class RegisterRestServletTestCase(unittest.HomeserverTestCase): @@ -307,7 +302,6 @@ class AccountValidityTestCase(unittest.HomeserverTestCase): class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): - skip = "No Jinja installed" if not load_jinja2_templates else None servlets = [ register.register_servlets, synapse.rest.admin.register_servlets_for_client_rest_resource, From 97174780ce726962ca1beb3788b62f16e9fad270 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 11 Jun 2019 17:10:01 +0100 Subject: [PATCH 400/429] 1.0.0 --- CHANGES.md | 21 +++++++++++++++++++++ changelog.d/5418.bugfix | 1 - changelog.d/5419.doc | 1 - changelog.d/5424.misc | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 6 files changed, 28 insertions(+), 4 deletions(-) delete mode 100644 changelog.d/5418.bugfix delete mode 100644 changelog.d/5419.doc delete mode 100644 changelog.d/5424.misc diff --git a/CHANGES.md b/CHANGES.md index f4a3ab71ca..1b827c8079 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,24 @@ +Synapse 1.0.0 (2019-06-11) +========================== + +Bugfixes +-------- + +- Fix bug where attempting to send transactions with large number of EDUs can fail. 
([\#5418](https://github.com/matrix-org/synapse/issues/5418)) + + +Improved Documentation +---------------------- + +- Expand the federation guide to include relevant content from the MSC1711 FAQ ([\#5419](https://github.com/matrix-org/synapse/issues/5419)) + + +Internal Changes +---------------- + +- Move password reset links to /_matrix/client/unstable namespace. ([\#5424](https://github.com/matrix-org/synapse/issues/5424)) + + Synapse 1.0.0rc3 (2019-06-10) ============================= diff --git a/changelog.d/5418.bugfix b/changelog.d/5418.bugfix deleted file mode 100644 index 3fd4d2a882..0000000000 --- a/changelog.d/5418.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where attempting to send transactions with large number of EDUs can fail. diff --git a/changelog.d/5419.doc b/changelog.d/5419.doc deleted file mode 100644 index 74cf5eea8b..0000000000 --- a/changelog.d/5419.doc +++ /dev/null @@ -1 +0,0 @@ -Expand the federation guide to include relevant content from the MSC1711 FAQ diff --git a/changelog.d/5424.misc b/changelog.d/5424.misc deleted file mode 100644 index b92b50e317..0000000000 --- a/changelog.d/5424.misc +++ /dev/null @@ -1 +0,0 @@ -Move password reset links to /_matrix/client/unstable namespace. diff --git a/debian/changelog b/debian/changelog index 6a1a72c0e3..ef4edd7ac0 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.0.0) stable; urgency=medium + + * New synapse release 1.0.0. + + -- Synapse Packaging team Tue, 11 Jun 2019 17:09:53 +0100 + matrix-synapse-py3 (0.99.5.2) stable; urgency=medium * New synapse release 0.99.5.2. diff --git a/synapse/__init__.py b/synapse/__init__.py index 9c75a0a27f..5bc24863d9 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -27,4 +27,4 @@ try: except ImportError: pass -__version__ = "1.0.0rc3" +__version__ = "1.0.0" From 09e9a26b7181e36af7e2a4a0795d68f962742738 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Wed, 12 Jun 2019 21:31:59 +1000 Subject: [PATCH 401/429] Remove Python 2.7 support. 
(#5425) * remove 2.7 from CI and publishing * fill out classifiers and also make it not be installed on 3.5 * some minor bumps so that the old deps work on python 3.5 --- .buildkite/docker-compose.py27.pg94.yaml | 21 ------- .buildkite/docker-compose.py27.pg95.yaml | 21 ------- .buildkite/pipeline.yml | 57 +------------------ .circleci/config.yml | 70 ------------------------ changelog.d/5425.removal | 1 + setup.py | 10 ++++ synapse/__init__.py | 7 +++ synapse/python_dependencies.py | 4 +- tox.ini | 4 +- 9 files changed, 25 insertions(+), 170 deletions(-) delete mode 100644 .buildkite/docker-compose.py27.pg94.yaml delete mode 100644 .buildkite/docker-compose.py27.pg95.yaml create mode 100644 changelog.d/5425.removal diff --git a/.buildkite/docker-compose.py27.pg94.yaml b/.buildkite/docker-compose.py27.pg94.yaml deleted file mode 100644 index 2d4b9eadd9..0000000000 --- a/.buildkite/docker-compose.py27.pg94.yaml +++ /dev/null @@ -1,21 +0,0 @@ -version: '3.1' - -services: - - postgres: - image: postgres:9.4 - environment: - POSTGRES_PASSWORD: postgres - - testenv: - image: python:2.7 - depends_on: - - postgres - env_file: .env - environment: - SYNAPSE_POSTGRES_HOST: postgres - SYNAPSE_POSTGRES_USER: postgres - SYNAPSE_POSTGRES_PASSWORD: postgres - working_dir: /app - volumes: - - ..:/app diff --git a/.buildkite/docker-compose.py27.pg95.yaml b/.buildkite/docker-compose.py27.pg95.yaml deleted file mode 100644 index c6a41f1da0..0000000000 --- a/.buildkite/docker-compose.py27.pg95.yaml +++ /dev/null @@ -1,21 +0,0 @@ -version: '3.1' - -services: - - postgres: - image: postgres:9.5 - environment: - POSTGRES_PASSWORD: postgres - - testenv: - image: python:2.7 - depends_on: - - postgres - env_file: .env - environment: - SYNAPSE_POSTGRES_HOST: postgres - SYNAPSE_POSTGRES_USER: postgres - SYNAPSE_POSTGRES_PASSWORD: postgres - working_dir: /app - volumes: - - ..:/app diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 719f22b4e1..8eddf8b931 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -48,13 +48,13 @@ steps: - command: - "python -m pip install tox" - - "tox -e py27,codecov" - label: ":python: 2.7 / SQLite" + - "tox -e py35-old,codecov" + label: ":python: 3.5 / SQLite / Old Deps" env: TRIAL_FLAGS: "-j 2" plugins: - docker#v3.0.1: - image: "python:2.7" + image: "python:3.5" propagate-environment: true retry: automatic: @@ -114,57 +114,6 @@ steps: - exit_status: 2 limit: 2 - - command: - - "python -m pip install tox" - - "tox -e py27-old,codecov" - label: ":python: 2.7 / SQLite / Old Deps" - env: - TRIAL_FLAGS: "-j 2" - plugins: - - docker#v3.0.1: - image: "python:2.7" - propagate-environment: true - retry: - automatic: - - exit_status: -1 - limit: 2 - - exit_status: 2 - limit: 2 - - - label: ":python: 2.7 / :postgres: 9.4" - env: - TRIAL_FLAGS: "-j 4" - command: - - "bash -c 'python -m pip install tox && python -m tox -e py27-postgres,codecov'" - plugins: - - docker-compose#v2.1.0: - run: testenv - config: - - .buildkite/docker-compose.py27.pg94.yaml - retry: - automatic: - - exit_status: -1 - limit: 2 - - exit_status: 2 - limit: 2 - - - label: ":python: 2.7 / :postgres: 9.5" - env: - TRIAL_FLAGS: "-j 4" - command: - - "bash -c 'python -m pip install tox && python -m tox -e py27-postgres,codecov'" - plugins: - - docker-compose#v2.1.0: - run: testenv - config: - - .buildkite/docker-compose.py27.pg95.yaml - retry: - automatic: - - exit_status: -1 - limit: 2 - - exit_status: 2 - limit: 2 - - label: ":python: 3.5 / :postgres: 9.4" env: TRIAL_FLAGS: "-j 4" diff --git 
a/.circleci/config.yml b/.circleci/config.yml index 137747dae3..3c2b32c015 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -4,72 +4,18 @@ jobs: machine: true steps: - checkout - - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_TAG}-py2 . - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:${CIRCLE_TAG} -t matrixdotorg/synapse:${CIRCLE_TAG}-py3 --build-arg PYTHON_VERSION=3.6 . - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD - run: docker push matrixdotorg/synapse:${CIRCLE_TAG} - - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}-py2 - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}-py3 dockerhubuploadlatest: machine: true steps: - checkout - - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:latest-py2 . - run: docker build -f docker/Dockerfile --label gitsha1=${CIRCLE_SHA1} -t matrixdotorg/synapse:latest -t matrixdotorg/synapse:latest-py3 --build-arg PYTHON_VERSION=3.6 . - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD - run: docker push matrixdotorg/synapse:latest - - run: docker push matrixdotorg/synapse:latest-py2 - run: docker push matrixdotorg/synapse:latest-py3 - sytestpy2: - docker: - - image: matrixdotorg/sytest-synapsepy2 - working_directory: /src - steps: - - checkout - - run: /synapse_sytest.sh - - store_artifacts: - path: /logs - destination: logs - - store_test_results: - path: /logs - sytestpy2postgres: - docker: - - image: matrixdotorg/sytest-synapsepy2 - working_directory: /src - steps: - - checkout - - run: POSTGRES=1 /synapse_sytest.sh - - store_artifacts: - path: /logs - destination: logs - - store_test_results: - path: /logs - sytestpy2merged: - docker: - - image: matrixdotorg/sytest-synapsepy2 - working_directory: /src - steps: - - checkout - - run: bash .circleci/merge_base_branch.sh - - run: /synapse_sytest.sh - - store_artifacts: - path: /logs - destination: logs - - store_test_results: - path: /logs - sytestpy2postgresmerged: - docker: - - image: matrixdotorg/sytest-synapsepy2 - working_directory: /src - steps: - - checkout - - run: bash .circleci/merge_base_branch.sh - - run: POSTGRES=1 /synapse_sytest.sh - - store_artifacts: - path: /logs - destination: logs - - store_test_results: - path: /logs sytestpy3: docker: @@ -126,14 +72,6 @@ workflows: version: 2 build: jobs: - - sytestpy2: - filters: - branches: - only: /develop|master|release-.*/ - - sytestpy2postgres: - filters: - branches: - only: /develop|master|release-.*/ - sytestpy3: filters: branches: @@ -142,14 +80,6 @@ workflows: filters: branches: only: /develop|master|release-.*/ - - sytestpy2merged: - filters: - branches: - ignore: /develop|master|release-.*/ - - sytestpy2postgresmerged: - filters: - branches: - ignore: /develop|master|release-.*/ - sytestpy3merged: filters: branches: diff --git a/changelog.d/5425.removal b/changelog.d/5425.removal new file mode 100644 index 0000000000..30022ee63d --- /dev/null +++ b/changelog.d/5425.removal @@ -0,0 +1 @@ +Python 2.7 is no longer a supported platform. Synapse now requires Python 3.5+ to run. 
diff --git a/setup.py b/setup.py index 55663e9cac..3492cdc5a0 100755 --- a/setup.py +++ b/setup.py @@ -102,6 +102,16 @@ setup( include_package_data=True, zip_safe=False, long_description=long_description, + python_requires='~=3.5', + classifiers=[ + 'Development Status :: 5 - Production/Stable', + 'Topic :: Communications :: Chat', + 'License :: OSI Approved :: Apache Software License', + 'Programming Language :: Python :: 3 :: Only', + 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + ], scripts=["synctl"] + glob.glob("scripts/*"), cmdclass={'test': TestCommand}, ) diff --git a/synapse/__init__.py b/synapse/__init__.py index 5bc24863d9..0c01546789 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -17,6 +17,13 @@ """ This is a reference implementation of a Matrix home server. """ +import sys + +# Check that we're not running on an unsupported Python version. +if sys.version_info < (3, 5): + print("Synapse requires Python 3.5 or above.") + sys.exit(1) + try: from twisted.internet import protocol from twisted.internet.protocol import Factory diff --git a/synapse/python_dependencies.py b/synapse/python_dependencies.py index 7dfa78dadb..11ace2bfb1 100644 --- a/synapse/python_dependencies.py +++ b/synapse/python_dependencies.py @@ -44,7 +44,7 @@ REQUIREMENTS = [ "canonicaljson>=1.1.3", "signedjson>=1.0.0", "pynacl>=1.2.1", - "idna>=2", + "idna>=2.5", # validating SSL certs for IP addresses requires service_identity 18.1. "service_identity>=18.1.0", @@ -65,7 +65,7 @@ REQUIREMENTS = [ "sortedcontainers>=1.4.4", "psutil>=2.0.0", "pymacaroons>=0.13.0", - "msgpack>=0.5.0", + "msgpack>=0.5.2", "phonenumbers>=8.2.0", "six>=1.10", # prometheus_client 0.4.0 changed the format of counter metrics diff --git a/tox.ini b/tox.ini index 543b232ae7..0c4d562766 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = packaging, py27, py36, pep8, check_isort +envlist = packaging, py35, py36, py37, pep8, check_isort [base] deps = @@ -79,7 +79,7 @@ usedevelop=true # A test suite for the oldest supported versions of Python libraries, to catch # any uses of APIs not available in them. -[testenv:py27-old] +[testenv:py35-old] skip_install=True deps = # Old automat version for Twisted From 6312d6cc7c5bc80984758a70e2c368d8b4fb3bfd Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 13 Jun 2019 22:40:52 +1000 Subject: [PATCH 402/429] Expose statistics on extrems to prometheus (#5384) --- changelog.d/5384.feature | 1 + scripts/generate_signing_key.py | 2 +- synapse/metrics/__init__.py | 112 ++++++++++++++++++---- synapse/storage/events.py | 44 ++++++--- tests/storage/test_cleanup_extrems.py | 128 ++++++++++---------------- tests/storage/test_event_metrics.py | 97 +++++++++++++++++++ tests/unittest.py | 61 +++++++++++- 7 files changed, 331 insertions(+), 114 deletions(-) create mode 100644 changelog.d/5384.feature create mode 100644 tests/storage/test_event_metrics.py diff --git a/changelog.d/5384.feature b/changelog.d/5384.feature new file mode 100644 index 0000000000..9497f521c8 --- /dev/null +++ b/changelog.d/5384.feature @@ -0,0 +1 @@ +Statistics on forward extremities per room are now exposed via Prometheus. 
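The new `BucketCollector` registered by the events store below differs from a normal Prometheus Histogram in that its buckets are computed from a point-in-time snapshot: the store periodically counts forward extremities per room, and the collector re-maps the resulting `{extremity count: number of rooms}` dict onto fixed bucket bounds. A standalone sketch of that re-mapping (the bounds and sample data here are arbitrary):

```python
def bucketise(data, bounds):
    """Map {observed value: count} onto fixed histogram bounds.

    e.g. data = {2: 5, 7: 1} means five rooms with 2 forward extremities
    and one room with 7.
    """
    buckets = {}
    for value, count in data.items():
        # Each observed value lands in the first bound it fits under.
        for bound in bounds:
            if value <= bound:
                buckets[bound] = buckets.get(bound, 0) + count
                break

    samples = [(str(bound), buckets.get(bound, 0)) for bound in bounds]
    samples.append(("+Inf", sum(data.values())))
    return samples

print(bucketise({2: 5, 7: 1}, [1, 2, 3, 5, 7, 10]))
# [('1', 0), ('2', 5), ('3', 0), ('5', 0), ('7', 1), ('10', 0), ('+Inf', 6)]
```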
diff --git a/scripts/generate_signing_key.py b/scripts/generate_signing_key.py index ba3ba97395..36e9140b50 100755 --- a/scripts/generate_signing_key.py +++ b/scripts/generate_signing_key.py @@ -16,7 +16,7 @@ import argparse import sys -from signedjson.key import write_signing_keys, generate_signing_key +from signedjson.key import generate_signing_key, write_signing_keys from synapse.util.stringutils import random_string diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index ef48984fdd..539c353528 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -25,7 +25,7 @@ import six import attr from prometheus_client import Counter, Gauge, Histogram -from prometheus_client.core import REGISTRY, GaugeMetricFamily +from prometheus_client.core import REGISTRY, GaugeMetricFamily, HistogramMetricFamily from twisted.internet import reactor @@ -40,7 +40,6 @@ HAVE_PROC_SELF_STAT = os.path.exists("/proc/self/stat") class RegistryProxy(object): - @staticmethod def collect(): for metric in REGISTRY.collect(): @@ -63,10 +62,7 @@ class LaterGauge(object): try: calls = self.caller() except Exception: - logger.exception( - "Exception running callback for LaterGauge(%s)", - self.name, - ) + logger.exception("Exception running callback for LaterGauge(%s)", self.name) yield g return @@ -116,9 +112,7 @@ class InFlightGauge(object): # Create a class which have the sub_metrics values as attributes, which # default to 0 on initialization. Used to pass to registered callbacks. self._metrics_class = attr.make_class( - "_MetricsEntry", - attrs={x: attr.ib(0) for x in sub_metrics}, - slots=True, + "_MetricsEntry", attrs={x: attr.ib(0) for x in sub_metrics}, slots=True ) # Counts number of in flight blocks for a given set of label values @@ -157,7 +151,9 @@ class InFlightGauge(object): Note: may be called by a separate thread. """ - in_flight = GaugeMetricFamily(self.name + "_total", self.desc, labels=self.labels) + in_flight = GaugeMetricFamily( + self.name + "_total", self.desc, labels=self.labels + ) metrics_by_key = {} @@ -179,7 +175,9 @@ class InFlightGauge(object): yield in_flight for name in self.sub_metrics: - gauge = GaugeMetricFamily("_".join([self.name, name]), "", labels=self.labels) + gauge = GaugeMetricFamily( + "_".join([self.name, name]), "", labels=self.labels + ) for key, metrics in six.iteritems(metrics_by_key): gauge.add_metric(key, getattr(metrics, name)) yield gauge @@ -193,12 +191,75 @@ class InFlightGauge(object): all_gauges[self.name] = self +@attr.s(hash=True) +class BucketCollector(object): + """ + Like a Histogram, but allows buckets to be point-in-time instead of + incrementally added to. + + Args: + name (str): Base name of metric to be exported to Prometheus. + data_collector (callable -> dict): A synchronous callable that + returns a dict mapping bucket to number of items in the + bucket. If these buckets are not the same as the buckets + given to this class, they will be remapped into them. + buckets (list[float]): List of floats/ints of the buckets to + give to Prometheus. +Inf is ignored, if given. + + """ + + name = attr.ib() + data_collector = attr.ib() + buckets = attr.ib() + + def collect(self): + + # Fetch the data -- this must be synchronous! 
+ data = self.data_collector() + + buckets = {} + + res = [] + for x in data.keys(): + for i, bound in enumerate(self.buckets): + if x <= bound: + buckets[bound] = buckets.get(bound, 0) + data[x] + break + + for i in self.buckets: + res.append([i, buckets.get(i, 0)]) + + res.append(["+Inf", sum(data.values())]) + + metric = HistogramMetricFamily( + self.name, + "", + buckets=res, + sum_value=sum([x * y for x, y in data.items()]), + ) + yield metric + + def __attrs_post_init__(self): + self.buckets = [float(x) for x in self.buckets if x != "+Inf"] + if self.buckets != sorted(self.buckets): + raise ValueError("Buckets not sorted") + + self.buckets = tuple(self.buckets) + + if self.name in all_gauges.keys(): + logger.warning("%s already registered, reregistering" % (self.name,)) + REGISTRY.unregister(all_gauges.pop(self.name)) + + REGISTRY.register(self) + all_gauges[self.name] = self + + # # Detailed CPU metrics # -class CPUMetrics(object): +class CPUMetrics(object): def __init__(self): ticks_per_sec = 100 try: @@ -237,13 +298,28 @@ gc_time = Histogram( "python_gc_time", "Time taken to GC (sec)", ["gen"], - buckets=[0.0025, 0.005, 0.01, 0.025, 0.05, 0.10, 0.25, 0.50, 1.00, 2.50, - 5.00, 7.50, 15.00, 30.00, 45.00, 60.00], + buckets=[ + 0.0025, + 0.005, + 0.01, + 0.025, + 0.05, + 0.10, + 0.25, + 0.50, + 1.00, + 2.50, + 5.00, + 7.50, + 15.00, + 30.00, + 45.00, + 60.00, + ], ) class GCCounts(object): - def collect(self): cm = GaugeMetricFamily("python_gc_counts", "GC object counts", labels=["gen"]) for n, m in enumerate(gc.get_count()): @@ -279,9 +355,7 @@ sent_transactions_counter = Counter("synapse_federation_client_sent_transactions events_processed_counter = Counter("synapse_federation_client_events_processed", "") event_processing_loop_counter = Counter( - "synapse_event_processing_loop_count", - "Event processing loop iterations", - ["name"], + "synapse_event_processing_loop_count", "Event processing loop iterations", ["name"] ) event_processing_loop_room_count = Counter( @@ -311,7 +385,6 @@ last_ticked = time.time() class ReactorLastSeenMetric(object): - def collect(self): cm = GaugeMetricFamily( "python_twisted_reactor_last_seen", @@ -325,7 +398,6 @@ REGISTRY.register(ReactorLastSeenMetric()) def runUntilCurrentTimer(func): - @functools.wraps(func) def f(*args, **kwargs): now = reactor.seconds() diff --git a/synapse/storage/events.py b/synapse/storage/events.py index f9162be9b9..1578403f79 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -17,7 +17,7 @@ import itertools import logging -from collections import OrderedDict, deque, namedtuple +from collections import Counter as c_counter, OrderedDict, deque, namedtuple from functools import wraps from six import iteritems, text_type @@ -33,6 +33,7 @@ from synapse.api.constants import EventTypes from synapse.api.errors import SynapseError from synapse.events import EventBase # noqa: F401 from synapse.events.snapshot import EventContext # noqa: F401 +from synapse.metrics import BucketCollector from synapse.metrics.background_process_metrics import run_as_background_process from synapse.state import StateResolutionStore from synapse.storage.background_updates import BackgroundUpdateStore @@ -220,13 +221,38 @@ class EventsStore( EventsWorkerStore, BackgroundUpdateStore, ): - def __init__(self, db_conn, hs): super(EventsStore, self).__init__(db_conn, hs) self._event_persist_queue = _EventPeristenceQueue() self._state_resolution_handler = hs.get_state_resolution_handler() + # Collect metrics on the number of forward 
extremities that exist. + self._current_forward_extremities_amount = {} + + BucketCollector( + "synapse_forward_extremities", + lambda: self._current_forward_extremities_amount, + buckets=[1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"] + ) + + # Read the extrems every 60 minutes + hs.get_clock().looping_call(self._read_forward_extremities, 60 * 60 * 1000) + + @defer.inlineCallbacks + def _read_forward_extremities(self): + def fetch(txn): + txn.execute( + """ + select count(*) c from event_forward_extremities + group by room_id + """ + ) + return txn.fetchall() + + res = yield self.runInteraction("read_forward_extremities", fetch) + self._current_forward_extremities_amount = c_counter(list(x[0] for x in res)) + @defer.inlineCallbacks def persist_events(self, events_and_contexts, backfilled=False): """ @@ -568,17 +594,11 @@ class EventsStore( ) txn.execute(sql, batch) - results.extend( - r[0] - for r in txn - if not json.loads(r[1]).get("soft_failed") - ) + results.extend(r[0] for r in txn if not json.loads(r[1]).get("soft_failed")) for chunk in batch_iter(event_ids, 100): yield self.runInteraction( - "_get_events_which_are_prevs", - _get_events_which_are_prevs_txn, - chunk, + "_get_events_which_are_prevs", _get_events_which_are_prevs_txn, chunk ) defer.returnValue(results) @@ -640,9 +660,7 @@ class EventsStore( for chunk in batch_iter(event_ids, 100): yield self.runInteraction( - "_get_prevs_before_rejected", - _get_prevs_before_rejected_txn, - chunk, + "_get_prevs_before_rejected", _get_prevs_before_rejected_txn, chunk ) defer.returnValue(existing_prevs) diff --git a/tests/storage/test_cleanup_extrems.py b/tests/storage/test_cleanup_extrems.py index 6aa8b8b3c6..f4c81ef77d 100644 --- a/tests/storage/test_cleanup_extrems.py +++ b/tests/storage/test_cleanup_extrems.py @@ -15,7 +15,6 @@ import os.path -from synapse.api.constants import EventTypes from synapse.storage import prepare_database from synapse.types import Requester, UserID @@ -23,17 +22,12 @@ from tests.unittest import HomeserverTestCase class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase): - """Test the background update to clean forward extremities table. """ - def make_homeserver(self, reactor, clock): - # Hack until we understand why test_forked_graph_cleanup fails with v4 - config = self.default_config() - config['default_room_version'] = '1' - return self.setup_test_homeserver(config=config) + Test the background update to clean forward extremities table. + """ def prepare(self, reactor, clock, homeserver): self.store = homeserver.get_datastore() - self.event_creator = homeserver.get_event_creation_handler() self.room_creator = homeserver.get_room_creation_handler() # Create a test user and room @@ -42,56 +36,6 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase): info = self.get_success(self.room_creator.create_room(self.requester, {})) self.room_id = info["room_id"] - def create_and_send_event(self, soft_failed=False, prev_event_ids=None): - """Create and send an event. - - Args: - soft_failed (bool): Whether to create a soft failed event or not - prev_event_ids (list[str]|None): Explicitly set the prev events, - or if None just use the default - - Returns: - str: The new event's ID. 
- """ - prev_events_and_hashes = None - if prev_event_ids: - prev_events_and_hashes = [[p, {}, 0] for p in prev_event_ids] - - event, context = self.get_success( - self.event_creator.create_event( - self.requester, - { - "type": EventTypes.Message, - "room_id": self.room_id, - "sender": self.user.to_string(), - "content": {"body": "", "msgtype": "m.text"}, - }, - prev_events_and_hashes=prev_events_and_hashes, - ) - ) - - if soft_failed: - event.internal_metadata.soft_failed = True - - self.get_success( - self.event_creator.send_nonmember_event(self.requester, event, context) - ) - - return event.event_id - - def add_extremity(self, event_id): - """Add the given event as an extremity to the room. - """ - self.get_success( - self.store._simple_insert( - table="event_forward_extremities", - values={"room_id": self.room_id, "event_id": event_id}, - desc="test_add_extremity", - ) - ) - - self.store.get_latest_event_ids_in_room.invalidate((self.room_id,)) - def run_background_update(self): """Re run the background update to clean up the extremities. """ @@ -131,10 +75,16 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase): """ # Create the room graph - event_id_1 = self.create_and_send_event() - event_id_2 = self.create_and_send_event(True, [event_id_1]) - event_id_3 = self.create_and_send_event(True, [event_id_2]) - event_id_4 = self.create_and_send_event(False, [event_id_3]) + event_id_1 = self.create_and_send_event(self.room_id, self.user) + event_id_2 = self.create_and_send_event( + self.room_id, self.user, True, [event_id_1] + ) + event_id_3 = self.create_and_send_event( + self.room_id, self.user, True, [event_id_2] + ) + event_id_4 = self.create_and_send_event( + self.room_id, self.user, False, [event_id_3] + ) # Check the latest events are as expected latest_event_ids = self.get_success( @@ -154,12 +104,16 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase): Where SF* are soft failed, and with extremities of A and B """ # Create the room graph - event_id_a = self.create_and_send_event() - event_id_sf1 = self.create_and_send_event(True, [event_id_a]) - event_id_b = self.create_and_send_event(False, [event_id_sf1]) + event_id_a = self.create_and_send_event(self.room_id, self.user) + event_id_sf1 = self.create_and_send_event( + self.room_id, self.user, True, [event_id_a] + ) + event_id_b = self.create_and_send_event( + self.room_id, self.user, False, [event_id_sf1] + ) # Add the new extremity and check the latest events are as expected - self.add_extremity(event_id_a) + self.add_extremity(self.room_id, event_id_a) latest_event_ids = self.get_success( self.store.get_latest_event_ids_in_room(self.room_id) @@ -185,13 +139,19 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase): Where SF* are soft failed, and with extremities of A and B """ # Create the room graph - event_id_a = self.create_and_send_event() - event_id_sf1 = self.create_and_send_event(True, [event_id_a]) - event_id_sf2 = self.create_and_send_event(True, [event_id_sf1]) - event_id_b = self.create_and_send_event(False, [event_id_sf2]) + event_id_a = self.create_and_send_event(self.room_id, self.user) + event_id_sf1 = self.create_and_send_event( + self.room_id, self.user, True, [event_id_a] + ) + event_id_sf2 = self.create_and_send_event( + self.room_id, self.user, True, [event_id_sf1] + ) + event_id_b = self.create_and_send_event( + self.room_id, self.user, False, [event_id_sf2] + ) # Add the new extremity and check the latest events are as expected - 
self.add_extremity(event_id_a) + self.add_extremity(self.room_id, event_id_a) latest_event_ids = self.get_success( self.store.get_latest_event_ids_in_room(self.room_id) @@ -227,16 +187,26 @@ class CleanupExtremBackgroundUpdateStoreTestCase(HomeserverTestCase): """ # Create the room graph - event_id_a = self.create_and_send_event() - event_id_b = self.create_and_send_event() - event_id_sf1 = self.create_and_send_event(True, [event_id_a]) - event_id_sf2 = self.create_and_send_event(True, [event_id_a, event_id_b]) - event_id_sf3 = self.create_and_send_event(True, [event_id_sf1]) - self.create_and_send_event(True, [event_id_sf2, event_id_sf3]) # SF4 - event_id_c = self.create_and_send_event(False, [event_id_sf3]) + event_id_a = self.create_and_send_event(self.room_id, self.user) + event_id_b = self.create_and_send_event(self.room_id, self.user) + event_id_sf1 = self.create_and_send_event( + self.room_id, self.user, True, [event_id_a] + ) + event_id_sf2 = self.create_and_send_event( + self.room_id, self.user, True, [event_id_a, event_id_b] + ) + event_id_sf3 = self.create_and_send_event( + self.room_id, self.user, True, [event_id_sf1] + ) + self.create_and_send_event( + self.room_id, self.user, True, [event_id_sf2, event_id_sf3] + ) # SF4 + event_id_c = self.create_and_send_event( + self.room_id, self.user, False, [event_id_sf3] + ) # Add the new extremity and check the latest events are as expected - self.add_extremity(event_id_a) + self.add_extremity(self.room_id, event_id_a) latest_event_ids = self.get_success( self.store.get_latest_event_ids_in_room(self.room_id) diff --git a/tests/storage/test_event_metrics.py b/tests/storage/test_event_metrics.py new file mode 100644 index 0000000000..20a068f1fc --- /dev/null +++ b/tests/storage/test_event_metrics.py @@ -0,0 +1,97 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an 'AS IS' BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.metrics import REGISTRY +from synapse.types import Requester, UserID + +from tests.unittest import HomeserverTestCase + + +class ExtremStatisticsTestCase(HomeserverTestCase): + def test_exposed_to_prometheus(self): + """ + Forward extremity counts are exposed via Prometheus. 
+ """ + room_creator = self.hs.get_room_creation_handler() + + user = UserID("alice", "test") + requester = Requester(user, None, False, None, None) + + # Real events, forward extremities + events = [(3, 2), (6, 2), (4, 6)] + + for event_count, extrems in events: + info = self.get_success(room_creator.create_room(requester, {})) + room_id = info["room_id"] + + last_event = None + + # Make a real event chain + for i in range(event_count): + ev = self.create_and_send_event(room_id, user, False, last_event) + last_event = [ev] + + # Sprinkle in some extremities + for i in range(extrems): + ev = self.create_and_send_event(room_id, user, False, last_event) + + # Let it run for a while, then pull out the statistics from the + # Prometheus client registry + self.reactor.advance(60 * 60 * 1000) + self.pump(1) + + items = list( + filter( + lambda x: x.name == "synapse_forward_extremities", + list(REGISTRY.collect()), + ) + ) + + # Check the values are what we want + buckets = {} + _count = 0 + _sum = 0 + + for i in items[0].samples: + if i[0].endswith("_bucket"): + buckets[i[1]['le']] = i[2] + elif i[0].endswith("_count"): + _count = i[2] + elif i[0].endswith("_sum"): + _sum = i[2] + + # 3 buckets, 2 with 2 extrems, 1 with 6 extrems (bucketed as 7), and + # +Inf which is all + self.assertEqual( + buckets, + { + 1.0: 0, + 2.0: 2, + 3.0: 0, + 5.0: 0, + 7.0: 1, + 10.0: 0, + 15.0: 0, + 20.0: 0, + 50.0: 0, + 100.0: 0, + 200.0: 0, + 500.0: 0, + "+Inf": 3, + }, + ) + # 3 rooms, with 10 total events + self.assertEqual(_count, 3) + self.assertEqual(_sum, 10) diff --git a/tests/unittest.py b/tests/unittest.py index 7dbb64af59..b6dc7932ce 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -27,11 +27,12 @@ import twisted.logger from twisted.internet.defer import Deferred from twisted.trial import unittest +from synapse.api.constants import EventTypes from synapse.config.homeserver import HomeServerConfig from synapse.http.server import JsonResource from synapse.http.site import SynapseRequest from synapse.server import HomeServer -from synapse.types import UserID, create_requester +from synapse.types import Requester, UserID, create_requester from synapse.util.logcontext import LoggingContext from tests.server import get_clock, make_request, render, setup_test_homeserver @@ -442,6 +443,64 @@ class HomeserverTestCase(TestCase): access_token = channel.json_body["access_token"] return access_token + def create_and_send_event( + self, room_id, user, soft_failed=False, prev_event_ids=None + ): + """ + Create and send an event. + + Args: + soft_failed (bool): Whether to create a soft failed event or not + prev_event_ids (list[str]|None): Explicitly set the prev events, + or if None just use the default + + Returns: + str: The new event's ID. 
+ """ + event_creator = self.hs.get_event_creation_handler() + secrets = self.hs.get_secrets() + requester = Requester(user, None, False, None, None) + + prev_events_and_hashes = None + if prev_event_ids: + prev_events_and_hashes = [[p, {}, 0] for p in prev_event_ids] + + event, context = self.get_success( + event_creator.create_event( + requester, + { + "type": EventTypes.Message, + "room_id": room_id, + "sender": user.to_string(), + "content": {"body": secrets.token_hex(), "msgtype": "m.text"}, + }, + prev_events_and_hashes=prev_events_and_hashes, + ) + ) + + if soft_failed: + event.internal_metadata.soft_failed = True + + self.get_success( + event_creator.send_nonmember_event(requester, event, context) + ) + + return event.event_id + + def add_extremity(self, room_id, event_id): + """ + Add the given event as an extremity to the room. + """ + self.get_success( + self.hs.get_datastore()._simple_insert( + table="event_forward_extremities", + values={"room_id": room_id, "event_id": event_id}, + desc="test_add_extremity", + ) + ) + + self.hs.get_datastore().get_latest_event_ids_in_room.invalidate((room_id,)) + def attempt_wrong_password_login(self, username, password): """Attempts to login as the user with the given password, asserting that the attempt *fails*. From 5c15039e065d710459dac9e558c8ec94edf7b6c4 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 13 Jun 2019 13:52:08 +0100 Subject: [PATCH 403/429] Clean up code for sending federation EDUs. (#5381) This code confused the hell out of me today. Split _get_new_device_messages into its two (unrelated) parts. --- changelog.d/5381.misc | 1 + .../sender/per_destination_queue.py | 40 ++++++++++++------- 2 files changed, 27 insertions(+), 14 deletions(-) create mode 100644 changelog.d/5381.misc diff --git a/changelog.d/5381.misc b/changelog.d/5381.misc new file mode 100644 index 0000000000..bbf70a0445 --- /dev/null +++ b/changelog.d/5381.misc @@ -0,0 +1 @@ +Clean up code for sending federation EDUs. 
diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index 564c57203d..22a2735405 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -189,11 +189,21 @@ class PerDestinationQueue(object): pending_pdus = [] while True: - device_message_edus, device_stream_id, dev_list_id = ( - # We have to keep 2 free slots for presence and rr_edus - yield self._get_new_device_messages(MAX_EDUS_PER_TRANSACTION - 2) + # We have to keep 2 free slots for presence and rr_edus + limit = MAX_EDUS_PER_TRANSACTION - 2 + + device_update_edus, dev_list_id = ( + yield self._get_device_update_edus(limit) ) + limit -= len(device_update_edus) + + to_device_edus, device_stream_id = ( + yield self._get_to_device_message_edus(limit) + ) + + pending_edus = device_update_edus + to_device_edus + # BEGIN CRITICAL SECTION # # In order to avoid a race condition, we need to make sure that @@ -208,10 +218,6 @@ class PerDestinationQueue(object): # We can only include at most 50 PDUs per transactions pending_pdus, self._pending_pdus = pending_pdus[:50], pending_pdus[50:] - pending_edus = [] - - # We can only include at most 100 EDUs per transactions - # rr_edus and pending_presence take at most one slot each pending_edus.extend(self._get_rr_edus(force_flush=False)) pending_presence = self._pending_presence self._pending_presence = {} @@ -232,7 +238,6 @@ class PerDestinationQueue(object): ) ) - pending_edus.extend(device_message_edus) pending_edus.extend( self._pop_pending_edus(MAX_EDUS_PER_TRANSACTION - len(pending_edus)) ) @@ -272,10 +277,13 @@ class PerDestinationQueue(object): sent_edus_by_type.labels(edu.edu_type).inc() # Remove the acknowledged device messages from the database # Only bother if we actually sent some device messages - if device_message_edus: + if to_device_edus: yield self._store.delete_device_msgs_for_remote( self._destination, device_stream_id ) + + # also mark the device updates as sent + if device_update_edus: logger.info( "Marking as sent %r %r", self._destination, dev_list_id ) @@ -347,7 +355,7 @@ class PerDestinationQueue(object): return pending_edus @defer.inlineCallbacks - def _get_new_device_messages(self, limit): + def _get_device_update_edus(self, limit): last_device_list = self._last_device_list_stream_id # Retrieve list of new device updates to send to the destination @@ -366,15 +374,19 @@ class PerDestinationQueue(object): assert len(edus) <= limit, "get_devices_by_remote returned too many EDUs" + defer.returnValue((edus, now_stream_id)) + + @defer.inlineCallbacks + def _get_to_device_message_edus(self, limit): last_device_stream_id = self._last_device_stream_id to_device_stream_id = self._store.get_to_device_stream_token() contents, stream_id = yield self._store.get_new_device_msgs_for_remote( self._destination, last_device_stream_id, to_device_stream_id, - limit - len(edus), + limit, ) - edus.extend( + edus = [ Edu( origin=self._server_name, destination=self._destination, @@ -382,6 +394,6 @@ class PerDestinationQueue(object): content=content, ) for content in contents - ) + ] - defer.returnValue((edus, stream_id, now_stream_id)) + defer.returnValue((edus, stream_id)) From b59a4eba644d123fce03809ead2121e9e0da6645 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 13 Jun 2019 14:49:25 +0100 Subject: [PATCH 404/429] Updates to the federation_client script (#5447) * py3 fixes for federation_client * .well-known 
support for federation_client --- changelog.d/5447.misc | 1 + scripts-dev/federation_client.py | 43 ++++++++++++++++++++++++++++---- 2 files changed, 39 insertions(+), 5 deletions(-) create mode 100644 changelog.d/5447.misc diff --git a/changelog.d/5447.misc b/changelog.d/5447.misc new file mode 100644 index 0000000000..dd52068404 --- /dev/null +++ b/changelog.d/5447.misc @@ -0,0 +1 @@ +Update federation_client dev script to support `.well-known` and work with python3. diff --git a/scripts-dev/federation_client.py b/scripts-dev/federation_client.py index e0287c8c6c..41e7b24418 100755 --- a/scripts-dev/federation_client.py +++ b/scripts-dev/federation_client.py @@ -21,7 +21,8 @@ import argparse import base64 import json import sys -from urlparse import urlparse, urlunparse + +from six.moves.urllib import parse as urlparse import nacl.signing import requests @@ -145,7 +146,7 @@ def request_json(method, origin_name, origin_key, destination, path, content): for key, sig in signed_json["signatures"][origin_name].items(): header = "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (origin_name, key, sig) - authorization_headers.append(bytes(header)) + authorization_headers.append(header.encode("ascii")) print("Authorization: %s" % header, file=sys.stderr) dest = "matrix://%s%s" % (destination, path) @@ -250,7 +251,7 @@ def read_args_from_config(args): class MatrixConnectionAdapter(HTTPAdapter): @staticmethod - def lookup(s): + def lookup(s, skip_well_known=False): if s[-1] == ']': # ipv6 literal (with no port) return s, 8448 @@ -263,19 +264,51 @@ class MatrixConnectionAdapter(HTTPAdapter): raise ValueError("Invalid host:port '%s'" % s) return out[0], port + # try a .well-known lookup + if not skip_well_known: + well_known = MatrixConnectionAdapter.get_well_known(s) + if well_known: + return MatrixConnectionAdapter.lookup( + well_known, skip_well_known=True + ) + try: srv = srvlookup.lookup("matrix", "tcp", s)[0] return srv.host, srv.port except Exception: return s, 8448 + @staticmethod + def get_well_known(server_name): + uri = "https://%s/.well-known/matrix/server" % (server_name, ) + print("fetching %s" % (uri, ), file=sys.stderr) + + try: + resp = requests.get(uri) + if resp.status_code != 200: + print("%s gave %i" % (uri, resp.status_code), file=sys.stderr) + return None + + parsed_well_known = resp.json() + if not isinstance(parsed_well_known, dict): + raise Exception("not a dict") + if "m.server" not in parsed_well_known: + raise Exception("Missing key 'm.server'") + new_name = parsed_well_known['m.server'] + print("well-known lookup gave %s" % (new_name, ), file=sys.stderr) + return new_name + + except Exception as e: + print("Invalid response from %s: %s" % (uri, e, ), file=sys.stderr) + return None + def get_connection(self, url, proxies=None): - parsed = urlparse(url) + parsed = urlparse.urlparse(url) (host, port) = self.lookup(parsed.netloc) netloc = "%s:%d" % (host, port) print("Connecting to %s" % (netloc,), file=sys.stderr) - url = urlunparse( + url = urlparse.urlunparse( ("https", netloc, parsed.path, parsed.params, parsed.query, parsed.fragment) ) return super(MatrixConnectionAdapter, self).get_connection(url, proxies) From 4f68188d0bbdb1966250375d34125572eb82a117 Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Thu, 13 Jun 2019 16:42:36 +0100 Subject: [PATCH 405/429] Change to absolute path for contrib/docker because this file is reproduced on dockerhub and relative paths don't work --- docker/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/docker/README.md b/docker/README.md index df5d0151e2..5a596eecb9 100644 --- a/docker/README.md +++ b/docker/README.md @@ -14,7 +14,7 @@ This image is designed to run either with an automatically generated configuration file or with a custom configuration that requires manual editing. An easy way to make use of this image is via docker-compose. See the -[contrib/docker](../contrib/docker) section of the synapse project for +[contrib/docker](https://github.com/matrix-org/synapse/tree/master/contrib/docker) section of the synapse project for examples. ### Without Compose (harder) From a10c8dae85d3706afbab588e1004350aa5b49539 Mon Sep 17 00:00:00 2001 From: "Amber H. Brown" Date: Fri, 14 Jun 2019 21:09:33 +1000 Subject: [PATCH 406/429] fix prometheus rendering error --- synapse/metrics/__init__.py | 2 +- tests/storage/test_event_metrics.py | 61 +++++++++++------------------ 2 files changed, 24 insertions(+), 39 deletions(-) diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index 539c353528..0d3ae1a43d 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -227,7 +227,7 @@ class BucketCollector(object): break for i in self.buckets: - res.append([i, buckets.get(i, 0)]) + res.append([str(i), buckets.get(i, 0)]) res.append(["+Inf", sum(data.values())]) diff --git a/tests/storage/test_event_metrics.py b/tests/storage/test_event_metrics.py index 20a068f1fc..1655fcdafc 100644 --- a/tests/storage/test_event_metrics.py +++ b/tests/storage/test_event_metrics.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +from prometheus_client.exposition import generate_latest + from synapse.metrics import REGISTRY from synapse.types import Requester, UserID @@ -52,46 +54,29 @@ class ExtremStatisticsTestCase(HomeserverTestCase): self.reactor.advance(60 * 60 * 1000) self.pump(1) - items = list( + items = set( filter( - lambda x: x.name == "synapse_forward_extremities", - list(REGISTRY.collect()), + lambda x: b"synapse_forward_extremities_" in x, + generate_latest(REGISTRY).split(b"\n"), ) ) - # Check the values are what we want - buckets = {} - _count = 0 - _sum = 0 + expected = set([ + b'synapse_forward_extremities_bucket{le="1.0"} 0.0', + b'synapse_forward_extremities_bucket{le="2.0"} 2.0', + b'synapse_forward_extremities_bucket{le="3.0"} 0.0', + b'synapse_forward_extremities_bucket{le="5.0"} 0.0', + b'synapse_forward_extremities_bucket{le="7.0"} 1.0', + b'synapse_forward_extremities_bucket{le="10.0"} 0.0', + b'synapse_forward_extremities_bucket{le="15.0"} 0.0', + b'synapse_forward_extremities_bucket{le="20.0"} 0.0', + b'synapse_forward_extremities_bucket{le="50.0"} 0.0', + b'synapse_forward_extremities_bucket{le="100.0"} 0.0', + b'synapse_forward_extremities_bucket{le="200.0"} 0.0', + b'synapse_forward_extremities_bucket{le="500.0"} 0.0', + b'synapse_forward_extremities_bucket{le="+Inf"} 3.0', + b'synapse_forward_extremities_count 3.0', + b'synapse_forward_extremities_sum 10.0', + ]) - for i in items[0].samples: - if i[0].endswith("_bucket"): - buckets[i[1]['le']] = i[2] - elif i[0].endswith("_count"): - _count = i[2] - elif i[0].endswith("_sum"): - _sum = i[2] - - # 3 buckets, 2 with 2 extrems, 1 with 6 extrems (bucketed as 7), and - # +Inf which is all - self.assertEqual( - buckets, - { - 1.0: 0, - 2.0: 2, - 3.0: 0, - 5.0: 0, - 7.0: 1, - 10.0: 0, - 15.0: 0, - 20.0: 0, - 50.0: 0, - 100.0: 0, - 200.0: 0, - 500.0: 0, - "+Inf": 3, - }, - ) - # 3 rooms, with 10 total events - self.assertEqual(_count, 
3) - self.assertEqual(_sum, 10) + self.assertEqual(items, expected) From b2a6f90a672174c0c0f815b1e0843c02455b774d Mon Sep 17 00:00:00 2001 From: "Amber H. Brown" Date: Fri, 14 Jun 2019 21:10:21 +1000 Subject: [PATCH 407/429] changelog --- changelog.d/5458.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5458.feature diff --git a/changelog.d/5458.feature b/changelog.d/5458.feature new file mode 100644 index 0000000000..9497f521c8 --- /dev/null +++ b/changelog.d/5458.feature @@ -0,0 +1 @@ +Statistics on forward extremities per room are now exposed via Prometheus. From d8db29c4818829df7a887cedf40b1e2ac49631e7 Mon Sep 17 00:00:00 2001 From: Jorik Schellekens Date: Fri, 14 Jun 2019 13:03:46 +0100 Subject: [PATCH 408/429] Use python3 in the demo --- demo/start.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/demo/start.sh b/demo/start.sh index c4a1328a6f..5c3a8fe61f 100755 --- a/demo/start.sh +++ b/demo/start.sh @@ -21,7 +21,7 @@ for port in 8080 8081 8082; do pushd demo/$port #rm $DIR/etc/$port.config - python -m synapse.app.homeserver \ + python3 -m synapse.app.homeserver \ --generate-config \ -H "localhost:$https_port" \ --config-path "$DIR/etc/$port.config" \ @@ -55,7 +55,7 @@ for port in 8080 8081 8082; do echo "report_stats: false" >> $DIR/etc/$port.config fi - python -m synapse.app.homeserver \ + python3 -m synapse.app.homeserver \ --config-path "$DIR/etc/$port.config" \ -D \ -vv \ From d0530382eeff053547304532167c0e4654af172c Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 14 Jun 2019 13:18:24 +0100 Subject: [PATCH 409/429] Track deactivated accounts in the database (#5378) --- changelog.d/5378.misc | 1 + synapse/handlers/deactivate_account.py | 4 + synapse/storage/registration.py | 114 ++++++++++++++++++ .../delta/55/users_alter_deactivated.sql | 19 +++ tests/rest/client/v2_alpha/test_account.py | 45 +++++++ 5 files changed, 183 insertions(+) create mode 100644 changelog.d/5378.misc create mode 100644 synapse/storage/schema/delta/55/users_alter_deactivated.sql diff --git a/changelog.d/5378.misc b/changelog.d/5378.misc new file mode 100644 index 0000000000..365e49d634 --- /dev/null +++ b/changelog.d/5378.misc @@ -0,0 +1 @@ +Track deactivated accounts in the database. diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index 6a91f7698e..b29089d82c 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2017, 2018 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -114,6 +115,9 @@ class DeactivateAccountHandler(BaseHandler): # parts users from rooms (if it isn't already running) self._start_user_parting() + # Mark the user as deactivated. + yield self.store.set_user_deactivated_status(user_id, True) + defer.returnValue(identity_server_supports_unbinding) def _start_user_parting(self): diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 1dd1182e82..4c5751b57f 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -15,6 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import logging import re from six import iterkeys @@ -31,6 +32,8 @@ from synapse.util.caches.descriptors import cached, cachedInlineCallbacks THIRTY_MINUTES_IN_MS = 30 * 60 * 1000 +logger = logging.getLogger(__name__) + class RegistrationWorkerStore(SQLBaseStore): def __init__(self, db_conn, hs): @@ -598,11 +601,75 @@ class RegistrationStore( "user_threepids_grandfather", self._bg_user_threepids_grandfather, ) + self.register_background_update_handler( + "users_set_deactivated_flag", self._backgroud_update_set_deactivated_flag, + ) + # Create a background job for culling expired 3PID validity tokens hs.get_clock().looping_call( self.cull_expired_threepid_validation_tokens, THIRTY_MINUTES_IN_MS, ) + @defer.inlineCallbacks + def _backgroud_update_set_deactivated_flag(self, progress, batch_size): + """Retrieves a list of all deactivated users and sets the 'deactivated' flag to 1 + for each of them. + """ + + last_user = progress.get("user_id", "") + + def _backgroud_update_set_deactivated_flag_txn(txn): + txn.execute( + """ + SELECT + users.name, + COUNT(access_tokens.token) AS count_tokens, + COUNT(user_threepids.address) AS count_threepids + FROM users + LEFT JOIN access_tokens ON (access_tokens.user_id = users.name) + LEFT JOIN user_threepids ON (user_threepids.user_id = users.name) + WHERE password_hash IS NULL OR password_hash = '' + AND users.name > ? + GROUP BY users.name + ORDER BY users.name ASC + LIMIT ?; + """, + (last_user, batch_size), + ) + + rows = self.cursor_to_dict(txn) + + if not rows: + return True + + rows_processed_nb = 0 + + for user in rows: + if not user["count_tokens"] and not user["count_threepids"]: + self.set_user_deactivated_status_txn(txn, user["user_id"], True) + rows_processed_nb += 1 + + logger.info("Marked %d rows as deactivated", rows_processed_nb) + + self._background_update_progress_txn( + txn, "users_set_deactivated_flag", {"user_id": rows[-1]["user_id"]} + ) + + if batch_size > len(rows): + return True + else: + return False + + end = yield self.runInteraction( + "users_set_deactivated_flag", + _backgroud_update_set_deactivated_flag_txn, + ) + + if end: + yield self._end_background_update("users_set_deactivated_flag") + + defer.returnValue(batch_size) + @defer.inlineCallbacks def add_access_token_to_user(self, user_id, token, device_id=None): """Adds an access token for the given user. @@ -1268,3 +1335,50 @@ class RegistrationStore( "delete_threepid_session", delete_threepid_session_txn, ) + + def set_user_deactivated_status_txn(self, txn, user_id, deactivated): + self._simple_update_one_txn( + txn=txn, + table="users", + keyvalues={"name": user_id}, + updatevalues={"deactivated": 1 if deactivated else 0}, + ) + self._invalidate_cache_and_stream( + txn, self.get_user_deactivated_status, (user_id,), + ) + + @defer.inlineCallbacks + def set_user_deactivated_status(self, user_id, deactivated): + """Set the `deactivated` property for the provided user to the provided value. + + Args: + user_id (str): The ID of the user to set the status for. + deactivated (bool): The value to set for `deactivated`. + """ + + yield self.runInteraction( + "set_user_deactivated_status", + self.set_user_deactivated_status_txn, + user_id, deactivated, + ) + + @cachedInlineCallbacks() + def get_user_deactivated_status(self, user_id): + """Retrieve the value for the `deactivated` property for the provided user. + + Args: + user_id (str): The ID of the user to retrieve the status for. + + Returns: + defer.Deferred(bool): The requested value. 
+ """ + + res = yield self._simple_select_one_onecol( + table="users", + keyvalues={"name": user_id}, + retcol="deactivated", + desc="get_user_deactivated_status", + ) + + # Convert the integer into a boolean. + defer.returnValue(res == 1) diff --git a/synapse/storage/schema/delta/55/users_alter_deactivated.sql b/synapse/storage/schema/delta/55/users_alter_deactivated.sql new file mode 100644 index 0000000000..dabdde489b --- /dev/null +++ b/synapse/storage/schema/delta/55/users_alter_deactivated.sql @@ -0,0 +1,19 @@ +/* Copyright 2019 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +ALTER TABLE users ADD deactivated SMALLINT DEFAULT 0 NOT NULL; + +INSERT INTO background_updates (update_name, progress_json) VALUES + ('users_set_deactivated_flag', '{}'); diff --git a/tests/rest/client/v2_alpha/test_account.py b/tests/rest/client/v2_alpha/test_account.py index 0d1c0868ce..a60a4a3b87 100644 --- a/tests/rest/client/v2_alpha/test_account.py +++ b/tests/rest/client/v2_alpha/test_account.py @@ -15,6 +15,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import json import os import re from email.parser import Parser @@ -239,3 +240,47 @@ class PasswordResetTestCase(unittest.HomeserverTestCase): ) self.render(request) self.assertEquals(expected_code, channel.code, channel.result) + + +class DeactivateTestCase(unittest.HomeserverTestCase): + + servlets = [ + synapse.rest.admin.register_servlets_for_client_rest_resource, + login.register_servlets, + account.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + hs = self.setup_test_homeserver() + return hs + + def test_deactivate_account(self): + user_id = self.register_user("kermit", "test") + tok = self.login("kermit", "test") + + request_data = json.dumps({ + "auth": { + "type": "m.login.password", + "user": user_id, + "password": "test", + }, + "erase": False, + }) + request, channel = self.make_request( + "POST", + "account/deactivate", + request_data, + access_token=tok, + ) + self.render(request) + self.assertEqual(request.code, 200) + + store = self.hs.get_datastore() + + # Check that the user has been marked as deactivated. + self.assertTrue(self.get_success(store.get_user_deactivated_status(user_id))) + + # Check that this access token has been invalidated. 
+ request, channel = self.make_request("GET", "account/whoami") + self.render(request) + self.assertEqual(request.code, 401) From 3ed595e327aee6d45ed0371c98e828d724c26b2d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 14 Jun 2019 14:07:32 +0100 Subject: [PATCH 410/429] Prometheus histograms are cumalative --- synapse/metrics/__init__.py | 1 - synapse/storage/events.py | 3 ++- tests/storage/test_event_metrics.py | 20 ++++++++++---------- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index 0d3ae1a43d..8aee14a8a8 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -224,7 +224,6 @@ class BucketCollector(object): for i, bound in enumerate(self.buckets): if x <= bound: buckets[bound] = buckets.get(bound, 0) + data[x] - break for i in self.buckets: res.append([str(i), buckets.get(i, 0)]) diff --git a/synapse/storage/events.py b/synapse/storage/events.py index 1578403f79..f631fb1733 100644 --- a/synapse/storage/events.py +++ b/synapse/storage/events.py @@ -228,7 +228,8 @@ class EventsStore( self._state_resolution_handler = hs.get_state_resolution_handler() # Collect metrics on the number of forward extremities that exist. - self._current_forward_extremities_amount = {} + # Counter of number of extremities to count + self._current_forward_extremities_amount = c_counter() BucketCollector( "synapse_forward_extremities", diff --git a/tests/storage/test_event_metrics.py b/tests/storage/test_event_metrics.py index 1655fcdafc..19f9ccf5e0 100644 --- a/tests/storage/test_event_metrics.py +++ b/tests/storage/test_event_metrics.py @@ -64,16 +64,16 @@ class ExtremStatisticsTestCase(HomeserverTestCase): expected = set([ b'synapse_forward_extremities_bucket{le="1.0"} 0.0', b'synapse_forward_extremities_bucket{le="2.0"} 2.0', - b'synapse_forward_extremities_bucket{le="3.0"} 0.0', - b'synapse_forward_extremities_bucket{le="5.0"} 0.0', - b'synapse_forward_extremities_bucket{le="7.0"} 1.0', - b'synapse_forward_extremities_bucket{le="10.0"} 0.0', - b'synapse_forward_extremities_bucket{le="15.0"} 0.0', - b'synapse_forward_extremities_bucket{le="20.0"} 0.0', - b'synapse_forward_extremities_bucket{le="50.0"} 0.0', - b'synapse_forward_extremities_bucket{le="100.0"} 0.0', - b'synapse_forward_extremities_bucket{le="200.0"} 0.0', - b'synapse_forward_extremities_bucket{le="500.0"} 0.0', + b'synapse_forward_extremities_bucket{le="3.0"} 2.0', + b'synapse_forward_extremities_bucket{le="5.0"} 2.0', + b'synapse_forward_extremities_bucket{le="7.0"} 3.0', + b'synapse_forward_extremities_bucket{le="10.0"} 3.0', + b'synapse_forward_extremities_bucket{le="15.0"} 3.0', + b'synapse_forward_extremities_bucket{le="20.0"} 3.0', + b'synapse_forward_extremities_bucket{le="50.0"} 3.0', + b'synapse_forward_extremities_bucket{le="100.0"} 3.0', + b'synapse_forward_extremities_bucket{le="200.0"} 3.0', + b'synapse_forward_extremities_bucket{le="500.0"} 3.0', b'synapse_forward_extremities_bucket{le="+Inf"} 3.0', b'synapse_forward_extremities_count 3.0', b'synapse_forward_extremities_sum 10.0', From cc7cc853b1b9da283f2568243df49a97116bc61b Mon Sep 17 00:00:00 2001 From: Jorik Schellekens Date: Fri, 14 Jun 2019 14:07:47 +0100 Subject: [PATCH 411/429] Changelog --- changelog.d/5460.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5460.misc diff --git a/changelog.d/5460.misc b/changelog.d/5460.misc new file mode 100644 index 0000000000..badc8bb79a --- /dev/null +++ b/changelog.d/5460.misc @@ -0,0 +1 @@ +Demo script now 
uses python3. From 9fd4f83f1a31abeae8d110f87fbd257608caa2e2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 14 Jun 2019 14:19:37 +0100 Subject: [PATCH 412/429] Newsfile --- changelog.d/5461.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5461.feature diff --git a/changelog.d/5461.feature b/changelog.d/5461.feature new file mode 100644 index 0000000000..9497f521c8 --- /dev/null +++ b/changelog.d/5461.feature @@ -0,0 +1 @@ +Statistics on forward extremities per room are now exposed via Prometheus. From 6d56a694f4cbfaf9c57a56837d4170e6c6783f3c Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 7 Jun 2019 15:30:54 +0100 Subject: [PATCH 413/429] Don't send renewal emails to deactivated users --- changelog.d/5394.bugfix | 1 + synapse/handlers/account_validity.py | 3 + synapse/handlers/deactivate_account.py | 6 ++ synapse/storage/_base.py | 4 +- synapse/storage/registration.py | 14 +++++ tests/rest/client/v2_alpha/test_register.py | 67 +++++++++++++-------- 6 files changed, 68 insertions(+), 27 deletions(-) create mode 100644 changelog.d/5394.bugfix diff --git a/changelog.d/5394.bugfix b/changelog.d/5394.bugfix new file mode 100644 index 0000000000..2ad9fbe82c --- /dev/null +++ b/changelog.d/5394.bugfix @@ -0,0 +1 @@ +Fix a bug where deactivated users could receive renewal emails if the account validity feature is on. diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py index 261446517d..5e0b92eb1c 100644 --- a/synapse/handlers/account_validity.py +++ b/synapse/handlers/account_validity.py @@ -110,6 +110,9 @@ class AccountValidityHandler(object): # Stop right here if the user doesn't have at least one email address. # In this case, they will have to ask their server admin to renew their # account manually. + # We don't need to do a specific check to make sure the account isn't + # deactivated, as a deactivated account isn't supposed to have any + # email address attached to it. if not addresses: return diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index b29089d82c..7378b56c1d 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -43,6 +43,8 @@ class DeactivateAccountHandler(BaseHandler): # it left off (if it has work left to do). hs.get_reactor().callWhenRunning(self._start_user_parting) + self._account_validity_enabled = hs.config.account_validity.enabled + @defer.inlineCallbacks def deactivate_account(self, user_id, erase_data, id_server=None): """Deactivate a user's account @@ -115,6 +117,10 @@ class DeactivateAccountHandler(BaseHandler): # parts users from rooms (if it isn't already running) self._start_user_parting() + # Remove all information on the user from the account_validity table. + if self._account_validity_enabled: + yield self.store.delete_account_validity_for_user(user_id) + # Mark the user as deactivated. yield self.store.set_user_deactivated_status(user_id, True) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index ae891aa332..941c07fce5 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -299,12 +299,12 @@ class SQLBaseStore(object): def select_users_with_no_expiration_date_txn(txn): """Retrieves the list of registered users with no expiration date from the - database. + database, filtering out deactivated users. 
""" sql = ( "SELECT users.name FROM users" " LEFT JOIN account_validity ON (users.name = account_validity.user_id)" - " WHERE account_validity.user_id is NULL;" + " WHERE account_validity.user_id is NULL AND users.deactivated = 0;" ) txn.execute(sql, []) diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 4c5751b57f..9f910eac9c 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -251,6 +251,20 @@ class RegistrationWorkerStore(SQLBaseStore): desc="set_renewal_mail_status", ) + @defer.inlineCallbacks + def delete_account_validity_for_user(self, user_id): + """Deletes the entry for the given user in the account validity table, removing + their expiration date and renewal token. + + Args: + user_id (str): ID of the user to remove from the account validity table. + """ + yield self._simple_delete_one( + table="account_validity", + keyvalues={"user_id": user_id}, + desc="delete_account_validity_for_user", + ) + @defer.inlineCallbacks def is_server_admin(self, user): res = yield self._simple_select_one_onecol( diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index 8536e6777a..b35b215446 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -26,7 +26,7 @@ from synapse.api.constants import LoginType from synapse.api.errors import Codes from synapse.appservice import ApplicationService from synapse.rest.client.v1 import login -from synapse.rest.client.v2_alpha import account_validity, register, sync +from synapse.rest.client.v2_alpha import account, account_validity, register, sync from tests import unittest @@ -308,6 +308,7 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): login.register_servlets, sync.register_servlets, account_validity.register_servlets, + account.register_servlets, ] def make_homeserver(self, reactor, clock): @@ -358,20 +359,7 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): def test_renewal_email(self): self.email_attempts = [] - user_id = self.register_user("kermit", "monkey") - tok = self.login("kermit", "monkey") - # We need to manually add an email address otherwise the handler will do - # nothing. - now = self.hs.clock.time_msec() - self.get_success( - self.store.user_add_threepid( - user_id=user_id, - medium="email", - address="kermit@example.com", - validated_at=now, - added_at=now, - ) - ) + (user_id, tok) = self.create_user() # Move 6 days forward. This should trigger a renewal email to be sent. 
self.reactor.advance(datetime.timedelta(days=6).total_seconds()) @@ -396,6 +384,44 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): def test_manual_email_send(self): self.email_attempts = [] + (user_id, tok) = self.create_user() + request, channel = self.make_request( + b"POST", + "/_matrix/client/unstable/account_validity/send_mail", + access_token=tok, + ) + self.render(request) + self.assertEquals(channel.result["code"], b"200", channel.result) + + self.assertEqual(len(self.email_attempts), 1) + + def test_deactivated_user(self): + self.email_attempts = [] + + (user_id, tok) = self.create_user() + + request_data = json.dumps({ + "auth": { + "type": "m.login.password", + "user": user_id, + "password": "monkey", + }, + "erase": False, + }) + request, channel = self.make_request( + "POST", + "account/deactivate", + request_data, + access_token=tok, + ) + self.render(request) + self.assertEqual(request.code, 200) + + self.reactor.advance(datetime.timedelta(days=8).total_seconds()) + + self.assertEqual(len(self.email_attempts), 0) + + def create_user(self): user_id = self.register_user("kermit", "monkey") tok = self.login("kermit", "monkey") # We need to manually add an email address otherwise the handler will do @@ -410,16 +436,7 @@ class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): added_at=now, ) ) - - request, channel = self.make_request( - b"POST", - "/_matrix/client/unstable/account_validity/send_mail", - access_token=tok, - ) - self.render(request) - self.assertEquals(channel.result["code"], b"200", channel.result) - - self.assertEqual(len(self.email_attempts), 1) + return (user_id, tok) def test_manual_email_send_expired_account(self): user_id = self.register_user("kermit", "monkey") From e0b77b004db33563ec0a08fe835406dbc1591b6b Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 14 Jun 2019 16:00:45 +0100 Subject: [PATCH 414/429] Fix background job for deactivated flag --- synapse/storage/registration.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/synapse/storage/registration.py b/synapse/storage/registration.py index 9f910eac9c..d36917e4d6 100644 --- a/synapse/storage/registration.py +++ b/synapse/storage/registration.py @@ -642,7 +642,9 @@ class RegistrationStore( FROM users LEFT JOIN access_tokens ON (access_tokens.user_id = users.name) LEFT JOIN user_threepids ON (user_threepids.user_id = users.name) - WHERE password_hash IS NULL OR password_hash = '' + WHERE (users.password_hash IS NULL OR users.password_hash = '') + AND (users.appservice_id IS NULL OR users.appservice_id = '') + AND users.is_guest = 0 AND users.name > ? GROUP BY users.name ORDER BY users.name ASC @@ -666,7 +668,7 @@ class RegistrationStore( logger.info("Marked %d rows as deactivated", rows_processed_nb) self._background_update_progress_txn( - txn, "users_set_deactivated_flag", {"user_id": rows[-1]["user_id"]} + txn, "users_set_deactivated_flag", {"user_id": rows[-1]["name"]} ) if batch_size > len(rows): From 304a1376c2fbe6758b7b0c1987d16fcea5205528 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 14 Jun 2019 15:47:19 +0100 Subject: [PATCH 415/429] Fix 3PID invite room state over federation. Fixes that when a user exchanges a 3PID invite for a proper invite over federation it does not include the `invite_room_state` key. This was due to synapse incorrectly sending out two invite requests. 
--- synapse/handlers/federation.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index ac5ca79143..65ac127930 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2613,12 +2613,6 @@ class FederationHandler(BaseHandler): # though the sender isn't a local user. event.internal_metadata.send_on_behalf_of = get_domain_from_id(event.sender) - # XXX we send the invite here, but send_membership_event also sends it, - # so we end up making two requests. I think this is redundant. - returned_invite = yield self.send_invite(origin, event) - # TODO: Make sure the signatures actually are correct. - event.signatures.update(returned_invite.signatures) - member_handler = self.hs.get_room_member_handler() yield member_handler.send_membership_event(None, event, context) From 3c9bb86fde7d0157a59fcd7e2588e1671d945e50 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 14 Jun 2019 15:54:30 +0100 Subject: [PATCH 416/429] Newsfile --- changelog.d/5464.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5464.bugfix diff --git a/changelog.d/5464.bugfix b/changelog.d/5464.bugfix new file mode 100644 index 0000000000..8278d1bce9 --- /dev/null +++ b/changelog.d/5464.bugfix @@ -0,0 +1 @@ +Fix missing invite state after exchanging 3PID invites over federaton. From 4024520ff807eb6dec618332b203510b45fdb8da Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 14 Jun 2019 16:38:44 +0100 Subject: [PATCH 417/429] Changelog --- changelog.d/5465.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/5465.bugfix diff --git a/changelog.d/5465.bugfix b/changelog.d/5465.bugfix new file mode 100644 index 0000000000..d1655c8ea7 --- /dev/null +++ b/changelog.d/5465.bugfix @@ -0,0 +1 @@ +Fix a crash happening when running a specific background update. From 5cec6d1845d7daa4f36748e08b2d36ba0f564f58 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 14 Jun 2019 17:18:21 +0100 Subject: [PATCH 418/429] Fix changelog --- changelog.d/5465.bugfix | 1 - changelog.d/5465.misc | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) delete mode 100644 changelog.d/5465.bugfix create mode 100644 changelog.d/5465.misc diff --git a/changelog.d/5465.bugfix b/changelog.d/5465.bugfix deleted file mode 100644 index d1655c8ea7..0000000000 --- a/changelog.d/5465.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a crash happening when running a specific background update. diff --git a/changelog.d/5465.misc b/changelog.d/5465.misc new file mode 100644 index 0000000000..af5f0f8f45 --- /dev/null +++ b/changelog.d/5465.misc @@ -0,0 +1,2 @@ +Track deactivated accounts in the database. + From f874b16b2e7208d3a202283c085340196d065560 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 12 Jun 2019 10:31:37 +0100 Subject: [PATCH 419/429] Add plugin APIs for implementations of custom event rules. 
--- changelog.d/5440.feature | 1 + docs/sample_config.yaml | 13 ++++ synapse/config/homeserver.py | 2 + synapse/config/third_party_event_rules.py | 42 ++++++++++++ synapse/events/third_party_rules.py | 62 ++++++++++++++++++ synapse/handlers/federation.py | 68 ++++++++++++++++++- synapse/handlers/message.py | 14 +++- synapse/server.py | 7 ++ tests/rest/client/third_party_rules.py | 79 +++++++++++++++++++++++ 9 files changed, 284 insertions(+), 4 deletions(-) create mode 100644 changelog.d/5440.feature create mode 100644 synapse/config/third_party_event_rules.py create mode 100644 synapse/events/third_party_rules.py create mode 100644 tests/rest/client/third_party_rules.py diff --git a/changelog.d/5440.feature b/changelog.d/5440.feature new file mode 100644 index 0000000000..63d9b58734 --- /dev/null +++ b/changelog.d/5440.feature @@ -0,0 +1 @@ +Allow server admins to define implementations of extra rules for allowing or denying incoming events. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 4d7e6f3eb5..bd80d97a93 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -1351,3 +1351,16 @@ password_config: # alias: "*" # room_id: "*" # action: allow + + +# Server admins can define a Python module that implements extra rules for +# allowing or denying incoming events. In order to work, this module needs to +# override the methods defined in synapse/events/third_party_rules.py. +# +# This feature is designed to be used in closed federations only, where each +# participating server enforces the same rules. +# +#third_party_event_rules: +# module: "my_custom_project.SuperRulesSet" +# config: +# example_option: 'things' diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py index 5c4fc8ff21..acadef4fd3 100644 --- a/synapse/config/homeserver.py +++ b/synapse/config/homeserver.py @@ -38,6 +38,7 @@ from .server import ServerConfig from .server_notices_config import ServerNoticesConfig from .spam_checker import SpamCheckerConfig from .stats import StatsConfig +from .third_party_event_rules import ThirdPartyRulesConfig from .tls import TlsConfig from .user_directory import UserDirectoryConfig from .voip import VoipConfig @@ -73,5 +74,6 @@ class HomeServerConfig( StatsConfig, ServerNoticesConfig, RoomDirectoryConfig, + ThirdPartyRulesConfig, ): pass diff --git a/synapse/config/third_party_event_rules.py b/synapse/config/third_party_event_rules.py new file mode 100644 index 0000000000..a89dd5f98a --- /dev/null +++ b/synapse/config/third_party_event_rules.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from synapse.util.module_loader import load_module + +from ._base import Config + + +class ThirdPartyRulesConfig(Config): + def read_config(self, config): + self.third_party_event_rules = None + + provider = config.get("third_party_event_rules", None) + if provider is not None: + self.third_party_event_rules = load_module(provider) + + def default_config(self, **kwargs): + return """\ + # Server admins can define a Python module that implements extra rules for + # allowing or denying incoming events. In order to work, this module needs to + # override the methods defined in synapse/events/third_party_rules.py. + # + # This feature is designed to be used in closed federations only, where each + # participating server enforces the same rules. + # + #third_party_event_rules: + # module: "my_custom_project.SuperRulesSet" + # config: + # example_option: 'things' + """ diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py new file mode 100644 index 0000000000..9f98d51523 --- /dev/null +++ b/synapse/events/third_party_rules.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from twisted.internet import defer + + +class ThirdPartyEventRules(object): + """Allows server admins to provide a Python module implementing an extra set of rules + to apply when processing events. + + This is designed to help admins of closed federations with enforcing custom + behaviours. + """ + + def __init__(self, hs): + self.third_party_rules = None + + self.store = hs.get_datastore() + + module = None + config = None + if hs.config.third_party_event_rules: + module, config = hs.config.third_party_event_rules + + if module is not None: + self.third_party_rules = module(config=config) + + @defer.inlineCallbacks + def check_event_allowed(self, event, context): + """Check if a provided event should be allowed in the given context. + + Args: + event (synapse.events.EventBase): The event to be checked. + context (synapse.events.snapshot.EventContext): The context of the event. + + Returns: + defer.Deferred(bool), True if the event should be allowed, False if not. + """ + if self.third_party_rules is None: + defer.returnValue(True) + + prev_state_ids = yield context.get_prev_state_ids(self.store) + + # Retrieve the state events from the database. + state_events = {} + for key, event_id in prev_state_ids.items(): + state_events[key] = yield self.store.get_event(event_id, allow_none=True) + + ret = yield self.third_party_rules.check_event_allowed(event, state_events) + defer.returnValue(ret) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index ac5ca79143..983ac9f915 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd -# Copyright 2018 New Vector Ltd +# Copyright 2017-2018 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -33,6 +34,7 @@ from synapse.api.constants import EventTypes, Membership, RejectedReason from synapse.api.errors import ( AuthError, CodeMessageException, + Codes, FederationDeniedError, FederationError, RequestSendFailed, @@ -127,6 +129,8 @@ class FederationHandler(BaseHandler): self.room_queues = {} self._room_pdu_linearizer = Linearizer("fed_room_pdu") + self.third_party_event_rules = hs.get_third_party_event_rules() + @defer.inlineCallbacks def on_receive_pdu( self, origin, pdu, sent_to_us_directly=False, @@ -1258,6 +1262,15 @@ class FederationHandler(BaseHandler): logger.warn("Failed to create join %r because %s", event, e) raise e + event_allowed = yield self.third_party_event_rules.check_event_allowed( + event, context, + ) + if not event_allowed: + logger.info("Creation of join %s forbidden by third-party rules", event) + raise SynapseError( + 403, "This event is not allowed in this context", Codes.FORBIDDEN, + ) + # The remote hasn't signed it yet, obviously. We'll do the full checks # when we get the event back in `on_send_join_request` yield self.auth.check_from_context( @@ -1300,6 +1313,15 @@ class FederationHandler(BaseHandler): origin, event ) + event_allowed = yield self.third_party_event_rules.check_event_allowed( + event, context, + ) + if not event_allowed: + logger.info("Sending of join %s forbidden by third-party rules", event) + raise SynapseError( + 403, "This event is not allowed in this context", Codes.FORBIDDEN, + ) + logger.debug( "on_send_join_request: After _handle_new_event: %s, sigs: %s", event.event_id, @@ -1458,6 +1480,15 @@ class FederationHandler(BaseHandler): builder=builder, ) + event_allowed = yield self.third_party_event_rules.check_event_allowed( + event, context, + ) + if not event_allowed: + logger.warning("Creation of leave %s forbidden by third-party rules", event) + raise SynapseError( + 403, "This event is not allowed in this context", Codes.FORBIDDEN, + ) + try: # The remote hasn't signed it yet, obviously. 
We'll do the full checks # when we get the event back in `on_send_leave_request` @@ -1484,10 +1515,19 @@ class FederationHandler(BaseHandler): event.internal_metadata.outlier = False - yield self._handle_new_event( + context = yield self._handle_new_event( origin, event ) + event_allowed = yield self.third_party_event_rules.check_event_allowed( + event, context, + ) + if not event_allowed: + logger.info("Sending of leave %s forbidden by third-party rules", event) + raise SynapseError( + 403, "This event is not allowed in this context", Codes.FORBIDDEN, + ) + logger.debug( "on_send_leave_request: After _handle_new_event: %s, sigs: %s", event.event_id, @@ -2550,6 +2590,18 @@ class FederationHandler(BaseHandler): builder=builder ) + event_allowed = yield self.third_party_event_rules.check_event_allowed( + event, context, + ) + if not event_allowed: + logger.info( + "Creation of threepid invite %s forbidden by third-party rules", + event, + ) + raise SynapseError( + 403, "This event is not allowed in this context", Codes.FORBIDDEN, + ) + event, context = yield self.add_display_name_to_third_party_invite( room_version, event_dict, event, context ) @@ -2598,6 +2650,18 @@ class FederationHandler(BaseHandler): builder=builder, ) + event_allowed = yield self.third_party_event_rules.check_event_allowed( + event, context, + ) + if not event_allowed: + logger.warning( + "Exchange of threepid invite %s forbidden by third-party rules", + event, + ) + raise SynapseError( + 403, "This event is not allowed in this context", Codes.FORBIDDEN, + ) + event, context = yield self.add_display_name_to_third_party_invite( room_version, event_dict, event, context ) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 0b02469ceb..11650dc80c 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- -# Copyright 2014 - 2016 OpenMarket Ltd -# Copyright 2017 - 2018 New Vector Ltd +# Copyright 2014-2016 OpenMarket Ltd +# Copyright 2017-2018 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -248,6 +249,7 @@ class EventCreationHandler(object): self.action_generator = hs.get_action_generator() self.spam_checker = hs.get_spam_checker() + self.third_party_event_rules = hs.get_third_party_event_rules() self._block_events_without_consent_error = ( self.config.block_events_without_consent_error @@ -658,6 +660,14 @@ class EventCreationHandler(object): else: room_version = yield self.store.get_room_version(event.room_id) + event_allowed = yield self.third_party_event_rules.check_event_allowed( + event, context, + ) + if not event_allowed: + raise SynapseError( + 403, "This event is not allowed in this context", Codes.FORBIDDEN, + ) + try: yield self.auth.check_from_context(room_version, event, context) except AuthError as err: diff --git a/synapse/server.py b/synapse/server.py index 9229a68a8d..a54e023cc9 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -1,5 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd +# Copyright 2017-2018 New Vector Ltd +# Copyright 2019 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -35,6 +37,7 @@ from synapse.crypto import context_factory from synapse.crypto.keyring import Keyring from synapse.events.builder import EventBuilderFactory from synapse.events.spamcheck import SpamChecker +from synapse.events.third_party_rules import ThirdPartyEventRules from synapse.events.utils import EventClientSerializer from synapse.federation.federation_client import FederationClient from synapse.federation.federation_server import ( @@ -178,6 +181,7 @@ class HomeServer(object): 'groups_attestation_renewer', 'secrets', 'spam_checker', + 'third_party_event_rules', 'room_member_handler', 'federation_registry', 'server_notices_manager', @@ -483,6 +487,9 @@ class HomeServer(object): def build_spam_checker(self): return SpamChecker(self) + def build_third_party_event_rules(self): + return ThirdPartyEventRules(self) + def build_room_member_handler(self): if self.config.worker_app: return RoomMemberWorkerHandler(self) diff --git a/tests/rest/client/third_party_rules.py b/tests/rest/client/third_party_rules.py new file mode 100644 index 0000000000..7167fc56b6 --- /dev/null +++ b/tests/rest/client/third_party_rules.py @@ -0,0 +1,79 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an 'AS IS' BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.rest import admin +from synapse.rest.client.v1 import login, room + +from tests import unittest + + +class ThirdPartyRulesTestModule(object): + def __init__(self, config): + pass + + def check_event_allowed(self, event, context): + if event.type == "foo.bar.forbidden": + return False + else: + return True + + @staticmethod + def parse_config(config): + return config + + +class ThirdPartyRulesTestCase(unittest.HomeserverTestCase): + servlets = [ + admin.register_servlets, + login.register_servlets, + room.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + config = self.default_config() + config["third_party_event_rules"] = { + "module": "tests.rest.client.third_party_rules.ThirdPartyRulesTestModule", + "config": {}, + } + + self.hs = self.setup_test_homeserver(config=config) + return self.hs + + def test_third_party_rules(self): + """Tests that a forbidden event is forbidden from being sent, but an allowed one + can be sent. 
+ """ + user_id = self.register_user("kermit", "monkey") + tok = self.login("kermit", "monkey") + + room_id = self.helper.create_room_as(user_id, tok=tok) + + request, channel = self.make_request( + "PUT", + "/_matrix/client/r0/rooms/%s/send/foo.bar.allowed/1" % room_id, + {}, + access_token=tok, + ) + self.render(request) + self.assertEquals(channel.result["code"], b"200", channel.result) + + request, channel = self.make_request( + "PUT", + "/_matrix/client/r0/rooms/%s/send/foo.bar.forbidden/1" % room_id, + {}, + access_token=tok, + ) + self.render(request) + self.assertEquals(channel.result["code"], b"403", channel.result) From 97d7e4c7b75999b991f53f8a7ee6b25d15442e92 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Mon, 17 Jun 2019 21:08:15 +1000 Subject: [PATCH 420/429] Move SyTest to Buildkite (#5459) Including workers! --- .buildkite/format_tap.py | 33 ++++ .../merge_base_branch.sh | 17 +- .buildkite/pipeline.yml | 60 ++++++++ .buildkite/synapse_sytest.sh | 145 ++++++++++++++++++ .circleci/config.yml | 67 -------- changelog.d/5459.misc | 1 + 6 files changed, 247 insertions(+), 76 deletions(-) create mode 100644 .buildkite/format_tap.py rename {.circleci => .buildkite}/merge_base_branch.sh (52%) create mode 100644 .buildkite/synapse_sytest.sh create mode 100644 changelog.d/5459.misc diff --git a/.buildkite/format_tap.py b/.buildkite/format_tap.py new file mode 100644 index 0000000000..94582f5571 --- /dev/null +++ b/.buildkite/format_tap.py @@ -0,0 +1,33 @@ +import sys +from tap.parser import Parser +from tap.line import Result, Unknown, Diagnostic + +out = ["### TAP Output for " + sys.argv[2]] + +p = Parser() + +in_error = False + +for line in p.parse_file(sys.argv[1]): + if isinstance(line, Result): + if in_error: + out.append("") + out.append("") + out.append("") + out.append("----") + out.append("") + in_error = False + + if not line.ok and not line.todo: + in_error = True + + out.append("FAILURE Test #%d: ``%s``" % (line.number, line.description)) + out.append("") + out.append("
<details><summary>Show log</summary><pre><code>")
+
+    elif isinstance(line, Diagnostic) and in_error:
+        out.append(line.text)
+
+if out:
+    for line in out[:-3]:
+        print(line)
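As a reference for how this formatter is consumed: synapse_sytest.sh (added later in this patch) runs it as `format_tap.py /logs/results.tap "$BUILDKITE_LABEL"` and pipes the output to `buildkite-agent annotate`. A minimal local sketch for exercising it, assuming the tap.py dependency is installed and the command is run from a synapse checkout (the sample TAP content below is invented purely for illustration):

    # Write a tiny TAP file containing one failure, then run the formatter
    # over it, the same way the sytest wrapper script does in CI.
    import subprocess
    import sys

    sample_tap = (
        "TAP version 13\n"
        "1..2\n"
        "ok 1 - first test passes\n"
        "not ok 2 - second test fails\n"
        "# diagnostic output captured for the failure\n"
    )

    with open("results.tap", "w") as f:
        f.write(sample_tap)

    # The second argument is only used for the "### TAP Output for ..." heading.
    subprocess.check_call(
        [sys.executable, ".buildkite/format_tap.py", "results.tap", "example-label"]
    )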
diff --git a/.circleci/merge_base_branch.sh b/.buildkite/merge_base_branch.sh
similarity index 52%
rename from .circleci/merge_base_branch.sh
rename to .buildkite/merge_base_branch.sh
index 4c19fa70d7..26176d6465 100755
--- a/.circleci/merge_base_branch.sh
+++ b/.buildkite/merge_base_branch.sh
@@ -1,22 +1,21 @@
 #!/usr/bin/env bash
 
-set -e
+set -ex
 
-# CircleCI doesn't give CIRCLE_PR_NUMBER in the environment for non-forked PRs. Wonderful.
-# In this case, we just need to do some ~shell magic~ to strip it out of the PULL_REQUEST URL.
-echo 'export CIRCLE_PR_NUMBER="${CIRCLE_PR_NUMBER:-${CIRCLE_PULL_REQUEST##*/}}"' >> $BASH_ENV
-source $BASH_ENV
+if [[ "$BUILDKITE_BRANCH" =~ ^(develop|master|dinsic|shhs|release-.*)$ ]]; then
+    echo "Not merging forward, as this is a release branch"
+    exit 0
+fi
 
-if [[ -z "${CIRCLE_PR_NUMBER}" ]]
-then
-    echo "Can't figure out what the PR number is! Assuming merge target is develop."
+if [[ -z $BUILDKITE_PULL_REQUEST_BASE_BRANCH ]]; then
+    echo "Not a pull request, or hasn't had a PR opened yet..."
 
     # It probably hasn't had a PR opened yet. Since all PRs land on develop, we
     # can probably assume it's based on it and will be merged into it.
     GITBASE="develop"
 else
     # Get the reference, using the GitHub API
-    GITBASE=`wget -O- https://api.github.com/repos/matrix-org/synapse/pulls/${CIRCLE_PR_NUMBER} | jq -r '.base.ref'`
+    GITBASE=$BUILDKITE_PULL_REQUEST_BASE_BRANCH
 fi
 
 # Show what we are before
diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml
index 8eddf8b931..6c6229a205 100644
--- a/.buildkite/pipeline.yml
+++ b/.buildkite/pipeline.yml
@@ -2,6 +2,7 @@ env:
   CODECOV_TOKEN: "2dd7eb9b-0eda-45fe-a47c-9b5ac040045f"
 
 steps:
+
   - command:
       - "python -m pip install tox"
       - "tox -e pep8"
@@ -46,6 +47,7 @@ steps:
 
   - wait
 
+
   - command:
       - "python -m pip install tox"
       - "tox -e py35-old,codecov"
@@ -181,3 +183,61 @@ steps:
           limit: 2
         - exit_status: 2
           limit: 2
+
+
+  - label: "SyTest - :python: 3.5 / SQLite / Monolith"
+    agents:
+      queue: "medium"
+    command:
+      - "bash .buildkite/merge_base_branch.sh"
+      - "bash .buildkite/synapse_sytest.sh"
+    plugins:
+      - docker#v3.0.1:
+          image: "matrixdotorg/sytest-synapse:py35"
+          propagate-environment: true
+    retry:
+      automatic:
+        - exit_status: -1
+          limit: 2
+        - exit_status: 2
+          limit: 2
+
+  - label: "SyTest - :python: 3.5 / :postgres: 9.6 / Monolith"
+    agents:
+      queue: "medium"
+    env:
+      POSTGRES: "1"
+    command:
+      - "bash .buildkite/merge_base_branch.sh"
+      - "bash .buildkite/synapse_sytest.sh"
+    plugins:
+      - docker#v3.0.1:
+          image: "matrixdotorg/sytest-synapse:py35"
+          propagate-environment: true
+    retry:
+      automatic:
+        - exit_status: -1
+          limit: 2
+        - exit_status: 2
+          limit: 2
+
+  - label: "SyTest - :python: 3.5 / :postgres: 9.6 / Workers"
+    agents:
+      queue: "medium"
+    env:
+      POSTGRES: "1"
+      WORKERS: "1"
+    command:
+      - "bash .buildkite/merge_base_branch.sh"
+      - "bash .buildkite/synapse_sytest.sh"
+    plugins:
+      - docker#v3.0.1:
+          image: "matrixdotorg/sytest-synapse:py35"
+          propagate-environment: true
+    soft_fail: true
+    retry:
+      automatic:
+        - exit_status: -1
+          limit: 2
+        - exit_status: 2
+          limit: 2
diff --git a/.buildkite/synapse_sytest.sh b/.buildkite/synapse_sytest.sh
new file mode 100644
index 0000000000..3011b88bb7
--- /dev/null
+++ b/.buildkite/synapse_sytest.sh
@@ -0,0 +1,145 @@
+#!/bin/bash
+#
+# Fetch sytest, and then run the tests for synapse. The entrypoint for the
+# sytest-synapse docker images.
+
+set -ex
+
+if [ -n "$BUILDKITE" ]
+then
+    SYNAPSE_DIR=`pwd`
+else
+    SYNAPSE_DIR="/src"
+fi
+
+# Attempt to find a sytest to use.
+# If /sytest exists, it means that a SyTest checkout has been mounted into the Docker image.
+if [ -d "/sytest" ]; then
+    # If the user has mounted in a SyTest checkout, use that.
+    echo "Using local sytests..."
+
+    # create ourselves a working directory and dos2unix some scripts therein
+    mkdir -p /work/jenkins
+    for i in install-deps.pl run-tests.pl tap-to-junit-xml.pl jenkins/prep_sytest_for_postgres.sh; do
+        dos2unix -n "/sytest/$i" "/work/$i"
+    done
+    ln -sf /sytest/tests /work
+    ln -sf /sytest/keys /work
+    SYTEST_LIB="/sytest/lib"
+else
+    if [ -n "BUILDKITE_BRANCH" ]
+    then
+        branch_name=$BUILDKITE_BRANCH
+    else
+        # Otherwise, try to find out which branch the Synapse checkout is using. Fall back to develop if it's not a branch.
+        branch_name="$(git --git-dir=/src/.git symbolic-ref HEAD 2>/dev/null)" || branch_name="develop"
+    fi
+
+    # Try and fetch the branch
+    echo "Trying to get same-named sytest branch..."
+    wget -q https://github.com/matrix-org/sytest/archive/$branch_name.tar.gz -O sytest.tar.gz || {
+        # Probably a 404, fall back to develop
+        echo "Using develop instead..."
+        wget -q https://github.com/matrix-org/sytest/archive/develop.tar.gz -O sytest.tar.gz
+    }
+
+    mkdir -p /work
+    tar -C /work --strip-components=1 -xf sytest.tar.gz
+    SYTEST_LIB="/work/lib"
+fi
+
+cd /work
+
+# PostgreSQL setup
+if [ -n "$POSTGRES" ]
+then
+    export PGUSER=postgres
+    export POSTGRES_DB_1=pg1
+    export POSTGRES_DB_2=pg2
+
+    # Start the database
+    su -c 'eatmydata /usr/lib/postgresql/9.6/bin/pg_ctl -w -D /var/lib/postgresql/data start' postgres
+
+    # Use the Jenkins script to write out the configuration for a PostgreSQL-backed Synapse
+    jenkins/prep_sytest_for_postgres.sh
+
+    # Make the test databases for the two Synapse servers that will be spun up
+    su -c 'psql -c "CREATE DATABASE pg1;"' postgres
+    su -c 'psql -c "CREATE DATABASE pg2;"' postgres
+
+fi
+
+if [ -n "$OFFLINE" ]; then
+    # if we're in offline mode, just put synapse into the virtualenv, and
+    # hope that the deps are up-to-date.
+    #
+    # (`pip install -e` likes to reinstall setuptools even if it's already installed,
+    # so we just run setup.py explicitly.)
+    #
+    (cd $SYNAPSE_DIR && /venv/bin/python setup.py -q develop)
+else
+    # We've already created the virtualenv, but let's double check we have all
+    # deps.
+    /venv/bin/pip install -q --upgrade --no-cache-dir -e $SYNAPSE_DIR
+    /venv/bin/pip install -q --upgrade --no-cache-dir \
+        lxml psycopg2 coverage codecov tap.py
+
+    # Make sure all Perl deps are installed -- this is done in the docker build
+    # so will only install packages added since the last Docker build
+    ./install-deps.pl
+fi
+
+
+# Run the tests
+>&2 echo "+++ Running tests"
+
+RUN_TESTS=(
+    perl -I "$SYTEST_LIB" ./run-tests.pl --python=/venv/bin/python --synapse-directory=$SYNAPSE_DIR --coverage -O tap --all
+)
+
+TEST_STATUS=0
+
+if [ -n "$WORKERS" ]; then
+    RUN_TESTS+=(-I Synapse::ViaHaproxy --dendron-binary=/pydron.py)
+else
+    RUN_TESTS+=(-I Synapse)
+fi
+
+"${RUN_TESTS[@]}" "$@" > results.tap || TEST_STATUS=$?
+
+if [ $TEST_STATUS -ne 0 ]; then
+    >&2 echo -e "run-tests \e[31mFAILED\e[0m: exit code $TEST_STATUS"
+else
+    >&2 echo -e "run-tests \e[32mPASSED\e[0m"
+fi
+
+>&2 echo "--- Copying assets"
+
+# Copy out the logs
+mkdir -p /logs
+cp results.tap /logs/results.tap
+rsync --ignore-missing-args  --min-size=1B -av server-0 server-1 /logs --include "*/" --include="*.log.*" --include="*.log" --exclude="*"
+
+# Upload coverage to codecov and upload files, if running on Buildkite
+if [ -n "$BUILDKITE" ]
+then
+    /venv/bin/coverage combine || true
+    /venv/bin/coverage xml || true
+    /venv/bin/codecov -X gcov -f coverage.xml
+
+    wget -O buildkite.tar.gz https://github.com/buildkite/agent/releases/download/v3.13.0/buildkite-agent-linux-amd64-3.13.0.tar.gz
+    tar xvf buildkite.tar.gz
+    chmod +x ./buildkite-agent
+
+    # Upload the files
+    ./buildkite-agent artifact upload "/logs/**/*.log*"
+    ./buildkite-agent artifact upload "/logs/results.tap"
+
+    if [ $TEST_STATUS -ne 0 ]; then
+        # Annotate, if failure
+        /venv/bin/python $SYNAPSE_DIR/.buildkite/format_tap.py /logs/results.tap "$BUILDKITE_LABEL" | ./buildkite-agent annotate --style="error" --context="$BUILDKITE_LABEL"
+    fi
+fi
+
+
+exit $TEST_STATUS
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 3c2b32c015..e4fd5ffa6b 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -17,77 +17,10 @@ jobs:
       - run: docker push matrixdotorg/synapse:latest
       - run: docker push matrixdotorg/synapse:latest-py3
 
-  sytestpy3:
-    docker:
-      - image: matrixdotorg/sytest-synapsepy3
-    working_directory: /src
-    steps:
-      - checkout
-      - run: /synapse_sytest.sh
-      - store_artifacts:
-          path: /logs
-          destination: logs
-      - store_test_results:
-          path: /logs
-  sytestpy3postgres:
-    docker:
-      - image: matrixdotorg/sytest-synapsepy3
-    working_directory: /src
-    steps:
-      - checkout
-      - run: POSTGRES=1 /synapse_sytest.sh
-      - store_artifacts:
-          path: /logs
-          destination: logs
-      - store_test_results:
-          path: /logs
-  sytestpy3merged:
-    docker:
-      - image: matrixdotorg/sytest-synapsepy3
-    working_directory: /src
-    steps:
-      - checkout
-      - run: bash .circleci/merge_base_branch.sh
-      - run: /synapse_sytest.sh
-      - store_artifacts:
-          path: /logs
-          destination: logs
-      - store_test_results:
-          path: /logs
-  sytestpy3postgresmerged:
-    docker:
-      - image: matrixdotorg/sytest-synapsepy3
-    working_directory: /src
-    steps:
-      - checkout
-      - run: bash .circleci/merge_base_branch.sh
-      - run: POSTGRES=1 /synapse_sytest.sh
-      - store_artifacts:
-          path: /logs
-          destination: logs
-      - store_test_results:
-          path: /logs
-
 workflows:
   version: 2
   build:
     jobs:
-      - sytestpy3:
-          filters:
-            branches:
-              only: /develop|master|release-.*/
-      - sytestpy3postgres:
-          filters:
-            branches:
-              only: /develop|master|release-.*/
-      - sytestpy3merged:
-          filters:
-            branches:
-              ignore: /develop|master|release-.*/
-      - sytestpy3postgresmerged:
-          filters:
-            branches:
-              ignore: /develop|master|release-.*/
       - dockerhubuploadrelease:
           filters:
             tags:
diff --git a/changelog.d/5459.misc b/changelog.d/5459.misc
new file mode 100644
index 0000000000..904e45f66b
--- /dev/null
+++ b/changelog.d/5459.misc
@@ -0,0 +1 @@
+SyTest has been moved to Buildkite.

From eba7caf09fe9bb5f5a0d4b17c5dde1413343cadc Mon Sep 17 00:00:00 2001
From: Amber Brown 
Date: Tue, 18 Jun 2019 00:59:00 +1000
Subject: [PATCH 421/429] Remove Postgres 9.4 support (#5448)

---
 .buildkite/docker-compose.py35.pg94.yaml | 21 ----------------
 .buildkite/pipeline.yml                  | 17 -------------
 CONTRIBUTING.rst                         | 19 +++++++--------
 UPGRADE.rst                              | 31 ++++++++++++++++++++++--
 changelog.d/5448.removal                 |  1 +
 docker/Dockerfile-pgtests                |  4 +--
 docker/run_pg_tests.sh                   |  2 +-
 docs/postgres.rst                        |  4 +--
 synapse/storage/engines/postgres.py      |  8 ++++--
 synapse/storage/search.py                | 22 -----------------
 10 files changed, 50 insertions(+), 79 deletions(-)
 delete mode 100644 .buildkite/docker-compose.py35.pg94.yaml
 create mode 100644 changelog.d/5448.removal

diff --git a/.buildkite/docker-compose.py35.pg94.yaml b/.buildkite/docker-compose.py35.pg94.yaml
deleted file mode 100644
index 978aedd115..0000000000
--- a/.buildkite/docker-compose.py35.pg94.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
-version: '3.1'
-
-services:
-
-  postgres:
-    image: postgres:9.4
-    environment:
-      POSTGRES_PASSWORD: postgres
-
-  testenv:
-    image: python:3.5
-    depends_on:
-      - postgres
-    env_file: .env
-    environment:
-      SYNAPSE_POSTGRES_HOST: postgres
-      SYNAPSE_POSTGRES_USER: postgres
-      SYNAPSE_POSTGRES_PASSWORD: postgres
-    working_dir: /app
-    volumes:
-      - ..:/app
diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml
index 6c6229a205..20c7aab5a7 100644
--- a/.buildkite/pipeline.yml
+++ b/.buildkite/pipeline.yml
@@ -116,23 +116,6 @@ steps:
         - exit_status: 2
           limit: 2
 
-  - label: ":python: 3.5 / :postgres: 9.4"
-    env:
-      TRIAL_FLAGS: "-j 4"
-    command:
-      - "bash -c 'python -m pip install tox && python -m tox -e py35-postgres,codecov'"
-    plugins:
-      - docker-compose#v2.1.0:
-          run: testenv
-          config:
-            - .buildkite/docker-compose.py35.pg94.yaml
-    retry:
-      automatic:
-        - exit_status: -1
-          limit: 2
-        - exit_status: 2
-          limit: 2
-
   - label: ":python: 3.5 / :postgres: 9.5"
     env:
       TRIAL_FLAGS: "-j 4"
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 9a283ced6e..2c44422a0e 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -30,21 +30,20 @@ use github's pull request workflow to review the contribution, and either ask
 you to make any refinements needed or merge it and make them ourselves. The
 changes will then land on master when we next do a release.
 
-We use `CircleCI `_ and `Travis CI
-`_ for continuous integration. All
-pull requests to synapse get automatically tested by Travis and CircleCI.
-If your change breaks the build, this will be shown in GitHub, so please
-keep an eye on the pull request for feedback.
+We use `CircleCI `_ and `Buildkite
+`_ for continuous integration.
+Buildkite builds need to be authorised by a maintainer. If your change breaks
+the build, this will be shown in GitHub, so please keep an eye on the pull
+request for feedback.
 
 To run unit tests in a local development environment, you can use:
 
-- ``tox -e py27`` (requires tox to be installed by ``pip install tox``) for
-  SQLite-backed Synapse on Python 2.7.
-- ``tox -e py35`` for SQLite-backed Synapse on Python 3.5.
+- ``tox -e py35`` (requires tox to be installed by ``pip install tox``)
+  for SQLite-backed Synapse on Python 3.5.
 - ``tox -e py36`` for SQLite-backed Synapse on Python 3.6.
-- ``tox -e py27-postgres`` for PostgreSQL-backed Synapse on Python 2.7
+- ``tox -e py36-postgres`` for PostgreSQL-backed Synapse on Python 3.6
   (requires a running local PostgreSQL with access to create databases).
-- ``./test_postgresql.sh`` for PostgreSQL-backed Synapse on Python 2.7
+- ``./test_postgresql.sh`` for PostgreSQL-backed Synapse on Python 3.5
   (requires Docker). Entirely self-contained, recommended if you don't want to
   set up PostgreSQL yourself.
 
diff --git a/UPGRADE.rst b/UPGRADE.rst
index 6032a505c9..1fb109a218 100644
--- a/UPGRADE.rst
+++ b/UPGRADE.rst
@@ -49,6 +49,33 @@ returned by the Client-Server API:
     # configured on port 443.
     curl -kv https:///_matrix/client/versions 2>&1 | grep "Server:"
 
+Upgrading to v1.1
+=================
+
+Synapse 1.1 removes support for older Python and PostgreSQL versions, as
+outlined in `our deprecation notice `_.
+
+Minimum Python Version
+----------------------
+
+Synapse v1.1 has a minimum Python requirement of Python 3.5. Python 3.6 or
+Python 3.7 are recommended as they have improved internal string handling,
+significantly reducing memory usage.
+
+If you use current versions of the Matrix.org-distributed Debian packages or
+Docker images, no action is required.
+
+If you install Synapse in a Python virtual environment, please see "Upgrading to
+v0.34.0" for notes on setting up a new virtualenv under Python 3.
+
+Minimum PostgreSQL Version
+--------------------------
+
+If using PostgreSQL under Synapse, you will need to use PostgreSQL 9.5 or above.
+Please see the
+`PostgreSQL documentation `_
+for more details on upgrading your database.
+
 Upgrading to v1.0
 =================
 
@@ -71,11 +98,11 @@ server in a closed federation. This can be done in one of two ways:-
 * Configure a whitelist of server domains to trust via ``federation_certificate_verification_whitelist``.
 
 See the `sample configuration file `_
-for more details on these settings. 
+for more details on these settings.
 
 Email
 -----
-When a user requests a password reset, Synapse will send an email to the 
+When a user requests a password reset, Synapse will send an email to the
 user to confirm the request.
 
 Previous versions of Synapse delegated the job of sending this email to an
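
As a quick way to confirm the new PostgreSQL minimum described above before upgrading, the sketch below uses psycopg2 (the driver Synapse already uses); it is not part of this patch, and the connection parameters are placeholders:

    # Rough check that the server meets the 9.5 minimum. psycopg2 reports
    # server_version as a packed integer, e.g. 90605 for 9.6.5.
    import psycopg2

    conn = psycopg2.connect(host="localhost", user="postgres", dbname="postgres")
    if conn.server_version < 90500:
        print("PostgreSQL is too old for Synapse 1.1: %d" % conn.server_version)
    else:
        print("PostgreSQL version looks OK: %d" % conn.server_version)
    conn.close()
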
diff --git a/changelog.d/5448.removal b/changelog.d/5448.removal
new file mode 100644
index 0000000000..33b9859dae
--- /dev/null
+++ b/changelog.d/5448.removal
@@ -0,0 +1 @@
+PostgreSQL 9.4 is no longer supported. Synapse requires Postgres 9.5 or above for Postgres support.
diff --git a/docker/Dockerfile-pgtests b/docker/Dockerfile-pgtests
index 7da8eeb9eb..3bfee845c6 100644
--- a/docker/Dockerfile-pgtests
+++ b/docker/Dockerfile-pgtests
@@ -3,10 +3,10 @@
 FROM matrixdotorg/sytest:latest
 
 # The Sytest image doesn't come with python, so install that
-RUN apt-get -qq install -y python python-dev python-pip
+RUN apt-get update && apt-get -qq install -y python3 python3-dev python3-pip
 
 # We need tox to run the tests in run_pg_tests.sh
-RUN pip install tox
+RUN python3 -m pip install tox
 
 ADD run_pg_tests.sh /pg_tests.sh
 ENTRYPOINT /pg_tests.sh
diff --git a/docker/run_pg_tests.sh b/docker/run_pg_tests.sh
index e77424c41a..d18d1e4c8e 100755
--- a/docker/run_pg_tests.sh
+++ b/docker/run_pg_tests.sh
@@ -17,4 +17,4 @@ su -c '/usr/lib/postgresql/9.6/bin/pg_ctl -w -D /var/lib/postgresql/data start'
 # Run the tests
 cd /src
 export TRIAL_FLAGS="-j 4"
-tox --workdir=/tmp -e py27-postgres
+tox --workdir=/tmp -e py35-postgres
diff --git a/docs/postgres.rst b/docs/postgres.rst
index e81e10403f..33f58e3ace 100644
--- a/docs/postgres.rst
+++ b/docs/postgres.rst
@@ -1,7 +1,7 @@
 Using Postgres
 --------------
 
-Postgres version 9.4 or later is known to work.
+Postgres version 9.5 or later is known to work.
 
 Install postgres client libraries
 =================================
@@ -16,7 +16,7 @@ a postgres database.
 * For other pre-built packages, please consult the documentation from the
   relevant package.
 
-* If you installed synapse `in a virtualenv 
+* If you installed synapse `in a virtualenv
   <../INSTALL.md#installing-from-source>`_, you can install the library with::
 
       ~/synapse/env/bin/pip install matrix-synapse[postgres]
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py
index 1b97ee74e3..289b6bc281 100644
--- a/synapse/storage/engines/postgres.py
+++ b/synapse/storage/engines/postgres.py
@@ -45,6 +45,10 @@ class PostgresEngine(object):
         # together. For example, version 8.1.5 will be returned as 80105
         self._version = db_conn.server_version
 
+        # Are we on a supported PostgreSQL version?
+        if self._version < 90500:
+            raise RuntimeError("Synapse requires PostgreSQL 9.5+ or above.")
+
         db_conn.set_isolation_level(
             self.module.extensions.ISOLATION_LEVEL_REPEATABLE_READ
         )
@@ -64,9 +68,9 @@ class PostgresEngine(object):
     @property
     def can_native_upsert(self):
         """
-        Can we use native UPSERTs? This requires PostgreSQL 9.5+.
+        Can we use native UPSERTs?
         """
-        return self._version >= 90500
+        return True
 
     def is_deadlock(self, error):
         if isinstance(error, self.module.DatabaseError):
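
For reference, the integer compared against 90500 above is psycopg2's packed server version (major * 10000 + minor * 100 + patch, so 8.1.5 becomes 80105, as the existing comment notes). A tiny illustrative helper, not part of the patch:

    def decode_pg_version(v):
        # 80105 -> (8, 1, 5); 90500 -> (9, 5, 0)
        return (v // 10000, (v // 100) % 100, v % 100)

    assert decode_pg_version(80105) == (8, 1, 5)
    assert decode_pg_version(90500) == (9, 5, 0)
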
diff --git a/synapse/storage/search.py b/synapse/storage/search.py
index ff49eaae02..10a27c207a 100644
--- a/synapse/storage/search.py
+++ b/synapse/storage/search.py
@@ -341,29 +341,7 @@ class SearchStore(BackgroundUpdateStore):
                 for entry in entries
             )
 
-            # inserts to a GIN index are normally batched up into a pending
-            # list, and then all committed together once the list gets to a
-            # certain size. The trouble with that is that postgres (pre-9.5)
-            # uses work_mem to determine the length of the list, and work_mem
-            # is typically very large.
-            #
-            # We therefore reduce work_mem while we do the insert.
-            #
-            # (postgres 9.5 uses the separate gin_pending_list_limit setting,
-            # so doesn't suffer the same problem, but changing work_mem will
-            # be harmless)
-            #
-            # Note that we don't need to worry about restoring it on
-            # exception, because exceptions will cause the transaction to be
-            # rolled back, including the effects of the SET command.
-            #
-            # Also: we use SET rather than SET LOCAL because there's lots of
-            # other stuff going on in this transaction, which want to have the
-            # normal work_mem setting.
-
-            txn.execute("SET work_mem='256kB'")
             txn.executemany(sql, args)
-            txn.execute("RESET work_mem")
 
         elif isinstance(self.database_engine, Sqlite3Engine):
             sql = (

From 839f9b923182208d1f0f57a0ff02fb0edd5d4a47 Mon Sep 17 00:00:00 2001
From: Jorik Schellekens 
Date: Mon, 17 Jun 2019 16:24:28 +0100
Subject: [PATCH 422/429] One shot demo server startup

Configure the demo servers to use untrusted
tls certs so that they communicate with each other.

This configuration makes them very unsafe so I've added warnings about
it in the readme.
---
 demo/README   |  8 +++++--
 demo/start.sh | 66 +++++++++++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 70 insertions(+), 4 deletions(-)

diff --git a/demo/README b/demo/README
index 0b584ceb15..0bec820ad6 100644
--- a/demo/README
+++ b/demo/README
@@ -1,9 +1,13 @@
+DO NOT USE THESE DEMO SERVERS IN PRODUCTION
+
 Requires you to have done:
     python setup.py develop
 
 
-The demo start.sh will start three synapse servers on ports 8080, 8081 and 8082, with host names localhost:$port. This can be easily changed to `hostname`:$port in start.sh if required. 
-It will also start a web server on port 8000 pointed at the webclient.
+The demo start.sh will start three synapse servers on ports 8080, 8081 and 8082, with host names localhost:$port. This can be easily changed to `hostname`:$port in start.sh if required.
+
+To enable the servers to communicate, untrusted TLS certs are used. Because of this the servers do not verify each
+other's certs and are configured in a highly insecure way. Do not use these configuration files in production.
 
 stop.sh will stop the synapse servers and the webclient.
 
diff --git a/demo/start.sh b/demo/start.sh
index 5c3a8fe61f..1c4f12d0bb 100755
--- a/demo/start.sh
+++ b/demo/start.sh
@@ -27,8 +27,70 @@ for port in 8080 8081 8082; do
         --config-path "$DIR/etc/$port.config" \
         --report-stats no
 
-    printf '\n\n# Customisation made by demo/start.sh\n' >> $DIR/etc/$port.config
-    echo 'enable_registration: true' >> $DIR/etc/$port.config
+    if ! grep -F "Customisation made by demo/start.sh" -q  $DIR/etc/$port.config; then
+        printf '\n\n# Customisation made by demo/start.sh\n' >> $DIR/etc/$port.config
+        
+        echo 'enable_registration: true' >> $DIR/etc/$port.config
+
+        # Warning, this heredoc depends on the interaction of tabs and spaces. Please don't
+        # accidentally bork me with your fancy settings.
+		listeners=$(cat <<-PORTLISTENERS
+		# Configure server to listen on both $https_port and $port
+		# This overides some of the default settings above
+		listeners:
+		  - port: $https_port
+		    type: http
+		    tls: true
+		    resources:
+		      - names: [client, federation]
+		
+		  - port: $port
+		    tls: false
+		    bind_addresses: ['::1', '127.0.0.1']
+		    type: http
+		    x_forwarded: true
+		    resources:
+		      - names: [client, federation]
+		        compress: false
+		PORTLISTENERS
+		)
+        echo "${listeners}" >> $DIR/etc/$port.config
+
+        # Disable tls for the servers
+        printf '\n\n# Disable tls on the servers.' >> $DIR/etc/$port.config
+        echo '# DO NOT USE IN PRODUCTION' >> $DIR/etc/$port.config
+        echo 'use_insecure_ssl_client_just_for_testing_do_not_use: true' >> $DIR/etc/$port.config
+        echo 'federation_verify_certificates: false' >> $DIR/etc/$port.config
+
+        # Set tls paths
+        echo "tls_certificate_path: \"$DIR/etc/localhost:$https_port.tls.crt\"" >> $DIR/etc/$port.config
+        echo "tls_private_key_path: \"$DIR/etc/localhost:$https_port.tls.key\"" >> $DIR/etc/$port.config
+
+        # Generate tls keys
+        openssl req -x509 -newkey rsa:4096 -keyout $DIR/etc/localhost\:$https_port.tls.key -out $DIR/etc/localhost\:$https_port.tls.crt -days 365 -nodes -subj "/O=matrix"
+        
+        # Ignore keys from the trusted keys server
+        echo '# Ignore keys from the trusted keys server' >> $DIR/etc/$port.config
+        echo 'trusted_key_servers:' >> $DIR/etc/$port.config
+        echo '  - server_name: "matrix.org"' >> $DIR/etc/$port.config
+        echo '    accept_keys_insecurely: true' >> $DIR/etc/$port.config
+
+        # Reduce the blacklist
+        blacklist=$(cat <<-BLACK
+		# Set the blacklist so that it doesn't include 127.0.0.1
+		federation_ip_range_blacklist:
+		  - '10.0.0.0/8'
+		  - '172.16.0.0/12'
+		  - '192.168.0.0/16'
+		  - '100.64.0.0/10'
+		  - '169.254.0.0/16'
+		  - '::1/128'
+		  - 'fe80::/64'
+		  - 'fc00::/7'
+		BLACK
+		)
+        echo "${blacklist}" >> $DIR/etc/$port.config
+    fi
 
     # Check script parameters
     if [ $# -eq 1 ]; then

From 2d6308a043153b149ead23b1a6567a03ae20496b Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Mon, 17 Jun 2019 15:52:15 +0100
Subject: [PATCH 423/429] Newsfile

---
 changelog.d/5474.feature | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/5474.feature

diff --git a/changelog.d/5474.feature b/changelog.d/5474.feature
new file mode 100644
index 0000000000..63d9b58734
--- /dev/null
+++ b/changelog.d/5474.feature
@@ -0,0 +1 @@
+Allow server admins to define implementations of extra rules for allowing or denying incoming events.

From 187d2837a9fc9d5b9e585f3a7f0f54f2ceac7d1b Mon Sep 17 00:00:00 2001
From: Erik Johnston 
Date: Mon, 17 Jun 2019 15:48:57 +0100
Subject: [PATCH 424/429] Add third party rules hook into create room

---
 synapse/events/third_party_rules.py | 27 ++++++++++++++++++++++++---
 synapse/handlers/room.py            | 25 ++++++++++++++++++++++++-
 2 files changed, 48 insertions(+), 4 deletions(-)

diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py
index 9f98d51523..ee7b97ad39 100644
--- a/synapse/events/third_party_rules.py
+++ b/synapse/events/third_party_rules.py
@@ -17,8 +17,8 @@ from twisted.internet import defer
 
 
 class ThirdPartyEventRules(object):
-    """Allows server admins to provide a Python module implementing an extra set of rules
-    to apply when processing events.
+    """Allows server admins to provide a Python module implementing an extra
+    set of rules to apply when processing events.
 
     This is designed to help admins of closed federations with enforcing custom
     behaviours.
@@ -46,7 +46,7 @@ class ThirdPartyEventRules(object):
             context (synapse.events.snapshot.EventContext): The context of the event.
 
         Returns:
-            defer.Deferred(bool), True if the event should be allowed, False if not.
+            defer.Deferred[bool]: True if the event should be allowed, False if not.
         """
         if self.third_party_rules is None:
             defer.returnValue(True)
@@ -60,3 +60,24 @@ class ThirdPartyEventRules(object):
 
         ret = yield self.third_party_rules.check_event_allowed(event, state_events)
         defer.returnValue(ret)
+
+    @defer.inlineCallbacks
+    def on_create_room(self, requester, config, is_requester_admin):
+        """Intercept requests to create room to allow, deny or update the
+        request config.
+
+        Args:
+            requester (Requester)
+            config (dict): The creation config from the client.
+            is_requester_admin (bool): Whether the requester is an admin.
+
+        Returns:
+            defer.Deferred
+        """
+
+        if self.third_party_rules is None:
+            return
+
+        yield self.third_party_rules.on_create_room(
+            requester, config, is_requester_admin
+        )
diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py
index 4a17911a87..74793bab33 100644
--- a/synapse/handlers/room.py
+++ b/synapse/handlers/room.py
@@ -75,6 +75,10 @@ class RoomCreationHandler(BaseHandler):
         # linearizer to stop two upgrades happening at once
         self._upgrade_linearizer = Linearizer("room_upgrade_linearizer")
 
+        self._server_notices_mxid = hs.config.server_notices_mxid
+
+        self.third_party_event_rules = hs.get_third_party_event_rules()
+
     @defer.inlineCallbacks
     def upgrade_room(self, requester, old_room_id, new_version):
         """Replace a room with a new room with a different version
@@ -470,7 +474,26 @@ class RoomCreationHandler(BaseHandler):
 
         yield self.auth.check_auth_blocking(user_id)
 
-        if not self.spam_checker.user_may_create_room(user_id):
+        if (self._server_notices_mxid is not None and
+                requester.user.to_string() == self._server_notices_mxid):
+            # allow the server notices mxid to create rooms
+            is_requester_admin = True
+        else:
+            is_requester_admin = yield self.auth.is_server_admin(
+                requester.user,
+            )
+
+        # Check whether the third party rules allow/modify the room creation
+        # request.
+        yield self.third_party_event_rules.on_create_room(
+            requester,
+            config,
+            is_requester_admin=is_requester_admin,
+        )
+
+        if not is_requester_admin and not self.spam_checker.user_may_create_room(
+            user_id,
+        ):
             raise SynapseError(403, "You are not permitted to create rooms")
 
         if ratelimit:
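
For context, a minimal sketch of the module side of this hook follows. The class and its rules are hypothetical; the only interface assumed is what ThirdPartyEventRules calls above (the module is constructed with config, and exposes check_event_allowed and on_create_room):

    from twisted.internet import defer


    class ExampleRulesModule(object):
        # Hypothetical third party event rules module (not part of this patch).

        def __init__(self, config):
            self.config = config

        def check_event_allowed(self, event, state_events):
            # Allow everything except an invented forbidden event type.
            return defer.succeed(event.type != "com.example.forbidden")

        def on_create_room(self, requester, config, is_requester_admin):
            # Default non-admin room creations to a private preset; leave admins alone.
            if not is_requester_admin:
                config.setdefault("preset", "private_chat")
            return defer.succeed(None)

Returning a Deferred (or a plain value) is fine here, since the hooks above drive the module with inlineCallbacks-style yields.
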

From 25d16fea780b071199ceb97bcf6a9eb21e4509b9 Mon Sep 17 00:00:00 2001
From: Jorik Schellekens 
Date: Mon, 17 Jun 2019 16:50:31 +0100
Subject: [PATCH 425/429] Changelog

---
 changelog.d/5478.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/5478.misc

diff --git a/changelog.d/5478.misc b/changelog.d/5478.misc
new file mode 100644
index 0000000000..829bb1e521
--- /dev/null
+++ b/changelog.d/5478.misc
@@ -0,0 +1 @@
+The demo servers talk to each other again.

From 112cf5a73a12c1618414f0e2ef4153bf6d4a89f9 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Mon, 17 Jun 2019 16:27:47 +0100
Subject: [PATCH 426/429] Add third party rules hook for 3PID invites

---
 synapse/events/third_party_rules.py | 32 ++++++++++++++++++++++++++++-
 synapse/handlers/room_member.py     | 10 +++++++++
 2 files changed, 41 insertions(+), 1 deletion(-)

diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py
index ee7b97ad39..768cfa8e9c 100644
--- a/synapse/events/third_party_rules.py
+++ b/synapse/events/third_party_rules.py
@@ -35,7 +35,10 @@ class ThirdPartyEventRules(object):
             module, config = hs.config.third_party_event_rules
 
         if module is not None:
-            self.third_party_rules = module(config=config)
+            self.third_party_rules = module(
+                config=config,
+                http_client=hs.get_simple_http_client(),
+            )
 
     @defer.inlineCallbacks
     def check_event_allowed(self, event, context):
@@ -81,3 +84,30 @@ class ThirdPartyEventRules(object):
         yield self.third_party_rules.on_create_room(
             requester, config, is_requester_admin
         )
+
+    def check_threepid_can_be_invited(self, medium, address, room_id):
+        """Check if a provided 3PID can be invited in the given room.
+
+        Args:
+            medium (str): The 3PID's medium.
+            address (str): The 3PID's address.
+            room_id (str): The room we want to invite the threepid to.
+
+        Returns:
+            defer.Deferred[bool]: True if the 3PID can be invited, False if not.
+        """
+
+        if self.third_party_rules is None:
+            defer.returnValue(True)
+
+        state_ids = yield self.store.get_filtered_current_state_ids(room_id)
+        room_state_events = yield self.store.get_events(state_ids.values())
+
+        state_events = {}
+        for key, event_id in state_ids.items():
+            state_events[key] = room_state_events[event_id]
+
+        ret = yield self.third_party_rules.check_threepid_can_be_invited(
+            medium, address, state_events,
+        )
+        defer.returnValue(ret)
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 93ac986c86..458902bb7e 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -72,6 +72,7 @@ class RoomMemberHandler(object):
 
         self.clock = hs.get_clock()
         self.spam_checker = hs.get_spam_checker()
+        self.third_party_event_rules = hs.get_third_party_event_rules()
         self._server_notices_mxid = self.config.server_notices_mxid
         self._enable_lookup = hs.config.enable_3pid_lookup
         self.allow_per_room_profiles = self.config.allow_per_room_profiles
@@ -723,6 +724,15 @@ class RoomMemberHandler(object):
         # can't just rely on the standard ratelimiting of events.
         yield self.base_handler.ratelimit(requester)
 
+        can_invite = yield self.third_party_event_rules.check_threepid_can_be_invited(
+            medium, address, room_id,
+        )
+        if not can_invite:
+            raise SynapseError(
+                403, "This third-party identifier can not be invited in this room",
+                Codes.FORBIDDEN,
+            )
+
         invitee = yield self._lookup_3pid(
             id_server, medium, address
         )
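
A hypothetical module-side counterpart for this new hook (names invented; per the wrapper above, the module receives the room's current state keyed by (event type, state key) rather than a room ID):

    from twisted.internet import defer


    class ExampleRulesModule(object):
        def check_threepid_can_be_invited(self, medium, address, state_events):
            # Only allow email invites for a single, made-up domain.
            if medium == "email" and not address.endswith("@example.com"):
                return defer.succeed(False)
            return defer.succeed(True)
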

From 9ce4220d6ca96644b00e4c014d7ee35505ca8b84 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Mon, 17 Jun 2019 16:33:16 +0100
Subject: [PATCH 427/429] Changelog

---
 changelog.d/5477.feature | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/5477.feature

diff --git a/changelog.d/5477.feature b/changelog.d/5477.feature
new file mode 100644
index 0000000000..63d9b58734
--- /dev/null
+++ b/changelog.d/5477.feature
@@ -0,0 +1 @@
+Allow server admins to define implementations of extra rules for allowing or denying incoming events.

From 33ea87be3926c2cf5bd57170a808a20217433ad6 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier 
Date: Mon, 17 Jun 2019 17:39:38 +0100
Subject: [PATCH 428/429] Make check_threepid_can_be_invited async

---
 synapse/events/third_party_rules.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py
index 768cfa8e9c..50ceeb1e8e 100644
--- a/synapse/events/third_party_rules.py
+++ b/synapse/events/third_party_rules.py
@@ -85,6 +85,7 @@ class ThirdPartyEventRules(object):
             requester, config, is_requester_admin
         )
 
+    @defer.inlineCallbacks
     def check_threepid_can_be_invited(self, medium, address, room_id):
         """Check if a provided 3PID can be invited in the given room.
 

From 82d9d524bdb8ce2f54855589e44ce3a20ba1af37 Mon Sep 17 00:00:00 2001
From: cclauss 
Date: Mon, 17 Jun 2019 19:21:30 +0200
Subject: [PATCH 429/429] Fix seven contrib files with Python syntax errors
 (#5446)

* Fix seven contrib files with Python syntax errors

Signed-off-by: cclauss 
---
 changelog.d/5446.misc                      |   1 +
 contrib/cmdclient/console.py               | 109 +++++++++++----------
 contrib/cmdclient/http.py                  |  13 +--
 contrib/graph/graph.py                     |   5 +-
 contrib/graph/graph3.py                    |  21 ++--
 contrib/jitsimeetbridge/jitsimeetbridge.py |  67 ++++++-------
 contrib/scripts/kick_users.py              |  34 ++++---
 7 files changed, 131 insertions(+), 119 deletions(-)
 create mode 100644 changelog.d/5446.misc

diff --git a/changelog.d/5446.misc b/changelog.d/5446.misc
new file mode 100644
index 0000000000..e5209be0a6
--- /dev/null
+++ b/changelog.d/5446.misc
@@ -0,0 +1 @@
+Update Python syntax in contrib/ to Python 3. 
diff --git a/contrib/cmdclient/console.py b/contrib/cmdclient/console.py
index 4918fa1a9a..462f146113 100755
--- a/contrib/cmdclient/console.py
+++ b/contrib/cmdclient/console.py
@@ -15,6 +15,7 @@
 # limitations under the License.
 
 """ Starts a synapse client console. """
+from __future__ import print_function
 
 from twisted.internet import reactor, defer, threads
 from http import TwistedHttpClient
@@ -109,7 +110,7 @@ class SynapseCmd(cmd.Cmd):
         by using $. E.g. 'config roomid room1' then 'raw get /rooms/$roomid'.
         """
         if len(line) == 0:
-            print json.dumps(self.config, indent=4)
+            print(json.dumps(self.config, indent=4))
             return
 
         try:
@@ -123,8 +124,8 @@ class SynapseCmd(cmd.Cmd):
             ]
             for key, valid_vals in config_rules:
                 if key == args["key"] and args["val"] not in valid_vals:
-                    print "%s value must be one of %s" % (args["key"],
-                                                          valid_vals)
+                    print("%s value must be one of %s" % (args["key"],
+                                                          valid_vals))
                     return
 
             # toggle the http client verbosity
@@ -133,11 +134,11 @@ class SynapseCmd(cmd.Cmd):
 
             # assign the new config
             self.config[args["key"]] = args["val"]
-            print json.dumps(self.config, indent=4)
+            print(json.dumps(self.config, indent=4))
 
             save_config(self.config)
         except Exception as e:
-            print e
+            print(e)
 
     def do_register(self, line):
         """Registers for a new account: "register  "
@@ -153,7 +154,7 @@ class SynapseCmd(cmd.Cmd):
             pwd = getpass.getpass("Type a password for this user: ")
             pwd2 = getpass.getpass("Retype the password: ")
             if pwd != pwd2 or len(pwd) == 0:
-                print "Password mismatch."
+                print("Password mismatch.")
                 pwd = None
             else:
                 password = pwd
@@ -174,12 +175,12 @@ class SynapseCmd(cmd.Cmd):
         # check the registration flows
         url = self._url() + "/register"
         json_res = yield self.http_client.do_request("GET", url)
-        print json.dumps(json_res, indent=4)
+        print(json.dumps(json_res, indent=4))
 
         passwordFlow = None
         for flow in json_res["flows"]:
             if flow["type"] == "m.login.recaptcha" or ("stages" in flow and "m.login.recaptcha" in flow["stages"]):
-                print "Unable to register: Home server requires captcha."
+                print("Unable to register: Home server requires captcha.")
                 return
             if flow["type"] == "m.login.password" and "stages" not in flow:
                 passwordFlow = flow
@@ -189,7 +190,7 @@ class SynapseCmd(cmd.Cmd):
             return
 
         json_res = yield self.http_client.do_request("POST", url, data=data)
-        print json.dumps(json_res, indent=4)
+        print(json.dumps(json_res, indent=4))
         if update_config and "user_id" in json_res:
             self.config["user"] = json_res["user_id"]
             self.config["token"] = json_res["access_token"]
@@ -215,7 +216,7 @@ class SynapseCmd(cmd.Cmd):
                 reactor.callFromThread(self._do_login, user, p)
                 #print " got %s " % p
         except Exception as e:
-            print e
+            print(e)
 
     @defer.inlineCallbacks
     def _do_login(self, user, password):
@@ -227,13 +228,13 @@ class SynapseCmd(cmd.Cmd):
         }
         url = self._url() + path
         json_res = yield self.http_client.do_request("POST", url, data=data)
-        print json_res
+        print(json_res)
 
         if "access_token" in json_res:
             self.config["user"] = user
             self.config["token"] = json_res["access_token"]
             save_config(self.config)
-            print "Login successful."
+            print("Login successful.")
 
     @defer.inlineCallbacks
     def _check_can_login(self):
@@ -242,10 +243,10 @@ class SynapseCmd(cmd.Cmd):
         # submitting!
         url = self._url() + path
         json_res = yield self.http_client.do_request("GET", url)
-        print json_res
+        print(json_res)
 
         if "flows" not in json_res:
-            print "Failed to find any login flows."
+            print("Failed to find any login flows.")
             defer.returnValue(False)
 
         flow = json_res["flows"][0] # assume first is the one we want.
@@ -275,9 +276,9 @@ class SynapseCmd(cmd.Cmd):
 
         json_res = yield self.http_client.do_request("POST", url, data=urllib.urlencode(args), jsonreq=False,
                                                      headers={'Content-Type': ['application/x-www-form-urlencoded']})
-        print json_res
+        print(json_res)
         if 'sid' in json_res:
-            print "Token sent. Your session ID is %s" % (json_res['sid'])
+            print("Token sent. Your session ID is %s" % (json_res['sid']))
 
     def do_emailvalidate(self, line):
         """Validate and associate a third party ID
@@ -297,7 +298,7 @@ class SynapseCmd(cmd.Cmd):
 
         json_res = yield self.http_client.do_request("POST", url, data=urllib.urlencode(args), jsonreq=False,
                                                      headers={'Content-Type': ['application/x-www-form-urlencoded']})
-        print json_res
+        print(json_res)
 
     def do_3pidbind(self, line):
         """Validate and associate a third party ID
@@ -317,7 +318,7 @@ class SynapseCmd(cmd.Cmd):
 
         json_res = yield self.http_client.do_request("POST", url, data=urllib.urlencode(args), jsonreq=False,
                                                      headers={'Content-Type': ['application/x-www-form-urlencoded']})
-        print json_res
+        print(json_res)
 
     def do_join(self, line):
         """Joins a room: "join " """
@@ -325,7 +326,7 @@ class SynapseCmd(cmd.Cmd):
             args = self._parse(line, ["roomid"], force_keys=True)
             self._do_membership_change(args["roomid"], "join", self._usr())
         except Exception as e:
-            print e
+            print(e)
 
     def do_joinalias(self, line):
         try:
@@ -333,7 +334,7 @@ class SynapseCmd(cmd.Cmd):
             path = "/join/%s" % urllib.quote(args["roomname"])
             reactor.callFromThread(self._run_and_pprint, "POST", path, {})
         except Exception as e:
-            print e
+            print(e)
 
     def do_topic(self, line):
         """"topic [set|get]  []"
@@ -343,17 +344,17 @@ class SynapseCmd(cmd.Cmd):
         try:
             args = self._parse(line, ["action", "roomid", "topic"])
             if "action" not in args or "roomid" not in args:
-                print "Must specify set|get and a room ID."
+                print("Must specify set|get and a room ID.")
                 return
             if args["action"].lower() not in ["set", "get"]:
-                print "Must specify set|get, not %s" % args["action"]
+                print("Must specify set|get, not %s" % args["action"])
                 return
 
             path = "/rooms/%s/topic" % urllib.quote(args["roomid"])
 
             if args["action"].lower() == "set":
                 if "topic" not in args:
-                    print "Must specify a new topic."
+                    print("Must specify a new topic.")
                     return
                 body = {
                     "topic": args["topic"]
@@ -362,7 +363,7 @@ class SynapseCmd(cmd.Cmd):
             elif args["action"].lower() == "get":
                 reactor.callFromThread(self._run_and_pprint, "GET", path)
         except Exception as e:
-            print e
+            print(e)
 
     def do_invite(self, line):
         """Invite a user to a room: "invite  " """
@@ -373,7 +374,7 @@ class SynapseCmd(cmd.Cmd):
 
             reactor.callFromThread(self._do_invite, args["roomid"], user_id)
         except Exception as e:
-            print e
+            print(e)
 
     @defer.inlineCallbacks
     def _do_invite(self, roomid, userstring):
@@ -393,29 +394,29 @@ class SynapseCmd(cmd.Cmd):
                 if 'public_key' in pubKeyObj:
                     pubKey = nacl.signing.VerifyKey(pubKeyObj['public_key'], encoder=nacl.encoding.HexEncoder)
                 else:
-                    print "No public key found in pubkey response!"
+                    print("No public key found in pubkey response!")
 
                 sigValid = False
 
                 if pubKey:
                     for signame in json_res['signatures']:
                         if signame not in TRUSTED_ID_SERVERS:
-                            print "Ignoring signature from untrusted server %s" % (signame)
+                            print("Ignoring signature from untrusted server %s" % (signame))
                         else:
                             try:
                                 verify_signed_json(json_res, signame, pubKey)
                                 sigValid = True
-                                print "Mapping %s -> %s correctly signed by %s" % (userstring, json_res['mxid'], signame)
+                                print("Mapping %s -> %s correctly signed by %s" % (userstring, json_res['mxid'], signame))
                                 break
                             except SignatureVerifyException as e:
-                                print "Invalid signature from %s" % (signame)
-                                print e
+                                print("Invalid signature from %s" % (signame))
+                                print(e)
 
                 if sigValid:
-                    print "Resolved 3pid %s to %s" % (userstring, json_res['mxid'])
+                    print("Resolved 3pid %s to %s" % (userstring, json_res['mxid']))
                     mxid = json_res['mxid']
                 else:
-                    print "Got association for %s but couldn't verify signature" % (userstring)
+                    print("Got association for %s but couldn't verify signature" % (userstring))
 
             if not mxid:
                 mxid = "@" + userstring + ":" + self._domain()
@@ -428,7 +429,7 @@ class SynapseCmd(cmd.Cmd):
             args = self._parse(line, ["roomid"], force_keys=True)
             self._do_membership_change(args["roomid"], "leave", self._usr())
         except Exception as e:
-            print e
+            print(e)
 
     def do_send(self, line):
         """Sends a message. "send  " """
@@ -453,10 +454,10 @@ class SynapseCmd(cmd.Cmd):
         """
         args = self._parse(line, ["type", "roomid", "qp"])
         if not "type" in args or not "roomid" in args:
-            print "Must specify type and room ID."
+            print("Must specify type and room ID.")
             return
         if args["type"] not in ["members", "messages"]:
-            print "Unrecognised type: %s" % args["type"]
+            print("Unrecognised type: %s" % args["type"])
             return
         room_id = args["roomid"]
         path = "/rooms/%s/%s" % (urllib.quote(room_id), args["type"])
@@ -468,7 +469,7 @@ class SynapseCmd(cmd.Cmd):
                     key_value = key_value_str.split("=")
                     qp[key_value[0]] = key_value[1]
                 except:
-                    print "Bad query param: %s" % key_value
+                    print("Bad query param: %s" % key_value)
                     return
 
         reactor.callFromThread(self._run_and_pprint, "GET", path,
@@ -508,14 +509,14 @@ class SynapseCmd(cmd.Cmd):
         args = self._parse(line, ["method", "path", "data"])
         # sanity check
         if "method" not in args or "path" not in args:
-            print "Must specify path and method."
+            print("Must specify path and method.")
             return
 
         args["method"] = args["method"].upper()
         valid_methods = ["PUT", "GET", "POST", "DELETE",
                          "XPUT", "XGET", "XPOST", "XDELETE"]
         if args["method"] not in valid_methods:
-            print "Unsupported method: %s" % args["method"]
+            print("Unsupported method: %s" % args["method"])
             return
 
         if "data" not in args:
@@ -524,7 +525,7 @@ class SynapseCmd(cmd.Cmd):
             try:
                 args["data"] = json.loads(args["data"])
             except Exception as e:
-                print "Data is not valid JSON. %s" % e
+                print("Data is not valid JSON. %s" % e)
                 return
 
         qp = {"access_token": self._tok()}
@@ -553,7 +554,7 @@ class SynapseCmd(cmd.Cmd):
             try:
                 timeout = int(args["timeout"])
             except ValueError:
-                print "Timeout must be in milliseconds."
+                print("Timeout must be in milliseconds.")
                 return
         reactor.callFromThread(self._do_event_stream, timeout)
 
@@ -566,7 +567,7 @@ class SynapseCmd(cmd.Cmd):
                     "timeout": str(timeout),
                     "from": self.event_stream_token
                 })
-        print json.dumps(res, indent=4)
+        print(json.dumps(res, indent=4))
 
         if "chunk" in res:
             for event in res["chunk"]:
@@ -669,9 +670,9 @@ class SynapseCmd(cmd.Cmd):
                                                     data=data,
                                                     qparams=query_params)
         if alt_text:
-            print alt_text
+            print(alt_text)
         else:
-            print json.dumps(json_res, indent=4)
+            print(json.dumps(json_res, indent=4))
 
 
 def save_config(config):
@@ -680,16 +681,16 @@ def save_config(config):
 
 
 def main(server_url, identity_server_url, username, token, config_path):
-    print "Synapse command line client"
-    print "==========================="
-    print "Server: %s" % server_url
-    print "Type 'help' to get started."
-    print "Close this console with CTRL+C then CTRL+D."
+    print("Synapse command line client")
+    print("===========================")
+    print("Server: %s" % server_url)
+    print("Type 'help' to get started.")
+    print("Close this console with CTRL+C then CTRL+D.")
     if not username or not token:
-        print "-  'register ' - Register an account"
-        print "-  'stream' - Connect to the event stream"
-        print "-  'create ' - Create a room"
-        print "-  'send  ' - Send a message"
+        print("-  'register ' - Register an account")
+        print("-  'stream' - Connect to the event stream")
+        print("-  'create ' - Create a room")
+        print("-  'send  ' - Send a message")
     http_client = TwistedHttpClient()
 
     # the command line client
@@ -705,7 +706,7 @@ def main(server_url, identity_server_url, username, token, config_path):
                 http_client.verbose = "on" == syn_cmd.config["verbose"]
             except:
                 pass
-            print "Loaded config from %s" % config_path
+            print("Loaded config from %s" % config_path)
     except:
         pass
 
@@ -736,7 +737,7 @@ if __name__ == '__main__':
     args = parser.parse_args()
 
     if not args.server:
-        print "You must supply a server URL to communicate with."
+        print("You must supply a server URL to communicate with.")
         parser.print_help()
         sys.exit(1)
 
diff --git a/contrib/cmdclient/http.py b/contrib/cmdclient/http.py
index c833f3f318..1bd600e148 100644
--- a/contrib/cmdclient/http.py
+++ b/contrib/cmdclient/http.py
@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from __future__ import print_function
 from twisted.web.client import Agent, readBody
 from twisted.web.http_headers import Headers
 from twisted.internet import defer, reactor
@@ -141,15 +142,15 @@ class TwistedHttpClient(HttpClient):
         headers_dict["User-Agent"] = ["Synapse Cmd Client"]
 
         retries_left = 5
-        print "%s to %s with headers %s" % (method, url, headers_dict)
+        print("%s to %s with headers %s" % (method, url, headers_dict))
         if self.verbose and producer:
             if "password" in producer.data:
                 temp = producer.data["password"]
                 producer.data["password"] = "[REDACTED]"
-                print json.dumps(producer.data, indent=4)
+                print(json.dumps(producer.data, indent=4))
                 producer.data["password"] = temp
             else:
-                print json.dumps(producer.data, indent=4)
+                print(json.dumps(producer.data, indent=4))
 
         while True:
             try:
@@ -161,7 +162,7 @@ class TwistedHttpClient(HttpClient):
                 )
                 break
             except Exception as e:
-                print "uh oh: %s" % e
+                print("uh oh: %s" % e)
                 if retries_left:
                     yield self.sleep(2 ** (5 - retries_left))
                     retries_left -= 1
@@ -169,8 +170,8 @@ class TwistedHttpClient(HttpClient):
                     raise e
 
         if self.verbose:
-            print "Status %s %s" % (response.code, response.phrase)
-            print pformat(list(response.headers.getAllRawHeaders()))
+            print("Status %s %s" % (response.code, response.phrase))
+            print(pformat(list(response.headers.getAllRawHeaders())))
         defer.returnValue(response)
 
     def sleep(self, seconds):
diff --git a/contrib/graph/graph.py b/contrib/graph/graph.py
index afd1d446b4..e174ff5026 100644
--- a/contrib/graph/graph.py
+++ b/contrib/graph/graph.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 # Copyright 2014-2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -48,7 +49,7 @@ def make_graph(pdus, room, filename_prefix):
             c = colors.pop()
             color_map[o] = c
         except:
-            print "Run out of colours!"
+            print("Run out of colours!")
             color_map[o] = "black"
 
     graph = pydot.Dot(graph_name="Test")
@@ -93,7 +94,7 @@ def make_graph(pdus, room, filename_prefix):
             end_name = make_name(i, o)
 
             if end_name not in node_map:
-                print "%s not in nodes" % end_name
+                print("%s not in nodes" % end_name)
                 continue
 
             edge = pydot.Edge(node_map[start_name], node_map[end_name])
diff --git a/contrib/graph/graph3.py b/contrib/graph/graph3.py
index 7d3b4d7eb6..fe1dc81e90 100644
--- a/contrib/graph/graph3.py
+++ b/contrib/graph/graph3.py
@@ -1,3 +1,4 @@
+from __future__ import print_function
 # Copyright 2016 OpenMarket Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -26,19 +27,19 @@ from six import string_types
 
 
 def make_graph(file_name, room_id, file_prefix, limit):
-    print "Reading lines"
+    print("Reading lines")
     with open(file_name) as f:
         lines = f.readlines()
 
-    print "Read lines"
+    print("Read lines")
 
     events = [FrozenEvent(json.loads(line)) for line in lines]
 
-    print "Loaded events."
+    print("Loaded events.")
 
     events.sort(key=lambda e: e.depth)
 
-    print "Sorted events"
+    print("Sorted events")
 
     if limit:
         events = events[-int(limit):]
@@ -55,7 +56,7 @@ def make_graph(file_name, room_id, file_prefix, limit):
         content = json.dumps(unfreeze(event.get_dict()["content"]), indent=4)
         content = content.replace("\n", "
\n") - print content + print(content) content = [] for key, value in unfreeze(event.get_dict()["content"]).items(): if value is None: @@ -74,7 +75,7 @@ def make_graph(file_name, room_id, file_prefix, limit): content = "
\n".join(content) - print content + print(content) label = ( "<" @@ -102,7 +103,7 @@ def make_graph(file_name, room_id, file_prefix, limit): node_map[event.event_id] = node graph.add_node(node) - print "Created Nodes" + print("Created Nodes") for event in events: for prev_id, _ in event.prev_events: @@ -120,15 +121,15 @@ def make_graph(file_name, room_id, file_prefix, limit): edge = pydot.Edge(node_map[event.event_id], end_node) graph.add_edge(edge) - print "Created edges" + print("Created edges") graph.write('%s.dot' % file_prefix, format='raw', prog='dot') - print "Created Dot" + print("Created Dot") graph.write_svg("%s.svg" % file_prefix, prog='dot') - print "Created svg" + print("Created svg") if __name__ == "__main__": parser = argparse.ArgumentParser( diff --git a/contrib/jitsimeetbridge/jitsimeetbridge.py b/contrib/jitsimeetbridge/jitsimeetbridge.py index 15f8e1c48b..e82d1be5d2 100644 --- a/contrib/jitsimeetbridge/jitsimeetbridge.py +++ b/contrib/jitsimeetbridge/jitsimeetbridge.py @@ -8,8 +8,9 @@ we set the remote SDP at which point the stream ends. Our video never gets to the bridge. Requires: -npm install jquery jsdom +npm install jquery jsdom """ +from __future__ import print_function import gevent import grequests @@ -51,7 +52,7 @@ class TrivialMatrixClient: req = grequests.get(url) resps = grequests.map([req]) obj = json.loads(resps[0].content) - print "incoming from matrix",obj + print("incoming from matrix",obj) if 'end' not in obj: continue self.token = obj['end'] @@ -60,22 +61,22 @@ class TrivialMatrixClient: def joinRoom(self, roomId): url = MATRIXBASE+'rooms/'+roomId+'/join?access_token='+self.access_token - print url + print(url) headers={ 'Content-Type': 'application/json' } req = grequests.post(url, headers=headers, data='{}') resps = grequests.map([req]) obj = json.loads(resps[0].content) - print "response: ",obj + print("response: ",obj) def sendEvent(self, roomId, evType, event): url = MATRIXBASE+'rooms/'+roomId+'/send/'+evType+'?access_token='+self.access_token - print url - print json.dumps(event) + print(url) + print(json.dumps(event)) headers={ 'Content-Type': 'application/json' } req = grequests.post(url, headers=headers, data=json.dumps(event)) resps = grequests.map([req]) obj = json.loads(resps[0].content) - print "response: ",obj + print("response: ",obj) @@ -85,31 +86,31 @@ xmppClients = {} def matrixLoop(): while True: ev = matrixCli.getEvent() - print ev + print(ev) if ev['type'] == 'm.room.member': - print 'membership event' + print('membership event') if ev['membership'] == 'invite' and ev['state_key'] == MYUSERNAME: roomId = ev['room_id'] - print "joining room %s" % (roomId) + print("joining room %s" % (roomId)) matrixCli.joinRoom(roomId) elif ev['type'] == 'm.room.message': if ev['room_id'] in xmppClients: - print "already have a bridge for that user, ignoring" + print("already have a bridge for that user, ignoring") continue - print "got message, connecting" + print("got message, connecting") xmppClients[ev['room_id']] = TrivialXmppClient(ev['room_id'], ev['user_id']) gevent.spawn(xmppClients[ev['room_id']].xmppLoop) elif ev['type'] == 'm.call.invite': - print "Incoming call" + print("Incoming call") #sdp = ev['content']['offer']['sdp'] #print "sdp: %s" % (sdp) #xmppClients[ev['room_id']] = TrivialXmppClient(ev['room_id'], ev['user_id']) #gevent.spawn(xmppClients[ev['room_id']].xmppLoop) elif ev['type'] == 'm.call.answer': - print "Call answered" + print("Call answered") sdp = ev['content']['answer']['sdp'] if ev['room_id'] not in xmppClients: - 
print "We didn't have a call for that room" + print("We didn't have a call for that room") continue # should probably check call ID too xmppCli = xmppClients[ev['room_id']] @@ -146,7 +147,7 @@ class TrivialXmppClient: return obj def sendAnswer(self, answer): - print "sdp from matrix client",answer + print("sdp from matrix client",answer) p = subprocess.Popen(['node', 'unjingle/unjingle.js', '--sdp'], stdin=subprocess.PIPE, stdout=subprocess.PIPE) jingle, out_err = p.communicate(answer) jingle = jingle % { @@ -156,28 +157,28 @@ class TrivialXmppClient: 'responder': self.jid, 'sid': self.callsid } - print "answer jingle from sdp",jingle + print("answer jingle from sdp",jingle) res = self.sendIq(jingle) - print "reply from answer: ",res + print("reply from answer: ",res) self.ssrcs = {} jingleSoup = BeautifulSoup(jingle) for cont in jingleSoup.iq.jingle.findAll('content'): if cont.description: self.ssrcs[cont['name']] = cont.description['ssrc'] - print "my ssrcs:",self.ssrcs + print("my ssrcs:",self.ssrcs) gevent.joinall([ gevent.spawn(self.advertiseSsrcs) ]) def advertiseSsrcs(self): - time.sleep(7) - print "SSRC spammer started" + time.sleep(7) + print("SSRC spammer started") while self.running: ssrcMsg = "%(nick)s" % { 'tojid': "%s@%s/%s" % (ROOMNAME, ROOMDOMAIN, self.shortJid), 'nick': self.userId, 'assrc': self.ssrcs['audio'], 'vssrc': self.ssrcs['video'] } res = self.sendIq(ssrcMsg) - print "reply from ssrc announce: ",res + print("reply from ssrc announce: ",res) time.sleep(10) @@ -186,19 +187,19 @@ class TrivialXmppClient: self.matrixCallId = time.time() res = self.xmppPoke("" % (self.nextRid(), HOST)) - print res + print(res) self.sid = res.body['sid'] - print "sid %s" % (self.sid) + print("sid %s" % (self.sid)) res = self.sendIq("") res = self.xmppPoke("" % (self.nextRid(), self.sid, HOST)) res = self.sendIq("") - print res + print(res) self.jid = res.body.iq.bind.jid.string - print "jid: %s" % (self.jid) + print("jid: %s" % (self.jid)) self.shortJid = self.jid.split('-')[0] res = self.sendIq("") @@ -217,13 +218,13 @@ class TrivialXmppClient: if p.c and p.c.nick: u['nick'] = p.c.nick.string self.muc['users'].append(u) - print "muc: ",self.muc + print("muc: ",self.muc) # wait for stuff while True: - print "waiting..." 
+ print("waiting...") res = self.sendIq("") - print "got from stream: ",res + print("got from stream: ",res) if res.body.iq: jingles = res.body.iq.findAll('jingle') if len(jingles): @@ -232,15 +233,15 @@ class TrivialXmppClient: elif 'type' in res.body and res.body['type'] == 'terminate': self.running = False del xmppClients[self.matrixRoom] - return + return def handleInvite(self, jingle): self.initiator = jingle['initiator'] self.callsid = jingle['sid'] p = subprocess.Popen(['node', 'unjingle/unjingle.js', '--jingle'], stdin=subprocess.PIPE, stdout=subprocess.PIPE) - print "raw jingle invite",str(jingle) + print("raw jingle invite",str(jingle)) sdp, out_err = p.communicate(str(jingle)) - print "transformed remote offer sdp",sdp + print("transformed remote offer sdp",sdp) inviteEvent = { 'offer': { 'type': 'offer', @@ -252,7 +253,7 @@ class TrivialXmppClient: } matrixCli.sendEvent(self.matrixRoom, 'm.call.invite', inviteEvent) -matrixCli = TrivialMatrixClient(ACCESS_TOKEN) +matrixCli = TrivialMatrixClient(ACCESS_TOKEN) # Undefined name gevent.joinall([ gevent.spawn(matrixLoop) diff --git a/contrib/scripts/kick_users.py b/contrib/scripts/kick_users.py index 5dfaec3ad0..b4a14385d0 100755 --- a/contrib/scripts/kick_users.py +++ b/contrib/scripts/kick_users.py @@ -1,10 +1,16 @@ #!/usr/bin/env python +from __future__ import print_function from argparse import ArgumentParser import json import requests import sys import urllib +try: + raw_input +except NameError: # Python 3 + raw_input = input + def _mkurl(template, kws): for key in kws: template = template.replace(key, kws[key]) @@ -13,7 +19,7 @@ def _mkurl(template, kws): def main(hs, room_id, access_token, user_id_prefix, why): if not why: why = "Automated kick." - print "Kicking members on %s in room %s matching %s" % (hs, room_id, user_id_prefix) + print("Kicking members on %s in room %s matching %s" % (hs, room_id, user_id_prefix)) room_state_url = _mkurl( "$HS/_matrix/client/api/v1/rooms/$ROOM/state?access_token=$TOKEN", { @@ -22,13 +28,13 @@ def main(hs, room_id, access_token, user_id_prefix, why): "$TOKEN": access_token } ) - print "Getting room state => %s" % room_state_url + print("Getting room state => %s" % room_state_url) res = requests.get(room_state_url) - print "HTTP %s" % res.status_code + print("HTTP %s" % res.status_code) state_events = res.json() if "error" in state_events: - print "FATAL" - print state_events + print("FATAL") + print(state_events) return kick_list = [] @@ -44,15 +50,15 @@ def main(hs, room_id, access_token, user_id_prefix, why): kick_list.append(event["state_key"]) if len(kick_list) == 0: - print "No user IDs match the prefix '%s'" % user_id_prefix + print("No user IDs match the prefix '%s'" % user_id_prefix) return - print "The following user IDs will be kicked from %s" % room_name + print("The following user IDs will be kicked from %s" % room_name) for uid in kick_list: - print uid + print(uid) doit = raw_input("Continue? [Y]es\n") if len(doit) > 0 and doit.lower() == 'y': - print "Kicking members..." 
+ print("Kicking members...") # encode them all kick_list = [urllib.quote(uid) for uid in kick_list] for uid in kick_list: @@ -69,14 +75,14 @@ def main(hs, room_id, access_token, user_id_prefix, why): "membership": "leave", "reason": why } - print "Kicking %s" % uid + print("Kicking %s" % uid) res = requests.put(kick_url, data=json.dumps(kick_body)) if res.status_code != 200: - print "ERROR: HTTP %s" % res.status_code + print("ERROR: HTTP %s" % res.status_code) if res.json().get("error"): - print "ERROR: JSON %s" % res.json() - - + print("ERROR: JSON %s" % res.json()) + + if __name__ == "__main__": parser = ArgumentParser("Kick members in a room matching a certain user ID prefix.")