From 41dab8a22236691c34d8db283e80483470ee8fce Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 23 Jan 2017 15:22:48 +0000
Subject: [PATCH 01/29] Fix bug where current_state_events renamed to
 current_state_ids

---
 synapse/state.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/synapse/state.py b/synapse/state.py
index 383d32b163..29e7604322 100644
--- a/synapse/state.py
+++ b/synapse/state.py
@@ -200,11 +200,11 @@ class StateHandler(object):
                 (s.type, s.state_key): s.event_id for s in old_state
             }
             if event.is_state():
-                context.current_state_events = dict(context.prev_state_ids)
+                context.current_state_ids = dict(context.prev_state_ids)
                 key = (event.type, event.state_key)
-                context.current_state_events[key] = event.event_id
+                context.current_state_ids[key] = event.event_id
             else:
-                context.current_state_events = context.prev_state_ids
+                context.current_state_ids = context.prev_state_ids
         else:
             context.current_state_ids = {}
             context.prev_state_ids = {}

From 961e242aaf75cfa7b0d8d75bdafeff3397885a71 Mon Sep 17 00:00:00 2001
From: Richard Kellner
Date: Mon, 20 Mar 2017 19:54:38 +0100
Subject: [PATCH 02/29] Added missing system requirement and pip upgrade
 before install

When installing on CentOS 7 I wasn't able to follow the README installation
instructions due to errors. I was missing libsodium, which is needed to
compile the Python dependencies. The default version of pip is very old, so
the setuptools upgrade failed as well. In order to continue I needed to
upgrade pip first.
---
 README.rst | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/README.rst b/README.rst
index a73c71c77e..5c9b87e349 100644
--- a/README.rst
+++ b/README.rst
@@ -111,7 +111,7 @@ Installing prerequisites on ArchLinux::
 Installing prerequisites on CentOS 7::
 
     sudo yum install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
-                     lcms2-devel libwebp-devel tcl-devel tk-devel \
+                     lcms2-devel libwebp-devel libsodium tcl-devel tk-devel \
                      python-virtualenv libffi-devel openssl-devel
     sudo yum groupinstall "Development Tools"
 
@@ -146,6 +146,7 @@ To install the synapse homeserver run::
 
     virtualenv -p python2.7 ~/.synapse
     source ~/.synapse/bin/activate
+    pip install --upgrade pip
    pip install --upgrade setuptools
    pip install https://github.com/matrix-org/synapse/tarball/master

From a1880563643d5f4b656190b8471f7f3d26b0e33a Mon Sep 17 00:00:00 2001
From: Richard Kellner
Date: Mon, 20 Mar 2017 19:58:24 +0100
Subject: [PATCH 03/29] Updated user creation section

The register_new_matrix_user command asks one more question; I have updated
the documentation to match reality.
---
 README.rst | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.rst b/README.rst
index 5c9b87e349..6cdaa055ca 100644
--- a/README.rst
+++ b/README.rst
@@ -229,6 +229,7 @@ To get started, it is easiest to use the command line to register new users::
     New user localpart: erikj
     Password:
     Confirm password:
+    Make admin [no]:
     Success!
 
 This process uses a setting ``registration_shared_secret`` in

From 8d1dd7eb3019dd8b7c02da04da59f161fe5a4eaa Mon Sep 17 00:00:00 2001
From: Richard Kellner
Date: Wed, 22 Mar 2017 22:49:32 +0100
Subject: [PATCH 04/29] Removed requirement that is not needed

I have removed libsodium from the CentOS system requirements, as it is part
of PyNaCl.
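
A quick way to confirm the bundled copy is present after installation (an
illustrative check, not part of this patch; it simply imports PyNaCl's
libsodium bindings):

    python -c "import nacl.bindings; print('libsodium OK')"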
Signed-off-by: Richard Kellner
---
 README.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.rst b/README.rst
index 6cdaa055ca..7885d0dc93 100644
--- a/README.rst
+++ b/README.rst
@@ -111,7 +111,7 @@ Installing prerequisites on ArchLinux::
 Installing prerequisites on CentOS 7::
 
     sudo yum install libtiff-devel libjpeg-devel libzip-devel freetype-devel \
-                     lcms2-devel libwebp-devel libsodium tcl-devel tk-devel \
+                     lcms2-devel libwebp-devel tcl-devel tk-devel \
                      python-virtualenv libffi-devel openssl-devel
     sudo yum groupinstall "Development Tools"
 

From e64655c25de0b3797ace51239203792a15766949 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Thu, 23 Mar 2017 13:17:00 +0000
Subject: [PATCH 05/29] Don't use upsert to persist new one time keys

Instead we no-op duplicate one time key uploads, and raise an error if the
key_id already exists but encodes a different key.
---
 synapse/storage/end_to_end_keys.py | 57 +++++++++++++++++++++++++-----
 1 file changed, 48 insertions(+), 9 deletions(-)

diff --git a/synapse/storage/end_to_end_keys.py b/synapse/storage/end_to_end_keys.py
index b9f1365f92..77bb9b0657 100644
--- a/synapse/storage/end_to_end_keys.py
+++ b/synapse/storage/end_to_end_keys.py
@@ -120,24 +120,63 @@ class EndToEndKeyStore(SQLBaseStore):
 
         return result
 
+    @defer.inlineCallbacks
     def add_e2e_one_time_keys(self, user_id, device_id, time_now, key_list):
+        """Insert some new one time keys for a device.
+
+        Checks if any of the keys are already inserted, if they are then check
+        if they match. If they don't then we raise an error.
+        """
+
+        # First we check if we have already persisted any of the keys.
+        rows = yield self._simple_select_many_batch(
+            table="e2e_one_time_keys_json",
+            column="key_id",
+            iterable=[key_id for _, key_id, _ in key_list],
+            retcols=("algorithm", "key_id", "key_json",),
+            keyvalues={
+                "user_id": user_id,
+                "device_id": device_id,
+            },
+            desc="add_e2e_one_time_keys_check",
+        )
+
+        existing_key_map = {
+            row["key_id"]: (row["algorithm"], row["key_json"]) for row in rows
+        }
+
+        new_keys = []  # Keys that we need to insert
+        for algorithm, key_id, json_bytes in key_list:
+            if key_id in existing_key_map:
+                ex_algo, ex_bytes = existing_key_map[key_id]
+                if algorithm != ex_algo or json_bytes != ex_bytes:
+                    raise Exception(
+                        "One time key with key_id %r already exists" % (key_id,)
+                    )
+            else:
+                new_keys.append((algorithm, key_id, json_bytes))
+
         def _add_e2e_one_time_keys(txn):
-            for (algorithm, key_id, json_bytes) in key_list:
-                self._simple_upsert_txn(
-                    txn, table="e2e_one_time_keys_json",
-                    keyvalues={
+            # We are protected from race between lookup and insertion due to
+            # a unique constraint. If there is a race of two calls to
+            # `add_e2e_one_time_keys` then they'll conflict and we will only
+            # insert one set.
+ self._simple_insert_many_txn( + txn, table="e2e_one_time_keys_json", + values=[ + { "user_id": user_id, "device_id": device_id, "algorithm": algorithm, "key_id": key_id, - }, - values={ "ts_added_ms": time_now, "key_json": json_bytes, } - ) - return self.runInteraction( - "add_e2e_one_time_keys", _add_e2e_one_time_keys + for algorithm, key_id, json_bytes in new_keys + ], + ) + yield self.runInteraction( + "add_e2e_one_time_keys_insert", _add_e2e_one_time_keys ) def count_e2e_one_time_keys(self, user_id, device_id): From 6ebe2d23b1a366199e585dda671c9086c145e6b9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 23 Mar 2017 13:48:30 +0000 Subject: [PATCH 06/29] Raise a more helpful exception --- synapse/storage/end_to_end_keys.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/synapse/storage/end_to_end_keys.py b/synapse/storage/end_to_end_keys.py index 77bb9b0657..85340746c7 100644 --- a/synapse/storage/end_to_end_keys.py +++ b/synapse/storage/end_to_end_keys.py @@ -14,6 +14,8 @@ # limitations under the License. from twisted.internet import defer +from synapse.api.errors import SynapseError + from canonicaljson import encode_canonical_json import ujson as json @@ -150,8 +152,8 @@ class EndToEndKeyStore(SQLBaseStore): if key_id in existing_key_map: ex_algo, ex_bytes = existing_key_map[key_id] if algorithm != ex_algo or json_bytes != ex_bytes: - raise Exception( - "One time key with key_id %r already exists" % (key_id,) + raise SynapseError( + 400, "One time key with key_id %r already exists" % (key_id,) ) else: new_keys.append((algorithm, key_id, json_bytes)) From 2a28b79e04531f2630432e6a710db3ddc434ad8e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 24 Mar 2017 14:44:49 +0000 Subject: [PATCH 07/29] Batch sending of device list pokes --- synapse/federation/transaction_queue.py | 1 + synapse/storage/devices.py | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py index d7ecefcc64..2c96475b2a 100644 --- a/synapse/federation/transaction_queue.py +++ b/synapse/federation/transaction_queue.py @@ -309,6 +309,7 @@ class TransactionQueue(object): # XXX: what's this for? yield run_on_reactor() + pending_pdus = [] while True: device_message_edus, device_stream_id, dev_list_id = ( yield self._get_new_device_messages(destination) diff --git a/synapse/storage/devices.py b/synapse/storage/devices.py index 6beeff8b00..53e36791d5 100644 --- a/synapse/storage/devices.py +++ b/synapse/storage/devices.py @@ -329,6 +329,7 @@ class DeviceStore(SQLBaseStore): SELECT user_id, device_id, max(stream_id) FROM device_lists_outbound_pokes WHERE destination = ? AND ? < stream_id AND stream_id <= ? AND sent = ? 
GROUP BY user_id, device_id + LIMIT 20 """ txn.execute( sql, (destination, from_stream_id, now_stream_id, False) @@ -339,6 +340,9 @@ class DeviceStore(SQLBaseStore): if not query_map: return (now_stream_id, []) + if len(query_map) >= 20: + now_stream_id = max(stream_id for stream_id in query_map.itervalues()) + devices = self._get_e2e_device_keys_txn( txn, query_map.keys(), include_all_devices=True ) From 35b4aa04befbe2853cb409ebbb0c7d9ff04137a4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 27 Mar 2017 14:07:47 +0100 Subject: [PATCH 08/29] Notify on new federation traffic --- synapse/federation/send_queue.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index 5c9f7a86f0..bbb0195228 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -54,6 +54,7 @@ class FederationRemoteSendQueue(object): def __init__(self, hs): self.server_name = hs.hostname self.clock = hs.get_clock() + self.notifier = hs.get_notifier() self.presence_map = {} self.presence_changed = sorteddict() @@ -186,6 +187,8 @@ class FederationRemoteSendQueue(object): else: self.edus[pos] = edu + self.notifier.on_new_replication_data() + def send_presence(self, destination, states): """As per TransactionQueue""" pos = self._next_pos() @@ -199,16 +202,20 @@ class FederationRemoteSendQueue(object): (destination, state.user_id) for state in states ] + self.notifier.on_new_replication_data() + def send_failure(self, failure, destination): """As per TransactionQueue""" pos = self._next_pos() self.failures[pos] = (destination, str(failure)) + self.notifier.on_new_replication_data() def send_device_messages(self, destination): """As per TransactionQueue""" pos = self._next_pos() self.device_messages[pos] = destination + self.notifier.on_new_replication_data() def get_current_token(self): return self.pos - 1 From 58a35366be4e9a56c6655972af5f26462a867f36 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 28 Mar 2017 11:34:37 +0100 Subject: [PATCH 09/29] The algorithm is part of the key id --- synapse/storage/end_to_end_keys.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/synapse/storage/end_to_end_keys.py b/synapse/storage/end_to_end_keys.py index 85340746c7..b746f74269 100644 --- a/synapse/storage/end_to_end_keys.py +++ b/synapse/storage/end_to_end_keys.py @@ -144,14 +144,14 @@ class EndToEndKeyStore(SQLBaseStore): ) existing_key_map = { - row["key_id"]: (row["algorithm"], row["key_json"]) for row in rows + (row["algorithm"], row["key_id"]): row["key_json"] for row in rows } new_keys = [] # Keys that we need to insert for algorithm, key_id, json_bytes in key_list: - if key_id in existing_key_map: - ex_algo, ex_bytes = existing_key_map[key_id] - if algorithm != ex_algo or json_bytes != ex_bytes: + if (algorithm, key_id) in existing_key_map: + ex_bytes = existing_key_map[key_id] + if json_bytes != ex_bytes: raise SynapseError( 400, "One time key with key_id %r already exists" % (key_id,) ) From 650f0e69f2ff1c15739868c0e1a639d70ac13dbf Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 28 Mar 2017 13:03:50 +0100 Subject: [PATCH 10/29] Compile the regex's used in ASes --- synapse/appservice/__init__.py | 14 +++++--------- tests/appservice/test_appservice.py | 4 +++- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py index b0106a3597..1e298ccf36 100644 --- a/synapse/appservice/__init__.py +++ 
b/synapse/appservice/__init__.py @@ -124,22 +124,18 @@ class ApplicationService(object): raise ValueError( "Expected bool for 'exclusive' in ns '%s'" % ns ) - if not isinstance(regex_obj.get("regex"), basestring): + regex = regex_obj.get("regex") + if isinstance(regex, basestring): + regex_obj["regex"] = re.compile(regex) + else: raise ValueError( "Expected string for 'regex' in ns '%s'" % ns ) return namespaces def _matches_regex(self, test_string, namespace_key, return_obj=False): - if not isinstance(test_string, basestring): - logger.error( - "Expected a string to test regex against, but got %s", - test_string - ) - return False - for regex_obj in self.namespaces[namespace_key]: - if re.match(regex_obj["regex"], test_string): + if regex_obj["regex"].match(test_string): if return_obj: return regex_obj return True diff --git a/tests/appservice/test_appservice.py b/tests/appservice/test_appservice.py index aa8cc50550..7586ea9053 100644 --- a/tests/appservice/test_appservice.py +++ b/tests/appservice/test_appservice.py @@ -19,10 +19,12 @@ from twisted.internet import defer from mock import Mock from tests import unittest +import re + def _regex(regex, exclusive=True): return { - "regex": regex, + "regex": re.compile(regex), "exclusive": exclusive } From 30f5ffdca2a610c7b47dd9aaa02f1fa91976775f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 28 Mar 2017 13:20:15 +0100 Subject: [PATCH 11/29] Remove param and cast at call site --- synapse/appservice/__init__.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py index 1e298ccf36..885d14fa91 100644 --- a/synapse/appservice/__init__.py +++ b/synapse/appservice/__init__.py @@ -133,16 +133,14 @@ class ApplicationService(object): ) return namespaces - def _matches_regex(self, test_string, namespace_key, return_obj=False): + def _matches_regex(self, test_string, namespace_key): for regex_obj in self.namespaces[namespace_key]: if regex_obj["regex"].match(test_string): - if return_obj: - return regex_obj - return True - return False + return regex_obj + return None def _is_exclusive(self, ns_key, test_string): - regex_obj = self._matches_regex(test_string, ns_key, return_obj=True) + regex_obj = self._matches_regex(test_string, ns_key) if regex_obj: return regex_obj["exclusive"] return False @@ -215,10 +213,10 @@ class ApplicationService(object): ) def is_interested_in_alias(self, alias): - return self._matches_regex(alias, ApplicationService.NS_ALIASES) + return bool(self._matches_regex(alias, ApplicationService.NS_ALIASES)) def is_interested_in_room(self, room_id): - return self._matches_regex(room_id, ApplicationService.NS_ROOMS) + return bool(self._matches_regex(room_id, ApplicationService.NS_ROOMS)) def is_exclusive_user(self, user_id): return ( From 51b156d48a14f1f3c8b03a6901317b0330cd368b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 28 Mar 2017 13:25:18 +0100 Subject: [PATCH 12/29] Cache whether an AS is interested based on members --- synapse/appservice/__init__.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py index 885d14fa91..48791f0d9b 100644 --- a/synapse/appservice/__init__.py +++ b/synapse/appservice/__init__.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
from synapse.api.constants import EventTypes +from synapse.util.caches.descriptors import cachedInlineCallbacks from twisted.internet import defer @@ -160,7 +161,14 @@ class ApplicationService(object): if not store: defer.returnValue(False) - member_list = yield store.get_users_in_room(event.room_id) + does_match = yield self._matches_user_in_member_list(event.room_id, store) + defer.returnValue(does_match) + + @cachedInlineCallbacks(num_args=1, cache_context=True) + def _matches_user_in_member_list(self, room_id, store, cache_context): + member_list = yield store.get_users_in_room( + room_id, on_invalidate=cache_context.invalidate + ) # check joined member events for user_id in member_list: From 69efd7774935c1dd6a0330f114c7fca00db959c0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 29 Mar 2017 09:50:05 +0100 Subject: [PATCH 13/29] Add comment --- synapse/appservice/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py index 48791f0d9b..7346206bb1 100644 --- a/synapse/appservice/__init__.py +++ b/synapse/appservice/__init__.py @@ -127,7 +127,7 @@ class ApplicationService(object): ) regex = regex_obj.get("regex") if isinstance(regex, basestring): - regex_obj["regex"] = re.compile(regex) + regex_obj["regex"] = re.compile(regex) # Pre-compile regex else: raise ValueError( "Expected string for 'regex' in ns '%s'" % ns From ac6bc5551204467e30d6544bd9ab520b71695687 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 29 Mar 2017 10:56:26 +0100 Subject: [PATCH 14/29] Correctly look up key --- synapse/storage/end_to_end_keys.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/synapse/storage/end_to_end_keys.py b/synapse/storage/end_to_end_keys.py index b746f74269..c4020869f4 100644 --- a/synapse/storage/end_to_end_keys.py +++ b/synapse/storage/end_to_end_keys.py @@ -149,12 +149,11 @@ class EndToEndKeyStore(SQLBaseStore): new_keys = [] # Keys that we need to insert for algorithm, key_id, json_bytes in key_list: - if (algorithm, key_id) in existing_key_map: - ex_bytes = existing_key_map[key_id] - if json_bytes != ex_bytes: - raise SynapseError( - 400, "One time key with key_id %r already exists" % (key_id,) - ) + ex_bytes = existing_key_map.get((algorithm, key_id), None) + if ex_bytes and json_bytes != ex_bytes: + raise SynapseError( + 400, "One time key with key_id %r already exists" % (key_id,) + ) else: new_keys.append((algorithm, key_id, json_bytes)) From e4df0e189dea7f719c019c562ca811be90e2a5b1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 29 Mar 2017 11:02:35 +0100 Subject: [PATCH 15/29] Decrank last commit --- synapse/storage/end_to_end_keys.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/synapse/storage/end_to_end_keys.py b/synapse/storage/end_to_end_keys.py index 1fafeae3f3..7cbc1470fd 100644 --- a/synapse/storage/end_to_end_keys.py +++ b/synapse/storage/end_to_end_keys.py @@ -150,10 +150,11 @@ class EndToEndKeyStore(SQLBaseStore): new_keys = [] # Keys that we need to insert for algorithm, key_id, json_bytes in key_list: ex_bytes = existing_key_map.get((algorithm, key_id), None) - if ex_bytes and json_bytes != ex_bytes: - raise SynapseError( - 400, "One time key with key_id %r already exists" % (key_id,) - ) + if ex_bytes: + if json_bytes != ex_bytes: + raise SynapseError( + 400, "One time key with key_id %r already exists" % (key_id,) + ) else: new_keys.append((algorithm, key_id, json_bytes)) From 
85be3dde8147d5246a52c364fc1f824d55e8d6e8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 29 Mar 2017 11:48:27 +0100 Subject: [PATCH 16/29] Bail early if remote wouldn't be retried (#2064) * Bail early if remote wouldn't be retried * Don't always return true * Just use get_retry_limiter * Spelling --- synapse/federation/transaction_queue.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/synapse/federation/transaction_queue.py b/synapse/federation/transaction_queue.py index 2c96475b2a..c27ce7c5f3 100644 --- a/synapse/federation/transaction_queue.py +++ b/synapse/federation/transaction_queue.py @@ -22,7 +22,7 @@ from .units import Transaction, Edu from synapse.api.errors import HttpResponseException from synapse.util.async import run_on_reactor from synapse.util.logcontext import preserve_context_over_fn -from synapse.util.retryutils import NotRetryingDestination +from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter from synapse.util.metrics import measure_func from synapse.types import get_domain_from_id from synapse.handlers.presence import format_user_presence_state @@ -303,9 +303,15 @@ class TransactionQueue(object): ) return + pending_pdus = [] try: self.pending_transactions[destination] = 1 + # This will throw if we wouldn't retry. We do this here so we fail + # quickly, but we will later check this again in the http client, + # hence why we throw the result away. + yield get_retry_limiter(destination, self.clock, self.store) + # XXX: what's this for? yield run_on_reactor() @@ -398,7 +404,7 @@ class TransactionQueue(object): destination, e, ) - for p in pending_pdus: + for p, _ in pending_pdus: logger.info("Failed to send event %s to %s", p.event_id, destination) finally: From 3ce8d591763059bb17b5d5053bcfadaa2c988f83 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 29 Mar 2017 14:31:46 +0100 Subject: [PATCH 17/29] Increase cache size for _get_state_group_for_event --- synapse/storage/state.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/state.py b/synapse/storage/state.py index 314216f039..fb23f6f462 100644 --- a/synapse/storage/state.py +++ b/synapse/storage/state.py @@ -496,7 +496,7 @@ class StateStore(SQLBaseStore): state_map = yield self.get_state_ids_for_events([event_id], types) defer.returnValue(state_map[event_id]) - @cached(num_args=2, max_entries=10000) + @cached(num_args=2, max_entries=100000) def _get_state_group_for_event(self, room_id, event_id): return self._simple_select_one_onecol( table="event_to_state_groups", From a3810136fe107415543ae92ec21fdbeb22b049b8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 29 Mar 2017 15:53:14 +0100 Subject: [PATCH 18/29] Cache glob to regex at a higher level for push --- synapse/push/push_rule_evaluator.py | 104 +++++++++++++++------------- 1 file changed, 57 insertions(+), 47 deletions(-) diff --git a/synapse/push/push_rule_evaluator.py b/synapse/push/push_rule_evaluator.py index 4db76f18bd..4d88046579 100644 --- a/synapse/push/push_rule_evaluator.py +++ b/synapse/push/push_rule_evaluator.py @@ -17,6 +17,7 @@ import logging import re from synapse.types import UserID +from synapse.util.caches import CACHE_SIZE_FACTOR, register_cache from synapse.util.caches.lrucache import LruCache logger = logging.getLogger(__name__) @@ -125,6 +126,11 @@ class PushRuleEvaluatorForEvent(object): return self._value_cache.get(dotted_key, None) +# Caches (glob, word_boundary) -> regex for push. 
See _glob_matches +regex_cache = LruCache(50000 * CACHE_SIZE_FACTOR) +register_cache("regex_push_cache", regex_cache) + + def _glob_matches(glob, value, word_boundary=False): """Tests if value matches glob. @@ -137,46 +143,63 @@ def _glob_matches(glob, value, word_boundary=False): Returns: bool """ + try: - if IS_GLOB.search(glob): - r = re.escape(glob) - - r = r.replace(r'\*', '.*?') - r = r.replace(r'\?', '.') - - # handle [abc], [a-z] and [!a-z] style ranges. - r = GLOB_REGEX.sub( - lambda x: ( - '[%s%s]' % ( - x.group(1) and '^' or '', - x.group(2).replace(r'\\\-', '-') - ) - ), - r, - ) - if word_boundary: - r = r"\b%s\b" % (r,) - r = _compile_regex(r) - - return r.search(value) - else: - r = r + "$" - r = _compile_regex(r) - - return r.match(value) - elif word_boundary: - r = re.escape(glob) - r = r"\b%s\b" % (r,) - r = _compile_regex(r) - - return r.search(value) - else: - return value.lower() == glob.lower() + r = regex_cache.get((glob, word_boundary), None) + if not r: + r = _glob_to_re(glob, word_boundary) + regex_cache[(glob, word_boundary)] = r + return r.search(value) except re.error: logger.warn("Failed to parse glob to regex: %r", glob) return False +def _glob_to_re(glob, word_boundary): + """Generates regex for a given glob. + + Args: + glob (string) + word_boundary (bool): Whether to match against word boundaries or entire + string. Defaults to False. + + Returns: + regex object + """ + if IS_GLOB.search(glob): + r = re.escape(glob) + + r = r.replace(r'\*', '.*?') + r = r.replace(r'\?', '.') + + # handle [abc], [a-z] and [!a-z] style ranges. + r = GLOB_REGEX.sub( + lambda x: ( + '[%s%s]' % ( + x.group(1) and '^' or '', + x.group(2).replace(r'\\\-', '-') + ) + ), + r, + ) + if word_boundary: + r = r"\b%s\b" % (r,) + + return re.compile(r, flags=re.IGNORECASE) + else: + r = "^" + r + "$" + + return re.compile(r, flags=re.IGNORECASE) + elif word_boundary: + r = re.escape(glob) + r = r"\b%s\b" % (r,) + + return re.compile(r, flags=re.IGNORECASE) + else: + r = "^" + re.escape(glob) + "$" + return re.compile(r, flags=re.IGNORECASE) + + def _flatten_dict(d, prefix=[], result={}): for key, value in d.items(): if isinstance(value, basestring): @@ -185,16 +208,3 @@ def _flatten_dict(d, prefix=[], result={}): _flatten_dict(value, prefix=(prefix + [key]), result=result) return result - - -regex_cache = LruCache(5000) - - -def _compile_regex(regex_str): - r = regex_cache.get(regex_str, None) - if r: - return r - - r = re.compile(regex_str, flags=re.IGNORECASE) - regex_cache[regex_str] = r - return r From 6cdca71079a9b19da764dc2edad86faec07a4ae4 Mon Sep 17 00:00:00 2001 From: Anant Prakash Date: Wed, 29 Mar 2017 23:50:13 +0530 Subject: [PATCH 19/29] synctl.py: wait for synapse to stop before restarting (#2020) --- synapse/app/synctl.py | 47 +++++++++++++++++++++++++++++++++++++++---- 1 file changed, 43 insertions(+), 4 deletions(-) diff --git a/synapse/app/synctl.py b/synapse/app/synctl.py index c045588866..23eb6a1ec4 100755 --- a/synapse/app/synctl.py +++ b/synapse/app/synctl.py @@ -23,14 +23,27 @@ import signal import subprocess import sys import yaml +import errno +import time SYNAPSE = [sys.executable, "-B", "-m", "synapse.app.homeserver"] GREEN = "\x1b[1;32m" +YELLOW = "\x1b[1;33m" RED = "\x1b[1;31m" NORMAL = "\x1b[m" +def pid_running(pid): + try: + os.kill(pid, 0) + return True + except OSError, err: + if err.errno == errno.EPERM: + return True + return False + + def write(message, colour=NORMAL, stream=sys.stdout): if colour == NORMAL: stream.write(message + "\n") @@ -38,6 +51,11 
@@ def write(message, colour=NORMAL, stream=sys.stdout): stream.write(colour + message + NORMAL + "\n") +def abort(message, colour=RED, stream=sys.stderr): + write(message, colour, stream) + sys.exit(1) + + def start(configfile): write("Starting ...") args = SYNAPSE @@ -45,7 +63,8 @@ def start(configfile): try: subprocess.check_call(args) - write("started synapse.app.homeserver(%r)" % (configfile,), colour=GREEN) + write("started synapse.app.homeserver(%r)" % + (configfile,), colour=GREEN) except subprocess.CalledProcessError as e: write( "error starting (exit code: %d); see above for logs" % e.returncode, @@ -76,8 +95,16 @@ def start_worker(app, configfile, worker_configfile): def stop(pidfile, app): if os.path.exists(pidfile): pid = int(open(pidfile).read()) - os.kill(pid, signal.SIGTERM) - write("stopped %s" % (app,), colour=GREEN) + try: + os.kill(pid, signal.SIGTERM) + write("stopped %s" % (app,), colour=GREEN) + except OSError, err: + if err.errno == errno.ESRCH: + write("%s not running" % (app,), colour=YELLOW) + elif err.errno == errno.EPERM: + abort("Cannot stop %s: Operation not permitted" % (app,)) + else: + abort("Cannot stop %s: Unknown error" % (app,)) Worker = collections.namedtuple("Worker", [ @@ -190,7 +217,19 @@ def main(): if start_stop_synapse: stop(pidfile, "synapse.app.homeserver") - # TODO: Wait for synapse to actually shutdown before starting it again + # Wait for synapse to actually shutdown before starting it again + if action == "restart": + running_pids = [] + if start_stop_synapse and os.path.exists(pidfile): + running_pids.append(int(open(pidfile).read())) + for worker in workers: + if os.path.exists(worker.pidfile): + running_pids.append(int(open(worker.pidfile).read())) + if len(running_pids) > 0: + write("Waiting for process to exit before restarting...") + for running_pid in running_pids: + while pid_running(running_pid): + time.sleep(0.2) if action == "start" or action == "restart": if start_stop_synapse: From 30348c924cc177c0cb84d1efd0be0906ac972809 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 Mar 2017 10:30:05 +0100 Subject: [PATCH 20/29] Use txn.fetchall() so we can reuse txn --- synapse/storage/event_federation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/event_federation.py b/synapse/storage/event_federation.py index 43b5b49986..519059c306 100644 --- a/synapse/storage/event_federation.py +++ b/synapse/storage/event_federation.py @@ -152,7 +152,7 @@ class EventFederationStore(SQLBaseStore): txn.execute(sql, (room_id, )) results = [] - for event_id, depth in txn: + for event_id, depth in txn.fetchall(): hashes = self._get_event_reference_hashes_txn(txn, event_id) prev_hashes = { k: encode_base64(v) for k, v in hashes.items() From f9b4bb05e05694f3000df2bc5331b1aaa501575c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 30 Mar 2017 13:22:24 +0100 Subject: [PATCH 21/29] Fix the logcontext handling in the cache wrappers (#2077) The cache wrappers had a habit of leaking the logcontext into the reactor while the lookup function was running, and then not restoring it correctly when the lookup function had completed. It's all the fault of `preserve_context_over_{fn,deferred}` which are basically a bit broken. 
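
A rough sketch of the pattern the wrappers now follow (illustrative only, not
lifted verbatim from the diff below): run the lookup via `preserve_fn` so the
calling logcontext is not leaked into the reactor while the lookup is pending,
then wrap the result in `make_deferred_yieldable` so callers can safely
`yield` it:

    from twisted.internet import defer
    from synapse.util import logcontext

    def cached_lookup(lookup, *args):
        # Run the (possibly deferred-returning) lookup in the caller's
        # logcontext, without leaking that context into the reactor.
        d = defer.maybeDeferred(logcontext.preserve_fn(lookup), *args)

        # Reset the logcontext before returning the deferred, and restore
        # it around the callbacks, so a plain `yield` follows the rules.
        return logcontext.make_deferred_yieldable(d)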
--- docs/log_contexts.rst | 11 +++- synapse/util/caches/descriptors.py | 30 +++++---- synapse/util/logcontext.py | 23 +++++++ tests/util/caches/test_descriptors.py | 91 +++++++++++++++++++++++++++ 4 files changed, 136 insertions(+), 19 deletions(-) diff --git a/docs/log_contexts.rst b/docs/log_contexts.rst index 8d04a973de..eb1784e700 100644 --- a/docs/log_contexts.rst +++ b/docs/log_contexts.rst @@ -204,9 +204,14 @@ That doesn't follow the rules, but we can fix it by wrapping it with This technique works equally for external functions which return deferreds, or deferreds we have made ourselves. -XXX: think this is what ``preserve_context_over_deferred`` is supposed to do, -though it is broken, in that it only restores the logcontext for the duration -of the callbacks, which doesn't comply with the logcontext rules. +You can also use ``logcontext.make_deferred_yieldable``, which just does the +boilerplate for you, so the above could be written: + +.. code:: python + + def sleep(seconds): + return logcontext.make_deferred_yieldable(get_sleep_deferred(seconds)) + Fire-and-forget --------------- diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 19595df422..5c30ed235d 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -15,12 +15,9 @@ import logging from synapse.util.async import ObservableDeferred -from synapse.util import unwrapFirstError +from synapse.util import unwrapFirstError, logcontext from synapse.util.caches.lrucache import LruCache from synapse.util.caches.treecache import TreeCache, iterate_tree_cache_entry -from synapse.util.logcontext import ( - PreserveLoggingContext, preserve_context_over_deferred, preserve_context_over_fn -) from . import DEBUG_CACHES, register_cache @@ -328,11 +325,9 @@ class CacheDescriptor(_CacheDescriptorBase): defer.returnValue(cached_result) observer.addCallback(check_result) - return preserve_context_over_deferred(observer) except KeyError: ret = defer.maybeDeferred( - preserve_context_over_fn, - self.function_to_call, + logcontext.preserve_fn(self.function_to_call), obj, *args, **kwargs ) @@ -342,10 +337,11 @@ class CacheDescriptor(_CacheDescriptorBase): ret.addErrback(onErr) - ret = ObservableDeferred(ret, consumeErrors=True) - cache.set(cache_key, ret, callback=invalidate_callback) + result_d = ObservableDeferred(ret, consumeErrors=True) + cache.set(cache_key, result_d, callback=invalidate_callback) + observer = result_d.observe() - return preserve_context_over_deferred(ret.observe()) + return logcontext.make_deferred_yieldable(observer) wrapped.invalidate = cache.invalidate wrapped.invalidate_all = cache.invalidate_all @@ -362,7 +358,11 @@ class CacheListDescriptor(_CacheDescriptorBase): """Wraps an existing cache to support bulk fetching of keys. Given a list of keys it looks in the cache to find any hits, then passes - the list of missing keys to the wrapped fucntion. + the list of missing keys to the wrapped function. + + Once wrapped, the function returns either a Deferred which resolves to + the list of results, or (if all results were cached), just the list of + results. 
""" def __init__(self, orig, cached_method_name, list_name, num_args=None, @@ -433,8 +433,7 @@ class CacheListDescriptor(_CacheDescriptorBase): args_to_call[self.list_name] = missing ret_d = defer.maybeDeferred( - preserve_context_over_fn, - self.function_to_call, + logcontext.preserve_fn(self.function_to_call), **args_to_call ) @@ -443,8 +442,7 @@ class CacheListDescriptor(_CacheDescriptorBase): # We need to create deferreds for each arg in the list so that # we can insert the new deferred into the cache. for arg in missing: - with PreserveLoggingContext(): - observer = ret_d.observe() + observer = ret_d.observe() observer.addCallback(lambda r, arg: r.get(arg, None), arg) observer = ObservableDeferred(observer) @@ -471,7 +469,7 @@ class CacheListDescriptor(_CacheDescriptorBase): results.update(res) return results - return preserve_context_over_deferred(defer.gatherResults( + return logcontext.make_deferred_yieldable(defer.gatherResults( cached_defers.values(), consumeErrors=True, ).addCallback(update_results_dict).addErrback( diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index ff67b1d794..857afee7cb 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -310,6 +310,10 @@ def preserve_context_over_fn(fn, *args, **kwargs): def preserve_context_over_deferred(deferred, context=None): """Given a deferred wrap it such that any callbacks added later to it will be invoked with the current context. + + Deprecated: this almost certainly doesn't do want you want, ie make + the deferred follow the synapse logcontext rules: try + ``make_deferred_yieldable`` instead. """ if context is None: context = LoggingContext.current_context() @@ -359,6 +363,25 @@ def preserve_fn(f): return g +@defer.inlineCallbacks +def make_deferred_yieldable(deferred): + """Given a deferred, make it follow the Synapse logcontext rules: + + If the deferred has completed (or is not actually a Deferred), essentially + does nothing (just returns another completed deferred with the + result/failure). + + If the deferred has not yet completed, resets the logcontext before + returning a deferred. Then, when the deferred completes, restores the + current logcontext before running callbacks/errbacks. + + (This is more-or-less the opposite operation to preserve_fn.) + """ + with PreserveLoggingContext(): + r = yield deferred + defer.returnValue(r) + + # modules to ignore in `logcontext_tracer` _to_ignore = [ "synapse.util.logcontext", diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 419281054d..4414e86771 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -12,11 +12,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import logging + import mock +from synapse.api.errors import SynapseError +from synapse.util import async +from synapse.util import logcontext from twisted.internet import defer from synapse.util.caches import descriptors from tests import unittest +logger = logging.getLogger(__name__) + class DescriptorTestCase(unittest.TestCase): @defer.inlineCallbacks @@ -84,3 +91,87 @@ class DescriptorTestCase(unittest.TestCase): r = yield obj.fn(2, 5) self.assertEqual(r, 'chips') obj.mock.assert_not_called() + + def test_cache_logcontexts(self): + """Check that logcontexts are set and restored correctly when + using the cache.""" + + complete_lookup = defer.Deferred() + + class Cls(object): + @descriptors.cached() + def fn(self, arg1): + @defer.inlineCallbacks + def inner_fn(): + with logcontext.PreserveLoggingContext(): + yield complete_lookup + defer.returnValue(1) + + return inner_fn() + + @defer.inlineCallbacks + def do_lookup(): + with logcontext.LoggingContext() as c1: + c1.name = "c1" + r = yield obj.fn(1) + self.assertEqual(logcontext.LoggingContext.current_context(), + c1) + defer.returnValue(r) + + def check_result(r): + self.assertEqual(r, 1) + + obj = Cls() + + # set off a deferred which will do a cache lookup + d1 = do_lookup() + self.assertEqual(logcontext.LoggingContext.current_context(), + logcontext.LoggingContext.sentinel) + d1.addCallback(check_result) + + # and another + d2 = do_lookup() + self.assertEqual(logcontext.LoggingContext.current_context(), + logcontext.LoggingContext.sentinel) + d2.addCallback(check_result) + + # let the lookup complete + complete_lookup.callback(None) + + return defer.gatherResults([d1, d2]) + + def test_cache_logcontexts_with_exception(self): + """Check that the cache sets and restores logcontexts correctly when + the lookup function throws an exception""" + + class Cls(object): + @descriptors.cached() + def fn(self, arg1): + @defer.inlineCallbacks + def inner_fn(): + yield async.run_on_reactor() + raise SynapseError(400, "blah") + + return inner_fn() + + @defer.inlineCallbacks + def do_lookup(): + with logcontext.LoggingContext() as c1: + c1.name = "c1" + try: + yield obj.fn(1) + self.fail("No exception thrown") + except SynapseError: + pass + + self.assertEqual(logcontext.LoggingContext.current_context(), + c1) + + obj = Cls() + + # set off a deferred which will do a cache lookup + d1 = do_lookup() + self.assertEqual(logcontext.LoggingContext.current_context(), + logcontext.LoggingContext.sentinel) + + return d1 From 86780a8bc3eac566d2c03a601f84b5ccf5737ceb Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 28 Mar 2017 10:41:08 +0100 Subject: [PATCH 22/29] Don't convert to deferreds when not necessary --- synapse/push/push_tools.py | 7 ++----- synapse/util/async.py | 2 +- synapse/util/caches/descriptors.py | 5 ++++- synapse/util/logcontext.py | 3 +++ synapse/visibility.py | 3 ++- 5 files changed, 12 insertions(+), 8 deletions(-) diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py index 287df94b4f..6835f54e97 100644 --- a/synapse/push/push_tools.py +++ b/synapse/push/push_tools.py @@ -17,15 +17,12 @@ from twisted.internet import defer from synapse.push.presentable_names import ( calculate_room_name, name_from_member_event ) -from synapse.util.logcontext import preserve_fn, preserve_context_over_deferred @defer.inlineCallbacks def get_badge_count(store, user_id): - invites, joins = yield preserve_context_over_deferred(defer.gatherResults([ - preserve_fn(store.get_invited_rooms_for_user)(user_id), - 
preserve_fn(store.get_rooms_for_user)(user_id),
-    ], consumeErrors=True))
+    invites = yield store.get_invited_rooms_for_user(user_id)
+    joins = yield store.get_rooms_for_user(user_id)
 
     my_receipts_by_room = yield store.get_receipts_for_user(
         user_id, "m.read",
diff --git a/synapse/util/async.py b/synapse/util/async.py
index 35380bf8ed..8495de496a 100644
--- a/synapse/util/async.py
+++ b/synapse/util/async.py
@@ -101,7 +101,7 @@ class ObservableDeferred(object):
             return d
         else:
             success, res = self._result
-            return defer.succeed(res) if success else defer.fail(res)
+            return res if success else defer.fail(res)
 
     def observers(self):
         return self._observers
diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py
index 5c30ed235d..1607978e29 100644
--- a/synapse/util/caches/descriptors.py
+++ b/synapse/util/caches/descriptors.py
@@ -341,7 +341,10 @@ class CacheDescriptor(_CacheDescriptorBase):
             cache.set(cache_key, result_d, callback=invalidate_callback)
             observer = result_d.observe()
 
-            return logcontext.make_deferred_yieldable(observer)
+            if isinstance(observer, defer.Deferred):
+                return logcontext.make_deferred_yieldable(observer)
+            else:
+                return observer
 
         wrapped.invalidate = cache.invalidate
         wrapped.invalidate_all = cache.invalidate_all
diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py
index 857afee7cb..183d9cf62f 100644
--- a/synapse/util/logcontext.py
+++ b/synapse/util/logcontext.py
@@ -315,6 +315,9 @@ def preserve_context_over_deferred(deferred, context=None):
     the deferred follow the synapse logcontext rules: try
     ``make_deferred_yieldable`` instead.
     """
+    if not isinstance(deferred, defer.Deferred):
+        return deferred
+
     if context is None:
         context = LoggingContext.current_context()
     d = _PreservingContextDeferred(context)
diff --git a/synapse/visibility.py b/synapse/visibility.py
index 31659156ae..c4dd9ae2c7 100644
--- a/synapse/visibility.py
+++ b/synapse/visibility.py
@@ -56,7 +56,8 @@ def filter_events_for_clients(store, user_tuples, events, event_id_to_state):
         events ([synapse.events.EventBase]): list of events to filter
     """
     forgotten = yield preserve_context_over_deferred(defer.gatherResults([
-        preserve_fn(store.who_forgot_in_room)(
+        defer.maybeDeferred(
+            preserve_fn(store.who_forgot_in_room),
             room_id,
         )
         for room_id in frozenset(e.room_id for e in events)

From 014fee93b38ea93ee7dd9f9f9211895272e50834 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 28 Mar 2017 11:14:15 +0100
Subject: [PATCH 23/29] Manually calculate cache key as getcallargs is
 expensive

This is because getcallargs recomputes the getargspec, amongst other
things, which we don't need to do as it's already been done.
---
 synapse/util/caches/descriptors.py | 34 ++++++++++++++++++++++++------
 1 file changed, 28 insertions(+), 6 deletions(-)

diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py
index 1607978e29..eed60d567e 100644
--- a/synapse/util/caches/descriptors.py
+++ b/synapse/util/caches/descriptors.py
@@ -197,6 +197,7 @@ class _CacheDescriptorBase(object):
 
         arg_spec = inspect.getargspec(orig)
         all_args = arg_spec.args
+        self.arg_spec = arg_spec
 
         if "cache_context" in all_args:
             if not cache_context:
@@ -226,6 +227,14 @@ class _CacheDescriptorBase(object):
 
         self.num_args = num_args
         self.arg_names = all_args[1:num_args + 1]
 
+        if arg_spec.defaults:
+            self.arg_defaults = dict(zip(
+                all_args[-len(arg_spec.defaults):],
+                arg_spec.defaults
+            ))
+        else:
+            self.arg_defaults = {}
+
         if "cache_context" in self.arg_names:
             raise Exception(
"cache_context arg cannot be included among the cache keys" @@ -289,18 +298,31 @@ class CacheDescriptor(_CacheDescriptorBase): iterable=self.iterable, ) + def get_cache_key(args, kwargs): + """Given some args/kwargs return a generator that resolves into + the cache_key. + + We loop through each arg name, looking up if its in the `kwargs`, + otherwise using the next argument in `args`. If there are no more + args then we try looking the arg name up in the defaults + """ + pos = 0 + for nm in self.arg_names: + if nm in kwargs: + yield kwargs[nm] + elif pos < len(args): + yield args[pos] + pos += 1 + else: + yield self.arg_defaults[nm] + @functools.wraps(self.orig) def wrapped(*args, **kwargs): # If we're passed a cache_context then we'll want to call its invalidate() # whenever we are invalidated invalidate_callback = kwargs.pop("on_invalidate", None) - # Add temp cache_context so inspect.getcallargs doesn't explode - if self.add_cache_context: - kwargs["cache_context"] = None - - arg_dict = inspect.getcallargs(self.orig, obj, *args, **kwargs) - cache_key = tuple(arg_dict[arg_nm] for arg_nm in self.arg_names) + cache_key = tuple(get_cache_key(args, kwargs)) # Add our own `cache_context` to argument list if the wrapped function # has asked for one From eefd9fee81428ecec311999980bb4213b6aac2df Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 28 Mar 2017 11:19:15 +0100 Subject: [PATCH 24/29] Fix up tests --- tests/storage/test__base.py | 2 +- tests/util/caches/test_descriptors.py | 38 +++++++++++++++++++++++++++ tests/util/test_snapshot_cache.py | 4 ++- 3 files changed, 42 insertions(+), 2 deletions(-) diff --git a/tests/storage/test__base.py b/tests/storage/test__base.py index 8361dd8cee..281eb16254 100644 --- a/tests/storage/test__base.py +++ b/tests/storage/test__base.py @@ -199,7 +199,7 @@ class CacheDecoratorTestCase(unittest.TestCase): a.func.prefill(("foo",), ObservableDeferred(d)) - self.assertEquals(a.func("foo").result, d.result) + self.assertEquals(a.func("foo"), d.result) self.assertEquals(callcount[0], 0) @defer.inlineCallbacks diff --git a/tests/util/caches/test_descriptors.py b/tests/util/caches/test_descriptors.py index 4414e86771..3f14ab503f 100644 --- a/tests/util/caches/test_descriptors.py +++ b/tests/util/caches/test_descriptors.py @@ -175,3 +175,41 @@ class DescriptorTestCase(unittest.TestCase): logcontext.LoggingContext.sentinel) return d1 + + @defer.inlineCallbacks + def test_cache_default_args(self): + class Cls(object): + def __init__(self): + self.mock = mock.Mock() + + @descriptors.cached() + def fn(self, arg1, arg2=2, arg3=3): + return self.mock(arg1, arg2, arg3) + + obj = Cls() + + obj.mock.return_value = 'fish' + r = yield obj.fn(1, 2, 3) + self.assertEqual(r, 'fish') + obj.mock.assert_called_once_with(1, 2, 3) + obj.mock.reset_mock() + + # a call with same params shouldn't call the mock again + r = yield obj.fn(1, 2) + self.assertEqual(r, 'fish') + obj.mock.assert_not_called() + obj.mock.reset_mock() + + # a call with different params should call the mock again + obj.mock.return_value = 'chips' + r = yield obj.fn(2, 3) + self.assertEqual(r, 'chips') + obj.mock.assert_called_once_with(2, 3, 3) + obj.mock.reset_mock() + + # the two values should now be cached + r = yield obj.fn(1, 2) + self.assertEqual(r, 'fish') + r = yield obj.fn(2, 3) + self.assertEqual(r, 'chips') + obj.mock.assert_not_called() diff --git a/tests/util/test_snapshot_cache.py b/tests/util/test_snapshot_cache.py index 7e289715ba..d3a8630c2f 100644 --- a/tests/util/test_snapshot_cache.py +++ 
b/tests/util/test_snapshot_cache.py @@ -53,7 +53,9 @@ class SnapshotCacheTestCase(unittest.TestCase): # before the cache expires returns a resolved deferred. get_result_at_11 = self.cache.get(11, "key") self.assertIsNotNone(get_result_at_11) - self.assertTrue(get_result_at_11.called) + if isinstance(get_result_at_11, Deferred): + # The cache may return the actual result rather than a deferred + self.assertTrue(get_result_at_11.called) # Check that getting the key after the deferred has resolved # after the cache expires returns None From 6194a64ae913aa400e19c3a9bd9348ce2bc11305 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 Mar 2017 14:19:10 +0100 Subject: [PATCH 25/29] Doc new instance variables --- synapse/util/caches/descriptors.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index eed60d567e..1f02cca8a5 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -197,7 +197,6 @@ class _CacheDescriptorBase(object): arg_spec = inspect.getargspec(orig) all_args = arg_spec.args - self.arg_spec = arg_spec if "cache_context" in all_args: if not cache_context: @@ -225,8 +224,16 @@ class _CacheDescriptorBase(object): ) self.num_args = num_args + + # list of the names of the args used as the cache key self.arg_names = all_args[1:num_args + 1] + # The arg spec of the wrapped function, see `inspect.getargspec` for + # the type. + self.arg_spec = arg_spec + + # self.arg_defaults is a map of arg name to its default value for each + # argument that has a default value if arg_spec.defaults: self.arg_defaults = dict(zip( all_args[-len(arg_spec.defaults):], From b282fe7170142191c8ce795270422754ab4bc58e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 Mar 2017 17:03:59 +0100 Subject: [PATCH 26/29] Revert log context change --- synapse/util/logcontext.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/synapse/util/logcontext.py b/synapse/util/logcontext.py index 183d9cf62f..857afee7cb 100644 --- a/synapse/util/logcontext.py +++ b/synapse/util/logcontext.py @@ -315,9 +315,6 @@ def preserve_context_over_deferred(deferred, context=None): the deferred follow the synapse logcontext rules: try ``make_deferred_yieldable`` instead. """ - if not isinstance(deferred, defer.Deferred): - return deferred - if context is None: context = LoggingContext.current_context() d = _PreservingContextDeferred(context) From 5b5b171f3e9223351f72150ea73e1c9797144eae Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 Mar 2017 17:05:53 +0100 Subject: [PATCH 27/29] Docs --- synapse/util/async.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/synapse/util/async.py b/synapse/util/async.py index 8495de496a..1453faf0ef 100644 --- a/synapse/util/async.py +++ b/synapse/util/async.py @@ -89,6 +89,11 @@ class ObservableDeferred(object): deferred.addCallbacks(callback, errback) def observe(self): + """Observe the underlying deferred. + + Can return either a deferred if the underlying deferred is still pending + (or has failed), or the actual value. Callers may need to use maybeDeferred. 
+ """ if not self._result: d = defer.Deferred() From 27b1b4a2c958b04f37732d19f163dcfab12ad0a7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 30 Mar 2017 17:50:31 +0100 Subject: [PATCH 28/29] Speed up copy_and_replace --- synapse/types.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/synapse/types.py b/synapse/types.py index 9666f9d73f..c87ed813b9 100644 --- a/synapse/types.py +++ b/synapse/types.py @@ -216,9 +216,7 @@ class StreamToken( return self def copy_and_replace(self, key, new_value): - d = self._asdict() - d[key] = new_value - return StreamToken(**d) + return self._replace(**{key: new_value}) StreamToken.START = StreamToken( From 4d17add8de6d1c3bcd073246519f3cdaa5063bed Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 31 Mar 2017 09:38:27 +0100 Subject: [PATCH 29/29] Remove unused instance variable --- synapse/util/caches/descriptors.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 1f02cca8a5..9d0d0be1f9 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -228,10 +228,6 @@ class _CacheDescriptorBase(object): # list of the names of the args used as the cache key self.arg_names = all_args[1:num_args + 1] - # The arg spec of the wrapped function, see `inspect.getargspec` for - # the type. - self.arg_spec = arg_spec - # self.arg_defaults is a map of arg name to its default value for each # argument that has a default value if arg_spec.defaults: