mirror of https://github.com/element-hq/synapse.git
Add per server retry limiting.
Factor out the per-destination retry logic from TransactionQueue so it can be reused in both get_pdu and crypto.keyring.
parent 5025305fb2
commit 2b8f1a956c

4 changed files with 205 additions and 122 deletions
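The pattern this commit introduces is a per-destination retry limiter obtained from get_retry_limiter() and used as a context manager around the outbound request: if the destination is still inside its back-off window the helper raises NotRetryingDestination before any request is made, and on exit it records success (clearing the back-off) or failure (extending it). The sketch below is a simplified, synchronous, in-memory analogue of that pattern, not the Twisted-based implementation added in synapse/util/retryutils.py; the store dict, the destination name and the simulated request are illustrative only.

import time

class NotRetryingDestination(Exception):
    """Raised while a destination is still inside its back-off window."""

class RetryLimiter(object):
    """Simplified analogue of RetryDestinationLimiter (seconds, in-memory)."""
    def __init__(self, store, destination, retry_interval):
        self.store = store
        self.destination = destination
        self.retry_interval = retry_interval

    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is None:
            # The request worked: clear any back-off for this destination.
            self.store[self.destination] = (0, 0)
        else:
            # The request failed: double the interval (min 20s, capped at 1h).
            interval = min(max(self.retry_interval * 2, 20), 60 * 60)
            self.store[self.destination] = (int(time.time()), interval)
        # Returning None lets the original exception propagate to the caller.

def get_retry_limiter(store, destination):
    last_ts, interval = store.get(destination, (0, 0))
    if last_ts + interval > time.time():
        raise NotRetryingDestination(destination)
    return RetryLimiter(store, destination, interval)

# Call sites wrap the network request, as keyring, get_pdu and
# TransactionQueue now do with the real helper:
store = {}
try:
    with get_retry_limiter(store, "remote.example.org"):
        raise IOError("simulated federation failure")  # the real request goes here
except NotRetryingDestination:
    pass  # destination is backing off; skip it for now
except IOError:
    pass  # failure recorded; the next attempt is delayed by the limiter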
@@ -22,6 +22,8 @@ from syutil.crypto.signing_key import (
 from syutil.base64util import decode_base64, encode_base64
 from synapse.api.errors import SynapseError, Codes
 
+from synapse.util.retryutils import get_retry_limiter
+
 from OpenSSL import crypto
 
 import logging
@@ -88,19 +90,13 @@ class Keyring(object):
 
         # Try to fetch the key from the remote server.
 
-        retry_last_ts, retry_interval = (0, 0)
-        retry_timings = yield self.store.get_destination_retry_timings(
-            server_name
+        limiter = yield get_retry_limiter(
+            server_name,
+            self.clock,
+            self.store,
         )
-        if retry_timings:
-            retry_last_ts, retry_interval = (
-                retry_timings.retry_last_ts, retry_timings.retry_interval
-            )
-        if retry_last_ts + retry_interval > int(self.clock.time_msec()):
-            logger.info("%s not ready for retry", server_name)
-            raise ValueError("No verification key found for given key ids")
 
-        try:
+        with limiter:
             (response, tls_certificate) = yield fetch_server_key(
                 server_name, self.hs.tls_context_factory
             )
@@ -165,7 +161,3 @@ class Keyring(object):
                     return
 
             raise ValueError("No verification key found for given key ids")
-
-        except:
-            self.set_retrying(server_name, retry_interval)
-            raise
@@ -23,6 +23,8 @@ from synapse.api.errors import CodeMessageException
 from synapse.util.logutils import log_function
 from synapse.events import FrozenEvent
 
+from synapse.util.retryutils import get_retry_limiter, NotRetryingDestination
+
 import logging
 
 
@@ -163,24 +165,34 @@ class FederationClient(FederationBase):
         pdu = None
         for destination in destinations:
             try:
-                transaction_data = yield self.transport_layer.get_event(
-                    destination, event_id
+                limiter = yield get_retry_limiter(
+                    destination,
+                    self._clock,
+                    self.store,
                 )
 
-                logger.debug("transaction_data %r", transaction_data)
+                with limiter:
+                    transaction_data = yield self.transport_layer.get_event(
+                        destination, event_id
+                    )
 
-                pdu_list = [
-                    self.event_from_pdu_json(p, outlier=outlier)
-                    for p in transaction_data["pdus"]
-                ]
+                    logger.debug("transaction_data %r", transaction_data)
 
-                if pdu_list:
-                    pdu = pdu_list[0]
+                    pdu_list = [
+                        self.event_from_pdu_json(p, outlier=outlier)
+                        for p in transaction_data["pdus"]
+                    ]
 
-                    # Check signatures are correct.
-                    pdu = yield self._check_sigs_and_hash(pdu)
+                    if pdu_list:
+                        pdu = pdu_list[0]
 
-                    break
+                        # Check signatures are correct.
+                        pdu = yield self._check_sigs_and_hash(pdu)
+
+                        break
+            except NotRetryingDestination as e:
+                logger.info(e.message)
+                continue
             except CodeMessageException:
                 raise
             except Exception as e:
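In get_pdu, a destination that is still backing off now surfaces as a NotRetryingDestination exception, which is logged and skipped with continue, so the loop simply moves on to the next destination in the list instead of issuing a request that the remote server was never going to be retried for anyway.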
@@ -22,6 +22,9 @@ from .units import Transaction
 from synapse.api.errors import HttpResponseException
 from synapse.util.logutils import log_function
 from synapse.util.logcontext import PreserveLoggingContext
+from synapse.util.retryutils import (
+    get_retry_limiter, NotRetryingDestination,
+)
 
 import logging
 
@@ -138,25 +141,6 @@ class TransactionQueue(object):
     @defer.inlineCallbacks
     @log_function
     def _attempt_new_transaction(self, destination):
-
-        (retry_last_ts, retry_interval) = (0, 0)
-        retry_timings = yield self.store.get_destination_retry_timings(
-            destination
-        )
-        if retry_timings:
-            (retry_last_ts, retry_interval) = (
-                retry_timings.retry_last_ts, retry_timings.retry_interval
-            )
-            if retry_last_ts + retry_interval > int(self._clock.time_msec()):
-                logger.info(
-                    "TX [%s] not ready for retry yet - "
-                    "dropping transaction for now",
-                    destination,
-                )
-                return
-            else:
-                logger.info("TX [%s] is ready for retry", destination)
-
         if destination in self.pending_transactions:
             # XXX: pending_transactions can get stuck on by a never-ending
             # request at which point pending_pdus_by_dest just keeps growing.
@@ -204,77 +188,79 @@ class TransactionQueue(object):
         ]
 
         try:
-            self.pending_transactions[destination] = 1
-
-            logger.debug("TX [%s] Persisting transaction...", destination)
-
-            transaction = Transaction.create_new(
-                origin_server_ts=int(self._clock.time_msec()),
-                transaction_id=str(self._next_txn_id),
-                origin=self.server_name,
-                destination=destination,
-                pdus=pdus,
-                edus=edus,
-                pdu_failures=failures,
-            )
-
-            self._next_txn_id += 1
-
-            yield self.transaction_actions.prepare_to_send(transaction)
-
-            logger.debug("TX [%s] Persisted transaction", destination)
-            logger.info(
-                "TX [%s] Sending transaction [%s]",
-                destination,
-                transaction.transaction_id,
-            )
-
-            # Actually send the transaction
-
-            # FIXME (erikj): This is a bit of a hack to make the Pdu age
-            # keys work
-            def json_data_cb():
-                data = transaction.get_dict()
-                now = int(self._clock.time_msec())
-                if "pdus" in data:
-                    for p in data["pdus"]:
-                        if "age_ts" in p:
-                            unsigned = p.setdefault("unsigned", {})
-                            unsigned["age"] = now - int(p["age_ts"])
-                            del p["age_ts"]
-                return data
-
-            try:
-                response = yield self.transport_layer.send_transaction(
-                    transaction, json_data_cb
-                )
-                code = 200
-            except HttpResponseException as e:
-                code = e.code
-                response = e.response
-
-            logger.info("TX [%s] got %d response", destination, code)
-
-            logger.debug("TX [%s] Sent transaction", destination)
-            logger.debug("TX [%s] Marking as delivered...", destination)
-
-            yield self.transaction_actions.delivered(
-                transaction, code, response
-            )
-
-            logger.debug("TX [%s] Marked as delivered", destination)
+            limiter = yield get_retry_limiter(
+                destination,
+                self._clock,
+                self.store,
+            )
+
+            with limiter:
+                self.pending_transactions[destination] = 1
+
+                logger.debug("TX [%s] Persisting transaction...", destination)
+
+                transaction = Transaction.create_new(
+                    origin_server_ts=int(self._clock.time_msec()),
+                    transaction_id=str(self._next_txn_id),
+                    origin=self.server_name,
+                    destination=destination,
+                    pdus=pdus,
+                    edus=edus,
+                    pdu_failures=failures,
+                )
+
+                self._next_txn_id += 1
+
+                yield self.transaction_actions.prepare_to_send(transaction)
+
+                logger.debug("TX [%s] Persisted transaction", destination)
+                logger.info(
+                    "TX [%s] Sending transaction [%s]",
+                    destination,
+                    transaction.transaction_id,
+                )
+
+                # Actually send the transaction
+
+                # FIXME (erikj): This is a bit of a hack to make the Pdu age
+                # keys work
+                def json_data_cb():
+                    data = transaction.get_dict()
+                    now = int(self._clock.time_msec())
+                    if "pdus" in data:
+                        for p in data["pdus"]:
+                            if "age_ts" in p:
+                                unsigned = p.setdefault("unsigned", {})
+                                unsigned["age"] = now - int(p["age_ts"])
+                                del p["age_ts"]
+                    return data
+
+                try:
+                    response = yield self.transport_layer.send_transaction(
+                        transaction, json_data_cb
+                    )
+                    code = 200
+                except HttpResponseException as e:
+                    code = e.code
+                    response = e.response
+
+                logger.info("TX [%s] got %d response", destination, code)
+
+                logger.debug("TX [%s] Sent transaction", destination)
+                logger.debug("TX [%s] Marking as delivered...", destination)
+
+                yield self.transaction_actions.delivered(
+                    transaction, code, response
+                )
+
+                logger.debug("TX [%s] Marked as delivered", destination)
+
             logger.debug("TX [%s] Yielding to callbacks...", destination)
 
             for deferred in deferreds:
                 if code == 200:
-                    if retry_last_ts:
-                        # this host is alive! reset retry schedule
-                        yield self.store.set_destination_retry_timings(
-                            destination, 0, 0
-                        )
                     deferred.callback(None)
                 else:
-                    self.set_retrying(destination, retry_interval)
                     deferred.errback(RuntimeError("Got status %d" % code))
 
                 # Ensures we don't continue until all callbacks on that
@@ -285,6 +271,12 @@ class TransactionQueue(object):
                 pass
 
             logger.debug("TX [%s] Yielded to callbacks", destination)
+        except NotRetryingDestination:
+            logger.info(
+                "TX [%s] not ready for retry yet - "
+                "dropping transaction for now",
+                destination,
+            )
         except RuntimeError as e:
             # We capture this here as there as nothing actually listens
             # for this finishing functions deferred.
@@ -302,8 +294,6 @@ class TransactionQueue(object):
                 e,
             )
 
-            self.set_retrying(destination, retry_interval)
-
             for deferred in deferreds:
                 if not deferred.called:
                     deferred.errback(e)
@@ -314,22 +304,3 @@ class TransactionQueue(object):
 
             # Check to see if there is anything else to send.
             self._attempt_new_transaction(destination)
-
-    @defer.inlineCallbacks
-    def set_retrying(self, destination, retry_interval):
-        # track that this destination is having problems and we should
-        # give it a chance to recover before trying it again
-
-        if retry_interval:
-            retry_interval *= 2
-            # plateau at hourly retries for now
-            if retry_interval >= 60 * 60 * 1000:
-                retry_interval = 60 * 60 * 1000
-        else:
-            retry_interval = 2000  # try again at first after 2 seconds
-
-        yield self.store.set_destination_retry_timings(
-            destination,
-            int(self._clock.time_msec()),
-            retry_interval
-        )
synapse/util/retryutils.py (new file, 108 lines)
@@ -0,0 +1,108 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from twisted.internet import defer
+
+import logging
+
+
+logger = logging.getLogger(__name__)
+
+
+class NotRetryingDestination(Exception):
+    def __init__(self, retry_last_ts, retry_interval, destination):
+        msg = "Not retrying server %s." % (destination,)
+        super(NotRetryingDestination, self).__init__(msg)
+
+        self.retry_last_ts = retry_last_ts
+        self.retry_interval = retry_interval
+        self.destination = destination
+
+
+@defer.inlineCallbacks
+def get_retry_limiter(destination, clock, store, **kwargs):
+    retry_last_ts, retry_interval = (0, 0)
+
+    retry_timings = yield store.get_destination_retry_timings(
+        destination
+    )
+
+    if retry_timings:
+        retry_last_ts, retry_interval = (
+            retry_timings.retry_last_ts, retry_timings.retry_interval
+        )
+
+    now = int(clock.time_msec())
+
+    if retry_last_ts + retry_interval > now:
+        raise NotRetryingDestination(
+            retry_last_ts=retry_last_ts,
+            retry_interval=retry_interval,
+            destination=destination,
+        )
+
+    defer.returnValue(
+        RetryDestinationLimiter(
+            destination,
+            clock,
+            store,
+            retry_interval,
+            **kwargs
+        )
+    )
+
+
+class RetryDestinationLimiter(object):
+    def __init__(self, destination, clock, store, retry_interval,
+                 min_retry_interval=20000, max_retry_interval=60 * 60 * 1000,
+                 multiplier_retry_interval=2):
+        self.clock = clock
+        self.store = store
+        self.destination = destination
+
+        self.retry_interval = retry_interval
+        self.min_retry_interval = min_retry_interval
+        self.max_retry_interval = max_retry_interval
+        self.multiplier_retry_interval = multiplier_retry_interval
+
+    def __enter__(self):
+        pass
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        def err(failure):
+            logger.exception(
+                "Failed to store set_destination_retry_timings",
+                failure.value
+            )
+
+        if exc_type is None and exc_val is None and exc_tb is None:
+            # We connected successfully.
+            retry_last_ts = 0
+            self.retry_interval = 0
+        else:
+            # We couldn't connect.
+            if self.retry_interval:
+                self.retry_interval *= self.multiplier_retry_interval
+
+                if self.retry_interval >= self.max_retry_interval:
+                    self.retry_interval = self.max_retry_interval
+            else:
+                self.retry_interval = self.min_retry_interval
+
+            retry_last_ts = int(self.clock.time_msec())
+
+        self.store.set_destination_retry_timings(
+            self.destination, retry_last_ts, self.retry_interval
+        ).addErrback(err)
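With the defaults above, RetryDestinationLimiter resets the interval on a successful exit and otherwise multiplies it by two, starting from min_retry_interval (20 seconds) and plateauing at max_retry_interval (one hour). A quick, standalone check of that progression (values in milliseconds, mirroring the defaults shown above; this snippet is not part of the commit):

# Reproduces the back-off progression implied by RetryDestinationLimiter's defaults.
MIN_INTERVAL = 20000             # 20 seconds
MAX_INTERVAL = 60 * 60 * 1000    # 1 hour
MULTIPLIER = 2

interval = 0
for failure in range(1, 11):
    interval = min(interval * MULTIPLIER, MAX_INTERVAL) if interval else MIN_INTERVAL
    print(failure, interval)
# 1: 20000, 2: 40000, 3: 80000, ..., 9: 3600000 and capped thereafter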