Mirror of https://github.com/element-hq/synapse.git, synced 2024-12-20 10:46:23 +03:00
Merge branch 'develop' of github.com:matrix-org/synapse into matrix-org-hotfixes
commit cff886c47b
10 changed files with 148 additions and 26 deletions
@@ -1,3 +1,10 @@
+Changes in synapse v0.19.2 (2017-02-20)
+=======================================
+
+* Fix bug with event visibility check in /context/ API. Thanks to Tokodomo for
+  pointing it out! (PR #1929)
+
+
 Changes in synapse v0.19.1 (2017-02-09)
 =======================================
 
contrib/example_log_config.yaml (new file, 48 lines)

@@ -0,0 +1,48 @@
# Example log_config file for synapse. To enable, point `log_config` to it in
# `homeserver.yaml`, and restart synapse.
#
# This configuration will produce similar results to the defaults within
# synapse, but can be edited to give more flexibility.

version: 1

formatters:
  fmt:
    format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s- %(message)s'

filters:
  context:
    (): synapse.util.logcontext.LoggingContextFilter
    request: ""

handlers:
  # example output to console
  console:
    class: logging.StreamHandler
    filters: [context]

  # example output to file - to enable, edit 'root' config below.
  file:
    class: logging.handlers.RotatingFileHandler
    formatter: fmt
    filename: /var/log/synapse/homeserver.log
    maxBytes: 100000000
    backupCount: 3
    filters: [context]


root:
    level: INFO
    handlers: [console] # to use file handler instead, switch to [file]

loggers:
    synapse:
        level: INFO

    synapse.storage:
        level: INFO

        # example of enabling debugging for a component:
        #
        # synapse.federation.transport.server:
        #    level: DEBUG
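For reference, a file in this dictConfig format can be applied outside of synapse with nothing more than PyYAML and the standard library. The sketch below is illustrative rather than synapse's own loader; it assumes the file is saved locally as example_log_config.yaml and that the synapse package is importable, since the context filter instantiates synapse.util.logcontext.LoggingContextFilter:

    # Minimal sketch: load the YAML above and hand it to logging.config.dictConfig.
    import logging
    import logging.config

    import yaml

    with open("example_log_config.yaml") as f:
        log_config = yaml.safe_load(f)

    logging.config.dictConfig(log_config)
    logging.getLogger("synapse").info("logging configured from YAML")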
@@ -1,22 +1,27 @@
 How to monitor Synapse metrics using Prometheus
 ===============================================
 
-1: Install prometheus:
- Follow instructions at http://prometheus.io/docs/introduction/install/
+1. Install prometheus:
+
+   Follow instructions at http://prometheus.io/docs/introduction/install/
 
-2: Enable synapse metrics:
- Simply setting a (local) port number will enable it. Pick a port.
- prometheus itself defaults to 9090, so starting just above that for
- locally monitored services seems reasonable. E.g. 9092:
+2. Enable synapse metrics:
 
- Add to homeserver.yaml
+   Simply setting a (local) port number will enable it. Pick a port.
+   prometheus itself defaults to 9090, so starting just above that for
+   locally monitored services seems reasonable. E.g. 9092:
 
-  metrics_port: 9092
+   Add to homeserver.yaml::
 
- Restart synapse
+     metrics_port: 9092
 
-3: Add a prometheus target for synapse. It needs to set the ``metrics_path``
- to a non-default value::
+   Also ensure that ``enable_metrics`` is set to ``True``.
+
+   Restart synapse.
+
+3. Add a prometheus target for synapse.
+
+   It needs to set the ``metrics_path`` to a non-default value::
 
   - job_name: "synapse"
     metrics_path: "/_synapse/metrics"

@@ -24,6 +29,11 @@ How to monitor Synapse metrics using Prometheus
       - targets:
         "my.server.here:9092"
 
+   If your prometheus is older than 1.5.2, you will need to replace
+   ``static_configs`` in the above with ``target_groups``.
+
+   Restart prometheus.
+
 Standard Metric Names
 ---------------------
 
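As an aside, the metrics listener enabled in step 2 can be sanity-checked before prometheus is pointed at it. A small sketch, assuming metrics_port: 9092 on the local homeserver and the ``/_synapse/metrics`` path shown above:

    # Fetch the Prometheus text exposition directly from synapse's metrics
    # listener. Assumes `metrics_port: 9092` and `enable_metrics: True`.
    from urllib.request import urlopen

    with urlopen("http://localhost:9092/_synapse/metrics") as resp:
        body = resp.read().decode("utf-8")

    # Show the first few exposition lines (metric names and sample values).
    print("\n".join(body.splitlines()[:10]))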
@@ -16,4 +16,4 @@
 """ This is a reference implementation of a Matrix home server.
 """
 
-__version__ = "0.19.1"
+__version__ = "0.19.2"
@@ -303,18 +303,10 @@ class TransactionQueue(object):
         try:
             self.pending_transactions[destination] = 1
 
+            # XXX: what's this for?
             yield run_on_reactor()
 
             while True:
-                pending_pdus = self.pending_pdus_by_dest.pop(destination, [])
-                pending_edus = self.pending_edus_by_dest.pop(destination, [])
-                pending_presence = self.pending_presence_by_dest.pop(destination, {})
-                pending_failures = self.pending_failures_by_dest.pop(destination, [])
-
-                pending_edus.extend(
-                    self.pending_edus_keyed_by_dest.pop(destination, {}).values()
-                )
-
                 limiter = yield get_retry_limiter(
                     destination,
                     self.clock,

@@ -326,6 +318,24 @@ class TransactionQueue(object):
                     yield self._get_new_device_messages(destination)
                 )
 
+                # BEGIN CRITICAL SECTION
+                #
+                # In order to avoid a race condition, we need to make sure that
+                # the following code (from popping the queues up to the point
+                # where we decide if we actually have any pending messages) is
+                # atomic - otherwise new PDUs or EDUs might arrive in the
+                # meantime, but not get sent because we hold the
+                # pending_transactions flag.
+
+                pending_pdus = self.pending_pdus_by_dest.pop(destination, [])
+                pending_edus = self.pending_edus_by_dest.pop(destination, [])
+                pending_presence = self.pending_presence_by_dest.pop(destination, {})
+                pending_failures = self.pending_failures_by_dest.pop(destination, [])
+
+                pending_edus.extend(
+                    self.pending_edus_keyed_by_dest.pop(destination, {}).values()
+                )
+
                 pending_edus.extend(device_message_edus)
                 if pending_presence:
                     pending_edus.append(

@@ -355,6 +365,8 @@ class TransactionQueue(object):
                     )
                     return
 
+                # END CRITICAL SECTION
+
                 success = yield self._send_new_transaction(
                     destination, pending_pdus, pending_edus, pending_failures,
                     limiter=limiter,
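To illustrate what the BEGIN/END CRITICAL SECTION comments protect, here is a deliberately simplified, synchronous sketch of the same pattern (illustrative names only, not synapse's actual classes). The per-destination pending flag stops a second sending loop from starting, and the queues are popped immediately before deciding whether there is anything to send; in the real Twisted code the equivalent guarantee is that no yield sits between the pop and that decision.

    # Toy model of the drain-then-decide pattern. In synapse the atomicity comes
    # from the Twisted reactor (no yield between the pops and the emptiness
    # check); here everything is synchronous, so it holds trivially.
    class ToyTransactionQueue(object):
        def __init__(self, send):
            self.send = send
            self.pending_transactions = {}   # destination -> sentinel flag
            self.pending_pdus_by_dest = {}   # destination -> [pdu, ...]

        def enqueue(self, destination, pdu):
            self.pending_pdus_by_dest.setdefault(destination, []).append(pdu)
            self.attempt(destination)

        def attempt(self, destination):
            if destination in self.pending_transactions:
                # An existing loop holds the flag; it will pick this PDU up on
                # its next iteration, because the pop happens inside the loop.
                return
            try:
                self.pending_transactions[destination] = 1
                while True:
                    # Critical section: pop the queue, then immediately decide
                    # whether anything needs sending.
                    pending_pdus = self.pending_pdus_by_dest.pop(destination, [])
                    if not pending_pdus:
                        return
                    self.send(destination, pending_pdus)
            finally:
                self.pending_transactions.pop(destination, None)

    queue = ToyTransactionQueue(lambda dest, pdus: print(dest, len(pdus)))
    queue.enqueue("remote.example.com", {"type": "m.room.message"})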
@@ -356,7 +356,7 @@ class RoomCreationHandler(BaseHandler):
 
 class RoomContextHandler(BaseHandler):
     @defer.inlineCallbacks
-    def get_event_context(self, user, room_id, event_id, limit, is_guest):
+    def get_event_context(self, user, room_id, event_id, limit):
         """Retrieves events, pagination tokens and state around a given event
         in a room.
 

@@ -375,12 +375,15 @@ class RoomContextHandler(BaseHandler):
 
         now_token = yield self.hs.get_event_sources().get_current_token()
 
+        users = yield self.store.get_users_in_room(room_id)
+        is_peeking = user.to_string() not in users
+
         def filter_evts(events):
             return filter_events_for_client(
                 self.store,
                 user.to_string(),
                 events,
-                is_peeking=is_guest
+                is_peeking=is_peeking
             )
 
         event = yield self.store.get_event(event_id, get_prev_content=True,
@@ -719,7 +719,9 @@ class RoomMemberHandler(BaseHandler):
             )
             membership = member.membership if member else None
 
-            if membership is not None and membership != Membership.LEAVE:
+            if membership is not None and membership not in [
+                Membership.LEAVE, Membership.BAN
+            ]:
                 raise SynapseError(400, "User %s in room %s" % (
                     user_id, room_id
                 ))
@@ -505,7 +505,6 @@ class RoomEventContext(ClientV1RestServlet):
             room_id,
             event_id,
             limit,
-            requester.is_guest,
        )
 
        if not results:
@@ -84,7 +84,9 @@ class EventPushActionsStore(SQLBaseStore):
         )
 
         self._doing_notif_rotation = False
-        self._clock.looping_call(self._rotate_notifs, 30 * 60 * 1000)
+        self._rotate_notif_loop = self._clock.looping_call(
+            self._rotate_notifs, 30 * 60 * 1000
+        )
 
     def _set_push_actions_for_event_and_users_txn(self, txn, event, tuples):
         """
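The change above keeps the handle returned by the clock's looping_call in self._rotate_notif_loop rather than discarding it. As a hedged illustration (synapse's Clock appears to wrap Twisted's LoopingCall here), keeping such a handle is what allows the loop to be stopped later, for example during shutdown or in tests:

    # Illustrative only; assumes Twisted is installed. Run under a reactor for
    # the periodic behaviour; calling stop() before the next tick is still valid.
    from twisted.internet.task import LoopingCall

    def _rotate_notifs():
        print("rotating notification counts")

    rotate_notif_loop = LoopingCall(_rotate_notifs)
    rotate_notif_loop.start(30 * 60, now=True)  # every 30 minutes, run once now

    # ... later, the stored handle lets the loop be cancelled cleanly:
    rotate_notif_loop.stop()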
synapse/storage/schema/delta/40/pushers.sql (new file, 39 lines)

@@ -0,0 +1,39 @@
/* Copyright 2017 Vector Creations Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

CREATE TABLE IF NOT EXISTS pushers2 (
  id BIGINT PRIMARY KEY,
  user_name TEXT NOT NULL,
  access_token BIGINT DEFAULT NULL,
  profile_tag TEXT NOT NULL,
  kind TEXT NOT NULL,
  app_id TEXT NOT NULL,
  app_display_name TEXT NOT NULL,
  device_display_name TEXT NOT NULL,
  pushkey TEXT NOT NULL,
  ts BIGINT NOT NULL,
  lang TEXT,
  data TEXT,
  last_stream_ordering INTEGER,
  last_success BIGINT,
  failing_since BIGINT,
  UNIQUE (app_id, pushkey, user_name)
);

INSERT INTO pushers2 SELECT * FROM PUSHERS;

DROP TABLE PUSHERS;

ALTER TABLE pushers2 RENAME TO pushers;