merge master into dinsic, again...

commit 6e7488ce11
273 changed files with 12618 additions and 5176 deletions
.circleci/config.yml
@@ -1,48 +1,172 @@
 version: 2
 jobs:
+  dockerhubuploadrelease:
+    machine: true
+    steps:
+      - checkout
+      - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:${CIRCLE_TAG} .
+      - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:${CIRCLE_TAG}-py3 --build-arg PYTHON_VERSION=3.6 .
+      - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
+      - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}
+      - run: docker push matrixdotorg/synapse:${CIRCLE_TAG}-py3
+  dockerhubuploadlatest:
+    machine: true
+    steps:
+      - checkout
+      - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:${CIRCLE_SHA1} .
+      - run: docker build -f docker/Dockerfile -t matrixdotorg/synapse:${CIRCLE_SHA1}-py3 --build-arg PYTHON_VERSION=3.6 .
+      - run: docker login --username $DOCKER_HUB_USERNAME --password $DOCKER_HUB_PASSWORD
+      - run: docker tag matrixdotorg/synapse:${CIRCLE_SHA1} matrixdotorg/synapse:latest
+      - run: docker tag matrixdotorg/synapse:${CIRCLE_SHA1}-py3 matrixdotorg/synapse:latest-py3
+      - run: docker push matrixdotorg/synapse:${CIRCLE_SHA1}
+      - run: docker push matrixdotorg/synapse:${CIRCLE_SHA1}-py3
+      - run: docker push matrixdotorg/synapse:latest
+      - run: docker push matrixdotorg/synapse:latest-py3
   sytestpy2:
-    machine: true
+    docker:
+      - image: matrixdotorg/sytest-synapsepy2
+    working_directory: /src
     steps:
       - checkout
-      - run: docker pull matrixdotorg/sytest-synapsepy2
-      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs matrixdotorg/sytest-synapsepy2
+      - run: /synapse_sytest.sh
       - store_artifacts:
-          path: ~/project/logs
+          path: /logs
           destination: logs
+      - store_test_results:
+          path: /logs
   sytestpy2postgres:
-    machine: true
+    docker:
+      - image: matrixdotorg/sytest-synapsepy2
+    working_directory: /src
     steps:
       - checkout
-      - run: docker pull matrixdotorg/sytest-synapsepy2
-      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy2
+      - run: POSTGRES=1 /synapse_sytest.sh
       - store_artifacts:
-          path: ~/project/logs
+          path: /logs
           destination: logs
+      - store_test_results:
+          path: /logs
+  sytestpy2merged:
+    docker:
+      - image: matrixdotorg/sytest-synapsepy2
+    working_directory: /src
+    steps:
+      - checkout
+      - run: bash .circleci/merge_base_branch.sh
+      - run: /synapse_sytest.sh
+      - store_artifacts:
+          path: /logs
+          destination: logs
+      - store_test_results:
+          path: /logs
+  sytestpy2postgresmerged:
+    docker:
+      - image: matrixdotorg/sytest-synapsepy2
+    working_directory: /src
+    steps:
+      - checkout
+      - run: bash .circleci/merge_base_branch.sh
+      - run: POSTGRES=1 /synapse_sytest.sh
+      - store_artifacts:
+          path: /logs
+          destination: logs
+      - store_test_results:
+          path: /logs
 
   sytestpy3:
-    machine: true
+    docker:
+      - image: matrixdotorg/sytest-synapsepy3
+    working_directory: /src
     steps:
       - checkout
-      - run: docker pull matrixdotorg/sytest-synapsepy3
-      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs hawkowl/sytestpy3
+      - run: /synapse_sytest.sh
       - store_artifacts:
-          path: ~/project/logs
+          path: /logs
           destination: logs
+      - store_test_results:
+          path: /logs
   sytestpy3postgres:
-    machine: true
+    docker:
+      - image: matrixdotorg/sytest-synapsepy3
+    working_directory: /src
     steps:
       - checkout
-      - run: docker pull matrixdotorg/sytest-synapsepy3
-      - run: docker run --rm -it -v $(pwd)\:/src -v $(pwd)/logs\:/logs -e POSTGRES=1 matrixdotorg/sytest-synapsepy3
+      - run: POSTGRES=1 /synapse_sytest.sh
       - store_artifacts:
-          path: ~/project/logs
+          path: /logs
           destination: logs
+      - store_test_results:
+          path: /logs
+  sytestpy3merged:
+    docker:
+      - image: matrixdotorg/sytest-synapsepy3
+    working_directory: /src
+    steps:
+      - checkout
+      - run: bash .circleci/merge_base_branch.sh
+      - run: /synapse_sytest.sh
+      - store_artifacts:
+          path: /logs
+          destination: logs
+      - store_test_results:
+          path: /logs
+  sytestpy3postgresmerged:
+    docker:
+      - image: matrixdotorg/sytest-synapsepy3
+    working_directory: /src
+    steps:
+      - checkout
+      - run: bash .circleci/merge_base_branch.sh
+      - run: POSTGRES=1 /synapse_sytest.sh
+      - store_artifacts:
+          path: /logs
+          destination: logs
+      - store_test_results:
+          path: /logs
 
 workflows:
   version: 2
   build:
     jobs:
-      - sytestpy2
-      - sytestpy2postgres
-      # Currently broken while the Python 3 port is incomplete
-      # - sytestpy3
-      # - sytestpy3postgres
+      - sytestpy2:
+          filters:
+            branches:
+              only: /develop|master|release-.*/
+      - sytestpy2postgres:
+          filters:
+            branches:
+              only: /develop|master|release-.*/
+      - sytestpy3:
+          filters:
+            branches:
+              only: /develop|master|release-.*/
+      - sytestpy3postgres:
+          filters:
+            branches:
+              only: /develop|master|release-.*/
+      - sytestpy2merged:
+          filters:
+            branches:
+              ignore: /develop|master|release-.*/
+      - sytestpy2postgresmerged:
+          filters:
+            branches:
+              ignore: /develop|master|release-.*/
+      - sytestpy3merged:
+          filters:
+            branches:
+              ignore: /develop|master|release-.*/
+      - sytestpy3postgresmerged:
+          filters:
+            branches:
+              ignore: /develop|master|release-.*/
+      - dockerhubuploadrelease:
+          filters:
+            tags:
+              only: /v[0-9].[0-9]+.[0-9]+.*/
+            branches:
+              ignore: /.*/
+      - dockerhubuploadlatest:
+          filters:
+            branches:
+              only: master
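The two new Docker Hub jobs are plain shell steps, so they can be dry-run outside CI. Below is a minimal sketch of replaying the ``dockerhubuploadrelease`` steps locally, assuming a synapse checkout and Docker Hub credentials in the environment; the ``CIRCLE_TAG`` value is a hypothetical example, since in CI it is injected from the git tag::

    #!/usr/bin/env bash
    set -e

    CIRCLE_TAG="v0.33.8"   # hypothetical example; CircleCI supplies the real tag

    # Build the Python 2 and Python 3 images exactly as the job does
    docker build -f docker/Dockerfile -t matrixdotorg/synapse:${CIRCLE_TAG} .
    docker build -f docker/Dockerfile -t matrixdotorg/synapse:${CIRCLE_TAG}-py3 \
        --build-arg PYTHON_VERSION=3.6 .

    # Pushing needs DOCKER_HUB_USERNAME / DOCKER_HUB_PASSWORD set, as in CI
    docker login --username "$DOCKER_HUB_USERNAME" --password "$DOCKER_HUB_PASSWORD"
    docker push matrixdotorg/synapse:${CIRCLE_TAG}
    docker push matrixdotorg/synapse:${CIRCLE_TAG}-py3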
34  .circleci/merge_base_branch.sh  (new executable file)
@@ -0,0 +1,34 @@ (all lines added)

#!/usr/bin/env bash

set -e

# CircleCI doesn't give CIRCLE_PR_NUMBER in the environment for non-forked PRs. Wonderful.
# In this case, we just need to do some ~shell magic~ to strip it out of the PULL_REQUEST URL.
echo 'export CIRCLE_PR_NUMBER="${CIRCLE_PR_NUMBER:-${CIRCLE_PULL_REQUEST##*/}}"' >> $BASH_ENV
source $BASH_ENV

if [[ -z "${CIRCLE_PR_NUMBER}" ]]
then
    echo "Can't figure out what the PR number is! Assuming merge target is develop."

    # It probably hasn't had a PR opened yet. Since all PRs land on develop, we
    # can probably assume it's based on it and will be merged into it.
    GITBASE="develop"
else
    # Get the reference, using the GitHub API
    GITBASE=`wget -O- https://api.github.com/repos/matrix-org/synapse/pulls/${CIRCLE_PR_NUMBER} | jq -r '.base.ref'`
fi

# Show what we are before
git show -s

# Set up username so it can do a merge
git config --global user.email bot@matrix.org
git config --global user.name "A robot"

# Fetch and merge. If it doesn't work, it will raise due to set -e.
git fetch -u origin $GITBASE
git merge --no-edit origin/$GITBASE

# Show what we are after.
git show -s
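The ``CIRCLE_PR_NUMBER`` fallback relies on bash suffix-stripping: ``${VAR##*/}`` deletes everything up to and including the last ``/``, leaving just the final path segment. A minimal sketch with a hypothetical pull-request URL::

    # hypothetical example URL of the kind CircleCI sets
    CIRCLE_PULL_REQUEST="https://github.com/matrix-org/synapse/pull/4041"
    echo "${CIRCLE_PULL_REQUEST##*/}"   # prints: 4041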
.dockerignore
@@ -3,6 +3,5 @@ Dockerfile
 .gitignore
 demo/etc
 tox.ini
-synctl
 .git/*
 .tox/*
3  .gitignore  (vendored)
@@ -1,9 +1,11 @@
 *.pyc
 .*.swp
 *~
+*.lock
 
 .DS_Store
 _trial_temp/
+_trial_temp*/
 logs/
 dbs/
 *.egg
@@ -44,6 +46,7 @@ media_store/
 build/
 venv/
 venv*/
+*venv/
 
 localhost-800*/
 static/client/register/register_config.js
54  .travis.yml
@@ -1,15 +1,27 @@
 sudo: false
 language: python
 
-# tell travis to cache ~/.cache/pip
-cache: pip
+cache:
+  directories:
+    # we only bother to cache the wheels; parts of the http cache get
+    # invalidated every build (because they get served with a max-age of 600
+    # seconds), which means that we end up re-uploading the whole cache for
+    # every build, which is time-consuming In any case, it's not obvious that
+    # downloading the cache from S3 would be much faster than downloading the
+    # originals from pypi.
+    #
+    - $HOME/.cache/pip/wheels
 
-before_script:
-  - git remote set-branches --add origin develop
-  - git fetch origin develop
+# don't clone the whole repo history, one commit will do
+git:
+  depth: 1
 
-services:
-  - postgresql
+# only build branches we care about (PRs are built seperately)
+branches:
+  only:
+    - master
+    - develop
+    - /^release-v/
 
 matrix:
   fast_finish: true
@@ -17,27 +29,39 @@ matrix:
   - python: 2.7
     env: TOX_ENV=packaging
 
-  - python: 2.7
-    env: TOX_ENV=pep8
+  - python: 3.6
+    env: TOX_ENV="pep8,check_isort"
 
   - python: 2.7
     env: TOX_ENV=py27
 
+  - python: 2.7
+    env: TOX_ENV=py27-old
+
   - python: 2.7
     env: TOX_ENV=py27-postgres TRIAL_FLAGS="-j 4"
+    services:
+      - postgresql
+
+  - python: 3.5
+    env: TOX_ENV=py35
 
   - python: 3.6
     env: TOX_ENV=py36
 
   - python: 3.6
-    env: TOX_ENV=check_isort
+    env: TOX_ENV=py36-postgres TRIAL_FLAGS="-j 4"
+    services:
+      - postgresql
 
-  - python: 3.6
+  - # we only need to check for the newsfragment if it's a PR build
+    if: type = pull_request
+    python: 3.6
     env: TOX_ENV=check-newsfragment
+    script:
+      - git remote set-branches --add origin develop
+      - git fetch origin develop
+      - tox -e $TOX_ENV
 
-  allow_failures:
-    - python: 2.7
-      env: TOX_ENV=py27-postgres TRIAL_FLAGS="-j 4"
-
 install:
   - pip install tox
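Every matrix entry ultimately runs ``tox -e $TOX_ENV``, so any of them can be reproduced locally. A sketch for the newsfragment check, assuming a synapse checkout with pip available; the develop-branch fetch mirrors the entry's ``script`` block above::

    pip install tox

    # check-newsfragment compares against develop, so make it available locally
    git remote set-branches --add origin develop
    git fetch origin develop

    tox -e check-newsfragment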
346  CHANGES.md
@@ -1,3 +1,349 @@ (new release notes added above the existing "Synapse 0.33.3 (2018-08-22)" heading, which is unchanged)

Synapse 0.33.8 (2018-11-01)
===========================

No significant changes.


Synapse 0.33.8rc2 (2018-10-31)
==============================

Bugfixes
--------

- Searches that request profile info now no longer fail with a 500. Fixes
  a regression in 0.33.8rc1. ([\#4122](https://github.com/matrix-org/synapse/issues/4122))


Synapse 0.33.8rc1 (2018-10-29)
==============================

Features
--------

- Servers with auto-join rooms will now automatically create those rooms when the first user registers ([\#3975](https://github.com/matrix-org/synapse/issues/3975))
- Add config option to control alias creation ([\#4051](https://github.com/matrix-org/synapse/issues/4051))
- The register_new_matrix_user script is now ported to Python 3. ([\#4085](https://github.com/matrix-org/synapse/issues/4085))
- Configure Docker image to listen on both ipv4 and ipv6. ([\#4089](https://github.com/matrix-org/synapse/issues/4089))


Bugfixes
--------

- Fix HTTP error response codes for federated group requests. ([\#3969](https://github.com/matrix-org/synapse/issues/3969))
- Fix issue where Python 3 users couldn't paginate /publicRooms ([\#4046](https://github.com/matrix-org/synapse/issues/4046))
- Fix URL previewing to work in Python 3.7 ([\#4050](https://github.com/matrix-org/synapse/issues/4050))
- synctl will use the right python executable to run worker processes ([\#4057](https://github.com/matrix-org/synapse/issues/4057))
- Manhole now works again on Python 3, instead of failing with a "couldn't match all kex parts" when connecting. ([\#4060](https://github.com/matrix-org/synapse/issues/4060), [\#4067](https://github.com/matrix-org/synapse/issues/4067))
- Fix some metrics being racy and causing exceptions when polled by Prometheus. ([\#4061](https://github.com/matrix-org/synapse/issues/4061))
- Fix bug which prevented email notifications from being sent unless an absolute path was given for `email_templates`. ([\#4068](https://github.com/matrix-org/synapse/issues/4068))
- Correctly account for cpu usage by background threads ([\#4074](https://github.com/matrix-org/synapse/issues/4074))
- Fix race condition where config defined reserved users were not being added to
  the monthly active user list prior to the homeserver reactor firing up ([\#4081](https://github.com/matrix-org/synapse/issues/4081))
- Fix bug which prevented backslashes being used in event field filters ([\#4083](https://github.com/matrix-org/synapse/issues/4083))


Internal Changes
----------------

- Add information about the [matrix-docker-ansible-deploy](https://github.com/spantaleev/matrix-docker-ansible-deploy) playbook ([\#3698](https://github.com/matrix-org/synapse/issues/3698))
- Add initial implementation of new state resolution algorithm ([\#3786](https://github.com/matrix-org/synapse/issues/3786))
- Reduce database load when fetching state groups ([\#4011](https://github.com/matrix-org/synapse/issues/4011))
- Various cleanups in the federation client code ([\#4031](https://github.com/matrix-org/synapse/issues/4031))
- Run the CircleCI builds in docker containers ([\#4041](https://github.com/matrix-org/synapse/issues/4041))
- Only colourise synctl output when attached to tty ([\#4049](https://github.com/matrix-org/synapse/issues/4049))
- Refactor room alias creation code ([\#4063](https://github.com/matrix-org/synapse/issues/4063))
- Make the Python scripts in the top-level scripts folders meet pep8 and pass flake8. ([\#4068](https://github.com/matrix-org/synapse/issues/4068))
- The README now contains example for the Caddy web server. Contributed by steamp0rt. ([\#4072](https://github.com/matrix-org/synapse/issues/4072))
- Add psutil as an explicit dependency ([\#4073](https://github.com/matrix-org/synapse/issues/4073))
- Clean up threading and logcontexts in pushers ([\#4075](https://github.com/matrix-org/synapse/issues/4075))
- Correctly manage logcontexts during startup to fix some "Unexpected logging context" warnings ([\#4076](https://github.com/matrix-org/synapse/issues/4076))
- Give some more things logcontexts ([\#4077](https://github.com/matrix-org/synapse/issues/4077))
- Clean up some bits of code which were flagged by the linter ([\#4082](https://github.com/matrix-org/synapse/issues/4082))


Synapse 0.33.7 (2018-10-18)
===========================

**Warning**: This release removes the example email notification templates from
`res/templates` (they are now internal to the python package). This should only
affect you if you (a) deploy your Synapse instance from a git checkout or a
github snapshot URL, and (b) have email notifications enabled.

If you have email notifications enabled, you should ensure that
`email.template_dir` is either configured to point at a directory where you
have installed customised templates, or leave it unset to use the default
templates.

Synapse 0.33.7rc2 (2018-10-17)
==============================

Features
--------

- Ship the example email templates as part of the package ([\#4052](https://github.com/matrix-org/synapse/issues/4052))

Bugfixes
--------

- Fix bug which made get_missing_events return too few events ([\#4045](https://github.com/matrix-org/synapse/issues/4045))


Synapse 0.33.7rc1 (2018-10-15)
==============================

Features
--------

- Add support for end-to-end key backup (MSC1687) ([\#4019](https://github.com/matrix-org/synapse/issues/4019))


Bugfixes
--------

- Fix bug in event persistence logic which caused 'NoneType is not iterable' ([\#3995](https://github.com/matrix-org/synapse/issues/3995))
- Fix exception in background metrics collection ([\#3996](https://github.com/matrix-org/synapse/issues/3996))
- Fix exception handling in fetching remote profiles ([\#3997](https://github.com/matrix-org/synapse/issues/3997))
- Fix handling of rejected threepid invites ([\#3999](https://github.com/matrix-org/synapse/issues/3999))
- Workers now start on Python 3. ([\#4027](https://github.com/matrix-org/synapse/issues/4027))
- Synapse now starts on Python 3.7. ([\#4033](https://github.com/matrix-org/synapse/issues/4033))


Internal Changes
----------------

- Log exceptions in looping calls ([\#4008](https://github.com/matrix-org/synapse/issues/4008))
- Optimisation for serving federation requests ([\#4017](https://github.com/matrix-org/synapse/issues/4017))
- Add metric to count number of non-empty sync responses ([\#4022](https://github.com/matrix-org/synapse/issues/4022))


Synapse 0.33.6 (2018-10-04)
===========================

Internal Changes
----------------

- Pin to prometheus_client<0.4 to avoid renaming all of our metrics ([\#4002](https://github.com/matrix-org/synapse/issues/4002))


Synapse 0.33.6rc1 (2018-10-03)
==============================

Features
--------

- Adding the ability to change MAX_UPLOAD_SIZE for the docker container variables. ([\#3883](https://github.com/matrix-org/synapse/issues/3883))
- Report "python_version" in the phone home stats ([\#3894](https://github.com/matrix-org/synapse/issues/3894))
- Always LL ourselves if we're in a room ([\#3916](https://github.com/matrix-org/synapse/issues/3916))
- Include eventid in log lines when processing incoming federation transactions ([\#3959](https://github.com/matrix-org/synapse/issues/3959))
- Remove spurious check which made 'localhost' servers not work ([\#3964](https://github.com/matrix-org/synapse/issues/3964))


Bugfixes
--------

- Fix problem when playing media from Chrome using direct URL (thanks @remjey!) ([\#3578](https://github.com/matrix-org/synapse/issues/3578))
- support registering regular users non-interactively with register_new_matrix_user script ([\#3836](https://github.com/matrix-org/synapse/issues/3836))
- Fix broken invite email links for self hosted riots ([\#3868](https://github.com/matrix-org/synapse/issues/3868))
- Don't ratelimit autojoins ([\#3879](https://github.com/matrix-org/synapse/issues/3879))
- Fix 500 error when deleting unknown room alias ([\#3889](https://github.com/matrix-org/synapse/issues/3889))
- Fix some b'abcd' noise in logs and metrics ([\#3892](https://github.com/matrix-org/synapse/issues/3892), [\#3895](https://github.com/matrix-org/synapse/issues/3895))
- When we join a room, always try the server we used for the alias lookup first, to avoid unresponsive and out-of-date servers. ([\#3899](https://github.com/matrix-org/synapse/issues/3899))
- Fix incorrect server-name indication for outgoing federation requests ([\#3907](https://github.com/matrix-org/synapse/issues/3907))
- Fix adding client IPs to the database failing on Python 3. ([\#3908](https://github.com/matrix-org/synapse/issues/3908))
- Fix bug where things occaisonally were not being timed out correctly. ([\#3910](https://github.com/matrix-org/synapse/issues/3910))
- Fix bug where outbound federation would stop talking to some servers when using workers ([\#3914](https://github.com/matrix-org/synapse/issues/3914))
- Fix some instances of ExpiringCache not expiring cache items ([\#3932](https://github.com/matrix-org/synapse/issues/3932), [\#3980](https://github.com/matrix-org/synapse/issues/3980))
- Fix out-of-bounds error when LLing yourself ([\#3936](https://github.com/matrix-org/synapse/issues/3936))
- Sending server notices regarding user consent now works on Python 3. ([\#3938](https://github.com/matrix-org/synapse/issues/3938))
- Fix exceptions from metrics handler ([\#3956](https://github.com/matrix-org/synapse/issues/3956))
- Fix error message for events with m.room.create missing from auth_events ([\#3960](https://github.com/matrix-org/synapse/issues/3960))
- Fix errors due to concurrent monthly_active_user upserts ([\#3961](https://github.com/matrix-org/synapse/issues/3961))
- Fix exceptions when processing incoming events over federation ([\#3968](https://github.com/matrix-org/synapse/issues/3968))
- Replaced all occurences of e.message with str(e). Contributed by Schnuffle ([\#3970](https://github.com/matrix-org/synapse/issues/3970))
- Fix lazy loaded sync in the presence of rejected state events ([\#3986](https://github.com/matrix-org/synapse/issues/3986))
- Fix error when logging incomplete HTTP requests ([\#3990](https://github.com/matrix-org/synapse/issues/3990))


Internal Changes
----------------

- Unit tests can now be run under PostgreSQL in Docker using ``test_postgresql.sh``. ([\#3699](https://github.com/matrix-org/synapse/issues/3699))
- Speed up calculation of typing updates for replication ([\#3794](https://github.com/matrix-org/synapse/issues/3794))
- Remove documentation regarding installation on Cygwin, the use of WSL is recommended instead. ([\#3873](https://github.com/matrix-org/synapse/issues/3873))
- Fix typo in README, synaspse -> synapse ([\#3897](https://github.com/matrix-org/synapse/issues/3897))
- Increase the timeout when filling missing events in federation requests ([\#3903](https://github.com/matrix-org/synapse/issues/3903))
- Improve the logging when handling a federation transaction ([\#3904](https://github.com/matrix-org/synapse/issues/3904), [\#3966](https://github.com/matrix-org/synapse/issues/3966))
- Improve logging of outbound federation requests ([\#3906](https://github.com/matrix-org/synapse/issues/3906), [\#3909](https://github.com/matrix-org/synapse/issues/3909))
- Fix the docker image building on python 3 ([\#3911](https://github.com/matrix-org/synapse/issues/3911))
- Add a regression test for logging failed HTTP requests on Python 3. ([\#3912](https://github.com/matrix-org/synapse/issues/3912))
- Comments and interface cleanup for on_receive_pdu ([\#3924](https://github.com/matrix-org/synapse/issues/3924))
- Fix spurious exceptions when remote http client closes conncetion ([\#3925](https://github.com/matrix-org/synapse/issues/3925))
- Log exceptions thrown by background tasks ([\#3927](https://github.com/matrix-org/synapse/issues/3927))
- Add a cache to get_destination_retry_timings ([\#3933](https://github.com/matrix-org/synapse/issues/3933), [\#3991](https://github.com/matrix-org/synapse/issues/3991))
- Automate pushes to docker hub ([\#3946](https://github.com/matrix-org/synapse/issues/3946))
- Require attrs 16.0.0 or later ([\#3947](https://github.com/matrix-org/synapse/issues/3947))
- Fix incompatibility with python3 on alpine ([\#3948](https://github.com/matrix-org/synapse/issues/3948))
- Run the test suite on the oldest supported versions of our dependencies in CI. ([\#3952](https://github.com/matrix-org/synapse/issues/3952))
- CircleCI now only runs merged jobs on PRs, and commit jobs on develop, master, and release branches. ([\#3957](https://github.com/matrix-org/synapse/issues/3957))
- Fix docstrings and add tests for state store methods ([\#3958](https://github.com/matrix-org/synapse/issues/3958))
- fix docstring for FederationClient.get_state_for_room ([\#3963](https://github.com/matrix-org/synapse/issues/3963))
- Run notify_app_services as a bg process ([\#3965](https://github.com/matrix-org/synapse/issues/3965))
- Clarifications in FederationHandler ([\#3967](https://github.com/matrix-org/synapse/issues/3967))
- Further reduce the docker image size ([\#3972](https://github.com/matrix-org/synapse/issues/3972))
- Build py3 docker images for docker hub too ([\#3976](https://github.com/matrix-org/synapse/issues/3976))
- Updated the installation instructions to point to the matrix-synapse package on PyPI. ([\#3985](https://github.com/matrix-org/synapse/issues/3985))
- Disable USE_FROZEN_DICTS for unittests by default. ([\#3987](https://github.com/matrix-org/synapse/issues/3987))
- Remove unused Jenkins and development related files from the repo. ([\#3988](https://github.com/matrix-org/synapse/issues/3988))
- Improve stacktraces in certain exceptions in the logs ([\#3989](https://github.com/matrix-org/synapse/issues/3989))


Synapse 0.33.5.1 (2018-09-25)
=============================

Internal Changes
----------------

- Fix incompatibility with older Twisted version in tests. Thanks @OlegGirko! ([\#3940](https://github.com/matrix-org/synapse/issues/3940))


Synapse 0.33.5 (2018-09-24)
===========================

No significant changes.


Synapse 0.33.5rc1 (2018-09-17)
==============================

Features
--------

- Python 3.5 and 3.6 support is now in beta. ([\#3576](https://github.com/matrix-org/synapse/issues/3576))
- Implement `event_format` filter param in `/sync` ([\#3790](https://github.com/matrix-org/synapse/issues/3790))
- Add synapse_admin_mau:registered_reserved_users metric to expose number of real reaserved users ([\#3846](https://github.com/matrix-org/synapse/issues/3846))


Bugfixes
--------

- Remove connection ID for replication prometheus metrics, as it creates a large number of new series. ([\#3788](https://github.com/matrix-org/synapse/issues/3788))
- guest users should not be part of mau total ([\#3800](https://github.com/matrix-org/synapse/issues/3800))
- Bump dependency on pyopenssl 16.x, to avoid incompatibility with recent Twisted. ([\#3804](https://github.com/matrix-org/synapse/issues/3804))
- Fix existing room tags not coming down sync when joining a room ([\#3810](https://github.com/matrix-org/synapse/issues/3810))
- Fix jwt import check ([\#3824](https://github.com/matrix-org/synapse/issues/3824))
- fix VOIP crashes under Python 3 (#3821) ([\#3835](https://github.com/matrix-org/synapse/issues/3835))
- Fix manhole so that it works with latest openssh clients ([\#3841](https://github.com/matrix-org/synapse/issues/3841))
- Fix outbound requests occasionally wedging, which can result in federation breaking between servers. ([\#3845](https://github.com/matrix-org/synapse/issues/3845))
- Show heroes if room name/canonical alias has been deleted ([\#3851](https://github.com/matrix-org/synapse/issues/3851))
- Fix handling of redacted events from federation ([\#3859](https://github.com/matrix-org/synapse/issues/3859))
- ([\#3874](https://github.com/matrix-org/synapse/issues/3874))
- Mitigate outbound federation randomly becoming wedged ([\#3875](https://github.com/matrix-org/synapse/issues/3875))


Internal Changes
----------------

- CircleCI tests now run on the potential merge of a PR. ([\#3704](https://github.com/matrix-org/synapse/issues/3704))
- http/ is now ported to Python 3. ([\#3771](https://github.com/matrix-org/synapse/issues/3771))
- Improve human readable error messages for threepid registration/account update ([\#3789](https://github.com/matrix-org/synapse/issues/3789))
- Make /sync slightly faster by avoiding needless copies ([\#3795](https://github.com/matrix-org/synapse/issues/3795))
- handlers/ is now ported to Python 3. ([\#3803](https://github.com/matrix-org/synapse/issues/3803))
- Limit the number of PDUs/EDUs per federation transaction ([\#3805](https://github.com/matrix-org/synapse/issues/3805))
- Only start postgres instance for postgres tests on Travis CI ([\#3806](https://github.com/matrix-org/synapse/issues/3806))
- tests/ is now ported to Python 3. ([\#3808](https://github.com/matrix-org/synapse/issues/3808))
- crypto/ is now ported to Python 3. ([\#3822](https://github.com/matrix-org/synapse/issues/3822))
- rest/ is now ported to Python 3. ([\#3823](https://github.com/matrix-org/synapse/issues/3823))
- add some logging for the keyring queue ([\#3826](https://github.com/matrix-org/synapse/issues/3826))
- speed up lazy loading by 2-3x ([\#3827](https://github.com/matrix-org/synapse/issues/3827))
- Improved Dockerfile to remove build requirements after building reducing the image size. ([\#3834](https://github.com/matrix-org/synapse/issues/3834))
- Disable lazy loading for incremental syncs for now ([\#3840](https://github.com/matrix-org/synapse/issues/3840))
- federation/ is now ported to Python 3. ([\#3847](https://github.com/matrix-org/synapse/issues/3847))
- Log when we retry outbound requests ([\#3853](https://github.com/matrix-org/synapse/issues/3853))
- Removed some excess logging messages. ([\#3855](https://github.com/matrix-org/synapse/issues/3855))
- Speed up purge history for rooms that have been previously purged ([\#3856](https://github.com/matrix-org/synapse/issues/3856))
- Refactor some HTTP timeout code. ([\#3857](https://github.com/matrix-org/synapse/issues/3857))
- Fix running merged builds on CircleCI ([\#3858](https://github.com/matrix-org/synapse/issues/3858))
- Fix typo in replication stream exception. ([\#3860](https://github.com/matrix-org/synapse/issues/3860))
- Add in flight real time metrics for Measure blocks ([\#3871](https://github.com/matrix-org/synapse/issues/3871))
- Disable buffering and automatic retrying in treq requests to prevent timeouts. ([\#3872](https://github.com/matrix-org/synapse/issues/3872))
- mention jemalloc in the README ([\#3877](https://github.com/matrix-org/synapse/issues/3877))
- Remove unmaintained "nuke-room-from-db.sh" script ([\#3888](https://github.com/matrix-org/synapse/issues/3888))


Synapse 0.33.4 (2018-09-07)
===========================

Internal Changes
----------------

- Unignore synctl in .dockerignore to fix docker builds ([\#3802](https://github.com/matrix-org/synapse/issues/3802))


Synapse 0.33.4rc2 (2018-09-06)
==============================

Pull in security fixes from v0.33.3.1


Synapse 0.33.3.1 (2018-09-06)
=============================

SECURITY FIXES
--------------

- Fix an issue where event signatures were not always correctly validated ([\#3796](https://github.com/matrix-org/synapse/issues/3796))
- Fix an issue where server_acls could be circumvented for incoming events ([\#3796](https://github.com/matrix-org/synapse/issues/3796))


Internal Changes
----------------

- Unignore synctl in .dockerignore to fix docker builds ([\#3802](https://github.com/matrix-org/synapse/issues/3802))


Synapse 0.33.4rc1 (2018-09-04)
==============================

Features
--------

- Support profile API endpoints on workers ([\#3659](https://github.com/matrix-org/synapse/issues/3659))
- Server notices for resource limit blocking ([\#3680](https://github.com/matrix-org/synapse/issues/3680))
- Allow guests to use /rooms/:roomId/event/:eventId ([\#3724](https://github.com/matrix-org/synapse/issues/3724))
- Add mau_trial_days config param, so that users only get counted as MAU after N days. ([\#3749](https://github.com/matrix-org/synapse/issues/3749))
- Require twisted 17.1 or later (fixes [#3741](https://github.com/matrix-org/synapse/issues/3741)). ([\#3751](https://github.com/matrix-org/synapse/issues/3751))


Bugfixes
--------

- Fix error collecting prometheus metrics when run on dedicated thread due to threading concurrency issues ([\#3722](https://github.com/matrix-org/synapse/issues/3722))
- Fix bug where we resent "limit exceeded" server notices repeatedly ([\#3747](https://github.com/matrix-org/synapse/issues/3747))
- Fix bug where we broke sync when using limit_usage_by_mau but hadn't configured server notices ([\#3753](https://github.com/matrix-org/synapse/issues/3753))
- Fix 'federation_domain_whitelist' such that an empty list correctly blocks all outbound federation traffic ([\#3754](https://github.com/matrix-org/synapse/issues/3754))
- Fix tagging of server notice rooms ([\#3755](https://github.com/matrix-org/synapse/issues/3755), [\#3756](https://github.com/matrix-org/synapse/issues/3756))
- Fix 'admin_uri' config variable and error parameter to be 'admin_contact' to match the spec. ([\#3758](https://github.com/matrix-org/synapse/issues/3758))
- Don't return non-LL-member state in incremental sync state blocks ([\#3760](https://github.com/matrix-org/synapse/issues/3760))
- Fix bug in sending presence over federation ([\#3768](https://github.com/matrix-org/synapse/issues/3768))
- Fix bug where preserved threepid user comes to sign up and server is mau blocked ([\#3777](https://github.com/matrix-org/synapse/issues/3777))

Internal Changes
----------------

- Removed the link to the unmaintained matrix-synapse-auto-deploy project from the readme. ([\#3378](https://github.com/matrix-org/synapse/issues/3378))
- Refactor state module to support multiple room versions ([\#3673](https://github.com/matrix-org/synapse/issues/3673))
- The synapse.storage module has been ported to Python 3. ([\#3725](https://github.com/matrix-org/synapse/issues/3725))
- Split the state_group_cache into member and non-member state events (and so speed up LL /sync) ([\#3726](https://github.com/matrix-org/synapse/issues/3726))
- Log failure to authenticate remote servers as warnings (without stack traces) ([\#3727](https://github.com/matrix-org/synapse/issues/3727))
- The CONTRIBUTING guidelines have been updated to mention our use of Markdown and that .misc files have content. ([\#3730](https://github.com/matrix-org/synapse/issues/3730))
- Reference the need for an HTTP replication port when using the federation_reader worker ([\#3734](https://github.com/matrix-org/synapse/issues/3734))
- Fix minor spelling error in federation client documentation. ([\#3735](https://github.com/matrix-org/synapse/issues/3735))
- Remove redundant state resolution function ([\#3737](https://github.com/matrix-org/synapse/issues/3737))
- The test suite now passes on PostgreSQL. ([\#3740](https://github.com/matrix-org/synapse/issues/3740))
- Fix MAU cache invalidation due to missing yield ([\#3746](https://github.com/matrix-org/synapse/issues/3746))
- Make sure that we close db connections opened during init ([\#3764](https://github.com/matrix-org/synapse/issues/3764))


Synapse 0.33.3 (2018-08-22)
===========================
CONTRIBUTING.rst
@@ -30,12 +30,28 @@ use github's pull request workflow to review the contribution, and either ask
 you to make any refinements needed or merge it and make them ourselves. The
 changes will then land on master when we next do a release.
 
-We use `Jenkins <http://matrix.org/jenkins>`_ and
-`Travis <https://travis-ci.org/matrix-org/synapse>`_ for continuous
-integration. All pull requests to synapse get automatically tested by Travis;
-the Jenkins builds require an adminstrator to start them. If your change
-breaks the build, this will be shown in github, so please keep an eye on the
-pull request for feedback.
+We use `CircleCI <https://circleci.com/gh/matrix-org>`_ and `Travis CI
+<https://travis-ci.org/matrix-org/synapse>`_ for continuous integration. All
+pull requests to synapse get automatically tested by Travis and CircleCI.
+If your change breaks the build, this will be shown in GitHub, so please
+keep an eye on the pull request for feedback.
+
+To run unit tests in a local development environment, you can use:
+
+- ``tox -e py27`` (requires tox to be installed by ``pip install tox``) for
+  SQLite-backed Synapse on Python 2.7.
+- ``tox -e py35`` for SQLite-backed Synapse on Python 3.5.
+- ``tox -e py36`` for SQLite-backed Synapse on Python 3.6.
+- ``tox -e py27-postgres`` for PostgreSQL-backed Synapse on Python 2.7
+  (requires a running local PostgreSQL with access to create databases).
+- ``./test_postgresql.sh`` for PostgreSQL-backed Synapse on Python 2.7
+  (requires Docker). Entirely self-contained, recommended if you don't want to
+  set up PostgreSQL yourself.
+
+Docker images are available for running the integration tests (SyTest) locally,
+see the `documentation in the SyTest repo
+<https://github.com/matrix-org/sytest/blob/develop/docker/README.md>`_ for more
+information.
 
 Code style
 ~~~~~~~~~~
@@ -59,9 +75,10 @@ To create a changelog entry, make a new file in the ``changelog.d``
 file named in the format of ``PRnumber.type``. The type can be
 one of ``feature``, ``bugfix``, ``removal`` (also used for
 deprecations), or ``misc`` (for internal-only changes). The content of
-the file is your changelog entry, which can contain RestructuredText
-formatting. A note of contributors is welcomed in changelogs for
-non-misc changes (the content of misc changes is not displayed).
+the file is your changelog entry, which can contain Markdown
+formatting. Adding credits to the changelog is encouraged, we value
+your contributions and would like to have you shouted out in the
+release notes!
 
 For example, a fix in PR #1234 would have its changelog entry in
 ``changelog.d/1234.bugfix``, and contain content like "The security levels of
@@ -76,7 +93,8 @@ AUTHORS.rst file for the project in question. Please feel free to include a
 change to AUTHORS.rst in your pull request to list yourself and a short
 description of the area(s) you've worked on. Also, we sometimes have swag to
 give away to contributors - if you feel that Matrix-branded apparel is missing
-from your life, please mail us your shipping address to matrix at matrix.org and we'll try to fix it :)
+from your life, please mail us your shipping address to matrix at matrix.org and
+we'll try to fix it :)
 
 Sign off
 ~~~~~~~~
@@ -143,4 +161,9 @@ flag to ``git commit``, which uses the name and email set in your
 Conclusion
 ~~~~~~~~~~
 
-That's it! Matrix is a very open and collaborative project as you might expect given our obsession with open communication. If we're going to successfully matrix together all the fragmented communication technologies out there we are reliant on contributions and collaboration from the community to do so. So please get involved - and we hope you have as much fun hacking on Matrix as we do!
+That's it! Matrix is a very open and collaborative project as you might expect
+given our obsession with open communication. If we're going to successfully
+matrix together all the fragmented communication technologies out there we are
+reliant on contributions and collaboration from the community to do so. So
+please get involved - and we hope you have as much fun hacking on Matrix as we
+do!
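Following the convention above, creating a changelog entry is a one-liner. A sketch for the hypothetical PR #1234 that the guidelines use as their example (the entry text here is a placeholder)::

    # placeholder content; write a one-line summary of your actual change
    echo "Fix a bug in such-and-such handling." > changelog.d/1234.bugfix

    # the CI check that validates the entry exists and is well-formed
    tox -e check-newsfragment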
10  MANIFEST.in
@@ -12,23 +12,20 @@ recursive-include synapse/storage/schema *.sql
 recursive-include synapse/storage/schema *.py
 
 recursive-include docs *
-recursive-include res *
 recursive-include scripts *
 recursive-include scripts-dev *
 recursive-include synapse *.pyi
 recursive-include tests *.py
 
+recursive-include synapse/res *
 recursive-include synapse/static *.css
 recursive-include synapse/static *.gif
 recursive-include synapse/static *.html
 recursive-include synapse/static *.js
 
-exclude jenkins.sh
-exclude jenkins*.sh
-exclude jenkins*
 exclude Dockerfile
 exclude .dockerignore
-recursive-exclude jenkins *.sh
+exclude test_postgresql.sh
 
 include pyproject.toml
 recursive-include changelog.d *
@@ -37,3 +34,6 @@ prune .github
 prune demo/etc
 prune docker
 prune .circleci
+
+exclude jenkins*
+recursive-exclude jenkins *.sh
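Since the manifest now ships ``synapse/res`` and drops the Jenkins scripts, one way to sanity-check the packaging result is to build an sdist and inspect its contents. A sketch, assuming a synapse checkout with setuptools available (the grep patterns are illustrative)::

    # build a source distribution using the manifest above
    python setup.py sdist

    # synapse/res files should be listed; jenkins files should not appear
    tar -tzf dist/*.tar.gz | grep -E 'synapse/res|jenkins'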
35  MAP.rst  (file deleted)
@@ -1,35 +0,0 @@ (all lines removed; the former contents were:)

Directory Structure
===================

Warning: this may be a bit stale...

::

    .
    ├── cmdclient           Basic CLI python Matrix client
    ├── demo                Scripts for running standalone Matrix demos
    ├── docs                All doc, including the draft Matrix API spec
    │   ├── client-server   The client-server Matrix API spec
    │   ├── model           Domain-specific elements of the Matrix API spec
    │   ├── server-server   The server-server model of the Matrix API spec
    │   └── sphinx          The internal API doc of the Synapse homeserver
    ├── experiments         Early experiments of using Synapse's internal APIs
    ├── graph               Visualisation of Matrix's distributed message store
    ├── synapse             The reference Matrix homeserver implementation
    │   ├── api             Common building blocks for the APIs
    │   │   ├── events      Definition of state representation Events
    │   │   └── streams     Definition of streamable Event objects
    │   ├── app             The __main__ entry point for the homeserver
    │   ├── crypto          The PKI client/server used for secure federation
    │   │   └── resource    PKI helper objects (e.g. keys)
    │   ├── federation      Server-server state replication logic
    │   ├── handlers        The main business logic of the homeserver
    │   ├── http            Wrappers around Twisted's HTTP server & client
    │   ├── rest            Servlet-style RESTful API
    │   ├── storage         Persistence subsystem (currently only sqlite3)
    │   │   └── schema      sqlite persistence schema
    │   └── util            Synapse-specific utilities
    ├── tests               Unit tests for the Synapse homeserver
    └── webclient           Basic AngularJS Matrix web client
150  README.rst
@@ -81,7 +81,7 @@ Thanks for using Matrix!
 Synapse Installation
 ====================
 
-Synapse is the reference python/twisted Matrix homeserver implementation.
+Synapse is the reference Python/Twisted Matrix homeserver implementation.
 
 System requirements:
 
@@ -91,12 +91,13 @@ System requirements:
 Installing from source
 ----------------------
 
 (Prebuilt packages are available for some platforms - see `Platform-Specific
 Instructions`_.)
 
-Synapse is written in python but some of the libraries it uses are written in
-C. So before we can install synapse itself we need a working C compiler and the
-header files for python C extensions.
+Synapse is written in Python but some of the libraries it uses are written in
+C. So before we can install Synapse itself we need a working C compiler and the
+header files for Python C extensions.
 
 Installing prerequisites on Ubuntu or Debian::
 
@@ -143,21 +144,27 @@ Installing prerequisites on OpenBSD::
     doas pkg_add python libffi py-pip py-setuptools sqlite3 py-virtualenv \
                  libxslt
 
-To install the synapse homeserver run::
+To install the Synapse homeserver run::
 
     virtualenv -p python2.7 ~/.synapse
     source ~/.synapse/bin/activate
     pip install --upgrade pip
     pip install --upgrade setuptools
-    pip install https://github.com/matrix-org/synapse/tarball/master
+    pip install matrix-synapse
 
-This installs synapse, along with the libraries it uses, into a virtual
+This installs Synapse, along with the libraries it uses, into a virtual
 environment under ``~/.synapse``. Feel free to pick a different directory
 if you prefer.
 
+This Synapse installation can then be later upgraded by using pip again with the
+update flag::
+
+    source ~/.synapse/bin/activate
+    pip install -U matrix-synapse
+
 In case of problems, please see the _`Troubleshooting` section below.
 
 There is an offical synapse image available at
 https://hub.docker.com/r/matrixdotorg/synapse/tags/ which can be used with
 the docker-compose file available at `contrib/docker <contrib/docker>`_. Further information on
 this including configuration options is available in the README on
@@ -167,12 +174,13 @@ Alternatively, Andreas Peters (previously Silvio Fricke) has contributed a
 Dockerfile to automate a synapse server in a single Docker image, at
 https://hub.docker.com/r/avhost/docker-matrix/tags/
 
-Also, Martin Giess has created an auto-deployment process with vagrant/ansible,
-tested with VirtualBox/AWS/DigitalOcean - see
-https://github.com/EMnify/matrix-synapse-auto-deploy
-for details.
+Slavi Pantaleev has created an Ansible playbook,
+which installs the offical Docker image of Matrix Synapse
+along with many other Matrix-related services (Postgres database, riot-web, coturn, mxisd, SSL support, etc.).
+For more details, see
+https://github.com/spantaleev/matrix-docker-ansible-deploy
 
-Configuring synapse
+Configuring Synapse
 -------------------
 
 Before you can start Synapse, you will need to generate a configuration
@@ -254,26 +262,6 @@ Setting up a TURN server
 For reliable VoIP calls to be routed via this homeserver, you MUST configure
 a TURN server. See `<docs/turn-howto.rst>`_ for details.
 
-IPv6
-----
-
-As of Synapse 0.19 we finally support IPv6, many thanks to @kyrias and @glyph
-for providing PR #1696.
-
-However, for federation to work on hosts with IPv6 DNS servers you **must**
-be running Twisted 17.1.0 or later - see https://github.com/matrix-org/synapse/issues/1002
-for details. We can't make Synapse depend on Twisted 17.1 by default
-yet as it will break most older distributions (see https://github.com/matrix-org/synapse/pull/1909)
-so if you are using operating system dependencies you'll have to install your
-own Twisted 17.1 package via pip or backports etc.
-
-If you're running in a virtualenv then pip should have installed the newest
-Twisted automatically, but if your virtualenv is old you will need to manually
-upgrade to a newer Twisted dependency via:
-
-    pip install Twisted>=17.1.0
-
 Running Synapse
 ===============
 
@@ -449,8 +437,7 @@ settings require a slightly more difficult installation process.
    using the ``.`` command, rather than ``bash``'s ``source``.
 5) Optionally, use ``pip`` to install ``lxml``, which Synapse needs to parse
    webpages for their titles.
-6) Use ``pip`` to install this repository: ``pip install
-   https://github.com/matrix-org/synapse/tarball/master``
+6) Use ``pip`` to install this repository: ``pip install matrix-synapse``
 7) Optionally, change ``_synapse``'s shell to ``/bin/false`` to reduce the
    chance of a compromised Synapse server being used to take over your box.
 
@@ -464,37 +451,13 @@ https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/misc/matrix-
 Windows Install
 ---------------
-Synapse can be installed on Cygwin. It requires the following Cygwin packages:
-
- - gcc
- - git
- - libffi-devel
- - openssl (and openssl-devel, python-openssl)
- - python
- - python-setuptools
-
-The content repository requires additional packages and will be unable to process
-uploads without them:
-
- - libjpeg8
- - libjpeg8-devel
- - zlib
-
-If you choose to install Synapse without these packages, you will need to reinstall
-``pillow`` for changes to be applied, e.g. ``pip uninstall pillow`` ``pip install
-pillow --user``
-
-Troubleshooting:
-
-- You may need to upgrade ``setuptools`` to get this to work correctly:
-  ``pip install setuptools --upgrade``.
-- You may encounter errors indicating that ``ffi.h`` is missing, even with
-  ``libffi-devel`` installed. If you do, copy the ``.h`` files:
-  ``cp /usr/lib/libffi-3.0.13/include/*.h /usr/include``
-- You may need to install libsodium from source in order to install PyNacl. If
-  you do, you may need to create a symlink to ``libsodium.a`` so ``ld`` can find
-  it: ``ln -s /usr/local/lib/libsodium.a /usr/lib/libsodium.a``
-
+If you wish to run or develop Synapse on Windows, the Windows Subsystem For
+Linux provides a Linux environment on Windows 10 which is capable of using the
+Debian, Fedora, or source installation methods. More information about WSL can
+be found at https://docs.microsoft.com/en-us/windows/wsl/install-win10 for
+Windows 10 and https://docs.microsoft.com/en-us/windows/wsl/install-on-server
+for Windows Server.
 
 Troubleshooting
 ===============
@@ -502,7 +465,7 @@ Troubleshooting
 Troubleshooting Installation
 ----------------------------
 
-Synapse requires pip 1.7 or later, so if your OS provides too old a version you
+Synapse requires pip 8 or later, so if your OS provides too old a version you
 may need to manually upgrade it::
 
     sudo pip install --upgrade pip
@@ -537,28 +500,6 @@ failing, e.g.::
 
     pip install twisted
 
-On OS X, if you encounter clang: error: unknown argument: '-mno-fused-madd' you
-will need to export CFLAGS=-Qunused-arguments.
-
-Troubleshooting Running
------------------------
-
-If synapse fails with ``missing "sodium.h"`` crypto errors, you may need
-to manually upgrade PyNaCL, as synapse uses NaCl (https://nacl.cr.yp.to/) for
-encryption and digital signatures.
-Unfortunately PyNACL currently has a few issues
-(https://github.com/pyca/pynacl/issues/53) and
-(https://github.com/pyca/pynacl/issues/79) that mean it may not install
-correctly, causing all tests to fail with errors about missing "sodium.h". To
-fix try re-installing from PyPI or directly from
-(https://github.com/pyca/pynacl)::
-
-    # Install from PyPI
-    pip install --user --upgrade --force pynacl
-
-    # Install from github
-    pip install --user https://github.com/pyca/pynacl/tarball/master
-
 Running out of File Handles
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
@@ -716,7 +657,8 @@ Using a reverse proxy with Synapse
 
 It is recommended to put a reverse proxy such as
 `nginx <https://nginx.org/en/docs/http/ngx_http_proxy_module.html>`_,
-`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_ or
+`Apache <https://httpd.apache.org/docs/current/mod/mod_proxy_http.html>`_,
|
||||||
|
`Caddy <https://caddyserver.com/docs/proxy>`_ or
|
||||||
`HAProxy <https://www.haproxy.org/>`_ in front of Synapse. One advantage of
|
`HAProxy <https://www.haproxy.org/>`_ in front of Synapse. One advantage of
|
||||||
doing so is that it means that you can expose the default https port (443) to
|
doing so is that it means that you can expose the default https port (443) to
|
||||||
Matrix clients without needing to run Synapse with root privileges.
|
Matrix clients without needing to run Synapse with root privileges.
|
||||||
|
@ -747,6 +689,26 @@ so an example nginx configuration might look like::
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
an example Caddy configuration might look like::
|
||||||
|
|
||||||
|
matrix.example.com {
|
||||||
|
proxy /_matrix http://localhost:8008 {
|
||||||
|
transparent
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
and an example Apache configuration might look like::
|
||||||
|
|
||||||
|
<VirtualHost *:443>
|
||||||
|
SSLEngine on
|
||||||
|
ServerName matrix.example.com;
|
||||||
|
|
||||||
|
<Location /_matrix>
|
||||||
|
ProxyPass http://127.0.0.1:8008/_matrix nocanon
|
||||||
|
ProxyPassReverse http://127.0.0.1:8008/_matrix
|
||||||
|
</Location>
|
||||||
|
</VirtualHost>
|
||||||
|
|
||||||
You will also want to set ``bind_addresses: ['127.0.0.1']`` and ``x_forwarded: true``
|
You will also want to set ``bind_addresses: ['127.0.0.1']`` and ``x_forwarded: true``
|
||||||
for port 8008 in ``homeserver.yaml`` to ensure that client IP addresses are
|
for port 8008 in ``homeserver.yaml`` to ensure that client IP addresses are
|
||||||
recorded correctly.
|
recorded correctly.
|
||||||
@@ -901,7 +863,7 @@ to install using pip and a virtualenv::

     virtualenv -p python2.7 env
     source env/bin/activate
-    python synapse/python_dependencies.py | xargs pip install
+    python -m synapse.python_dependencies | xargs pip install
     pip install lxml mock

 This will run a process of downloading and installing all the needed
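The switch above from ``python synapse/python_dependencies.py`` to ``python -m
synapse.python_dependencies`` is more than cosmetic: the ``-m`` form resolves
the module on ``sys.path``, so it works against an installed package as well
as from a source checkout. A minimal sketch of driving it from Python,
assuming the ``synapse`` package is importable::

    import subprocess

    # "-m" runs the module as __main__ wherever it is installed; the module
    # prints the dependency specifiers that the README pipes to xargs.
    deps = subprocess.check_output(
        ["python", "-m", "synapse.python_dependencies"]
    ).split()
    print(deps)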
@@ -956,5 +918,13 @@ variable. The default is 0.5, which can be decreased to reduce RAM usage
 in memory constrained enviroments, or increased if performance starts to
 degrade.

+Using `libjemalloc <http://jemalloc.net/>`_ can also yield a significant
+improvement in overall amount, and especially in terms of giving back RAM
+to the OS. To use it, the library must simply be put in the LD_PRELOAD
+environment variable when launching Synapse. On Debian, this can be done
+by installing the ``libjemalloc1`` package and adding this line to
+``/etc/default/matrix-synapse``::
+
+    LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1
+
 .. _`key_management`: https://matrix.org/docs/spec/server_server/unstable.html#retrieving-server-keys
UPGRADE.rst (19 changes)

@@ -18,7 +18,7 @@ instructions that may be required are listed later in this document.

 .. code:: bash

-    pip install --upgrade --process-dependency-links https://github.com/matrix-org/synapse/tarball/master
+    pip install --upgrade --process-dependency-links matrix-synapse

     # restart synapse
     synctl restart

@@ -48,11 +48,24 @@ returned by the Client-Server API:
     # configured on port 443.
     curl -kv https://<host.name>/_matrix/client/versions 2>&1 | grep "Server:"

-Upgrading to $NEXT_VERSION
+Upgrading to v0.33.7
+====================
+
+This release removes the example email notification templates from
+``res/templates`` (they are now internal to the python package). This should
+only affect you if you (a) deploy your Synapse instance from a git checkout or
+a github snapshot URL, and (b) have email notifications enabled.
+
+If you have email notifications enabled, you should ensure that
+``email.template_dir`` is either configured to point at a directory where you
+have installed customised templates, or leave it unset to use the default
+templates.
+
+Upgrading to v0.27.3
 ====================

 This release expands the anonymous usage stats sent if the opt-in
 ``report_stats`` configuration is set to ``true``. We now capture RSS memory
 and cpu use at a very coarse level. This requires administrators to install
 the optional ``psutil`` python module.
(File diff suppressed because it is too large.)
@@ -1,6 +1,13 @@
-FROM docker.io/python:2-alpine3.8
+ARG PYTHON_VERSION=2

-RUN apk add --no-cache --virtual .nacl_deps \
+###
+### Stage 0: builder
+###
+FROM docker.io/python:${PYTHON_VERSION}-alpine3.8 as builder
+
+# install the OS build deps
+
+RUN apk add \
     build-base \
     libffi-dev \
     libjpeg-turbo-dev \

@@ -8,25 +15,46 @@ RUN apk add --no-cache --virtual .nacl_deps \
     libxslt-dev \
     linux-headers \
     postgresql-dev \
-    su-exec \
     zlib-dev

-COPY . /synapse
+# build things which have slow build steps, before we copy synapse, so that
+# the layer can be cached.
+#
+# (we really just care about caching a wheel here, as the "pip install" below
+# will install them again.)

-# A wheel cache may be provided in ./cache for faster build
-RUN cd /synapse \
- && pip install --upgrade \
+RUN pip install --prefix="/install" --no-warn-script-location \
+    cryptography \
+    msgpack-python \
+    pillow \
+    pynacl
+
+# now install synapse and all of the python deps to /install.
+
+COPY . /synapse
+RUN pip install --prefix="/install" --no-warn-script-location \
     lxml \
-    pip \
     psycopg2 \
-    setuptools \
- && mkdir -p /synapse/cache \
- && pip install -f /synapse/cache --upgrade --process-dependency-links . \
- && mv /synapse/docker/start.py /synapse/docker/conf / \
- && rm -rf \
-    setup.cfg \
-    setup.py \
-    synapse
+    /synapse
+
+###
+### Stage 1: runtime
+###
+
+FROM docker.io/python:${PYTHON_VERSION}-alpine3.8
+
+RUN apk add --no-cache --virtual .runtime_deps \
+    libffi \
+    libjpeg-turbo \
+    libressl \
+    libxslt \
+    libpq \
+    zlib \
+    su-exec
+
+COPY --from=builder /install /usr/local
+COPY ./docker/start.py /start.py
+COPY ./docker/conf /conf

 VOLUME ["/data"]
docker/Dockerfile-pgtests (new file, 12 lines)

@@ -0,0 +1,12 @@
+# Use the Sytest image that comes with a lot of the build dependencies
+# pre-installed
+FROM matrixdotorg/sytest:latest
+
+# The Sytest image doesn't come with python, so install that
+RUN apt-get -qq install -y python python-dev python-pip
+
+# We need tox to run the tests in run_pg_tests.sh
+RUN pip install tox
+
+ADD run_pg_tests.sh /pg_tests.sh
+ENTRYPOINT /pg_tests.sh
@@ -88,6 +88,7 @@ variables are available for configuration:
 * ``SYNAPSE_TURN_URIS``, set this variable to the coma-separated list of TURN
   uris to enable TURN for this homeserver.
 * ``SYNAPSE_TURN_SECRET``, set this to the TURN shared secret if required.
+* ``SYNAPSE_MAX_UPLOAD_SIZE``, set this variable to change the max upload size [default `10M`].

 Shared secrets, that will be initialized to random values if not set:
|
@ -21,7 +21,7 @@ listeners:
|
||||||
{% if not SYNAPSE_NO_TLS %}
|
{% if not SYNAPSE_NO_TLS %}
|
||||||
-
|
-
|
||||||
port: 8448
|
port: 8448
|
||||||
bind_addresses: ['0.0.0.0']
|
bind_addresses: ['::']
|
||||||
type: http
|
type: http
|
||||||
tls: true
|
tls: true
|
||||||
x_forwarded: false
|
x_forwarded: false
|
||||||
|
@ -34,7 +34,7 @@ listeners:
|
||||||
|
|
||||||
- port: 8008
|
- port: 8008
|
||||||
tls: false
|
tls: false
|
||||||
bind_addresses: ['0.0.0.0']
|
bind_addresses: ['::']
|
||||||
type: http
|
type: http
|
||||||
x_forwarded: false
|
x_forwarded: false
|
||||||
|
|
||||||
|
@ -85,7 +85,7 @@ federation_rc_concurrent: 3
|
||||||
|
|
||||||
media_store_path: "/data/media"
|
media_store_path: "/data/media"
|
||||||
uploads_path: "/data/uploads"
|
uploads_path: "/data/uploads"
|
||||||
max_upload_size: "10M"
|
max_upload_size: "{{ SYNAPSE_MAX_UPLOAD_SIZE or "10M" }}"
|
||||||
max_image_pixels: "32M"
|
max_image_pixels: "32M"
|
||||||
dynamic_thumbnails: false
|
dynamic_thumbnails: false
|
||||||
|
|
||||||
|
@ -211,7 +211,9 @@ email:
|
||||||
require_transport_security: False
|
require_transport_security: False
|
||||||
notif_from: "{{ SYNAPSE_SMTP_FROM or "hostmaster@" + SYNAPSE_SERVER_NAME }}"
|
notif_from: "{{ SYNAPSE_SMTP_FROM or "hostmaster@" + SYNAPSE_SERVER_NAME }}"
|
||||||
app_name: Matrix
|
app_name: Matrix
|
||||||
template_dir: res/templates
|
# if template_dir is unset, uses the example templates that are part of
|
||||||
|
# the Synapse distribution.
|
||||||
|
#template_dir: res/templates
|
||||||
notif_template_html: notif_mail.html
|
notif_template_html: notif_mail.html
|
||||||
notif_template_text: notif_mail.txt
|
notif_template_text: notif_mail.txt
|
||||||
notif_for_new_users: True
|
notif_for_new_users: True
|
||||||
|
|
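The templated default above relies on Jinja2's ``or`` short-circuiting: an
unset (and therefore falsy) ``SYNAPSE_MAX_UPLOAD_SIZE`` falls back to the
literal ``"10M"``. A minimal sketch of how it renders, using the same
``jinja2`` machinery as ``docker/start.py``::

    import jinja2

    tmpl = jinja2.Template('max_upload_size: "{{ SYNAPSE_MAX_UPLOAD_SIZE or "10M" }}"')
    print(tmpl.render({}))                                  # max_upload_size: "10M"
    print(tmpl.render({"SYNAPSE_MAX_UPLOAD_SIZE": "50M"}))  # max_upload_size: "50M"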
docker/run_pg_tests.sh (new executable file, 20 lines)

@@ -0,0 +1,20 @@
+#!/bin/bash
+
+# This script runs the PostgreSQL tests inside a Docker container. It expects
+# the relevant source files to be mounted into /src (done automatically by the
+# caller script). It will set up the database, run it, and then use the tox
+# configuration to run the tests.
+
+set -e
+
+# Set PGUSER so Synapse's tests know what user to connect to the database with
+export PGUSER=postgres
+
+# Initialise & start the database
+su -c '/usr/lib/postgresql/9.6/bin/initdb -D /var/lib/postgresql/data -E "UTF-8" --lc-collate="en_US.UTF-8" --lc-ctype="en_US.UTF-8" --username=postgres' postgres
+su -c '/usr/lib/postgresql/9.6/bin/pg_ctl -w -D /var/lib/postgresql/data start' postgres
+
+# Run the tests
+cd /src
+export TRIAL_FLAGS="-j 4"
+tox --workdir=/tmp -e py27-postgres
@@ -5,6 +5,7 @@ import os
 import sys
 import subprocess
 import glob
+import codecs

 # Utility functions
 convert = lambda src, dst, environ: open(dst, "w").write(jinja2.Template(open(src).read()).render(**environ))

@@ -23,7 +24,7 @@ def generate_secrets(environ, secrets):
             with open(filename) as handle: value = handle.read()
         else:
             print("Generating a random secret for {}".format(name))
-            value = os.urandom(32).encode("hex")
+            value = codecs.encode(os.urandom(32), "hex").decode()
             with open(filename, "w") as handle: handle.write(value)
         environ[secret] = value
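The ``codecs.encode`` change above is a Python 2/3 compatibility fix:
``bytes.encode("hex")`` only exists on Python 2, whereas the ``hex`` codec via
``codecs.encode`` works on both, and the trailing ``.decode()`` turns the
result into a native string. A sketch of the two forms::

    import codecs
    import os

    raw = os.urandom(32)

    # Python 2 only:
    # value = raw.encode("hex")

    # Python 2 and 3:
    value = codecs.encode(raw, "hex").decode()
    assert len(value) == 64  # 32 random bytes -> 64 hex characters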
@@ -74,7 +74,7 @@ replication endpoints that it's talking to on the main synapse process.
 ``worker_replication_port`` should point to the TCP replication listener port and
 ``worker_replication_http_port`` should point to the HTTP replication port.

-Currently, only the ``event_creator`` worker requires specifying
+Currently, the ``event_creator`` and ``federation_reader`` workers require specifying
 ``worker_replication_http_port``.

 For instance::

@@ -265,6 +265,7 @@ Handles some event creation. It can handle REST endpoints matching::
     ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send
     ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$
     ^/_matrix/client/(api/v1|r0|unstable)/join/
+    ^/_matrix/client/(api/v1|r0|unstable)/profile/

 It will create events locally and then send them on to the main synapse
 instance to be persisted and handled.
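The endpoint lists above are regular expressions matched against the request
path. A quick sketch checking that the newly routed profile endpoint matches,
with a hypothetical user ID::

    import re

    PROFILE_RE = re.compile(r"^/_matrix/client/(api/v1|r0|unstable)/profile/")

    assert PROFILE_RE.match("/_matrix/client/r0/profile/@alice:example.com/displayname")
    assert not PROFILE_RE.match("/_matrix/client/r0/sync")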
@@ -1,23 +0,0 @@
-#!/bin/bash
-
-set -eux
-
-: ${WORKSPACE:="$(pwd)"}
-
-export WORKSPACE
-export PYTHONDONTWRITEBYTECODE=yep
-export SYNAPSE_CACHE_FACTOR=1
-
-export HAPROXY_BIN=/home/haproxy/haproxy-1.6.11/haproxy
-
-./jenkins/prepare_synapse.sh
-./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
-./jenkins/clone.sh dendron https://github.com/matrix-org/dendron.git
-./dendron/jenkins/build_dendron.sh
-./sytest/jenkins/prep_sytest_for_postgres.sh
-
-./sytest/jenkins/install_and_run.sh \
-    --python $WORKSPACE/.tox/py27/bin/python \
-    --synapse-directory $WORKSPACE \
-    --dendron $WORKSPACE/dendron/bin/dendron \
-    --haproxy \

@@ -1,20 +0,0 @@
-#!/bin/bash
-
-set -eux
-
-: ${WORKSPACE:="$(pwd)"}
-
-export WORKSPACE
-export PYTHONDONTWRITEBYTECODE=yep
-export SYNAPSE_CACHE_FACTOR=1
-
-./jenkins/prepare_synapse.sh
-./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
-./jenkins/clone.sh dendron https://github.com/matrix-org/dendron.git
-./dendron/jenkins/build_dendron.sh
-./sytest/jenkins/prep_sytest_for_postgres.sh
-
-./sytest/jenkins/install_and_run.sh \
-    --python $WORKSPACE/.tox/py27/bin/python \
-    --synapse-directory $WORKSPACE \
-    --dendron $WORKSPACE/dendron/bin/dendron \

@@ -1,22 +0,0 @@
-#!/bin/bash
-
-set -eux
-
-: ${WORKSPACE:="$(pwd)"}
-
-export PYTHONDONTWRITEBYTECODE=yep
-export SYNAPSE_CACHE_FACTOR=1
-
-# Output test results as junit xml
-export TRIAL_FLAGS="--reporter=subunit"
-export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
-# Write coverage reports to a separate file for each process
-export COVERAGE_OPTS="-p"
-export DUMP_COVERAGE_COMMAND="coverage help"
-
-# Output flake8 violations to violations.flake8.log
-export PEP8SUFFIX="--output-file=violations.flake8.log"
-
-rm .coverage* || echo "No coverage files to remove"
-
-tox -e packaging -e pep8

@@ -1,18 +0,0 @@
-#!/bin/bash
-
-set -eux
-
-: ${WORKSPACE:="$(pwd)"}
-
-export WORKSPACE
-export PYTHONDONTWRITEBYTECODE=yep
-export SYNAPSE_CACHE_FACTOR=1
-
-./jenkins/prepare_synapse.sh
-./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
-
-./sytest/jenkins/prep_sytest_for_postgres.sh
-
-./sytest/jenkins/install_and_run.sh \
-    --python $WORKSPACE/.tox/py27/bin/python \
-    --synapse-directory $WORKSPACE \

@@ -1,16 +0,0 @@
-#!/bin/bash
-
-set -eux
-
-: ${WORKSPACE:="$(pwd)"}
-
-export WORKSPACE
-export PYTHONDONTWRITEBYTECODE=yep
-export SYNAPSE_CACHE_FACTOR=1
-
-./jenkins/prepare_synapse.sh
-./jenkins/clone.sh sytest https://github.com/matrix-org/sytest.git
-
-./sytest/jenkins/install_and_run.sh \
-    --python $WORKSPACE/.tox/py27/bin/python \
-    --synapse-directory $WORKSPACE \

@@ -1,30 +0,0 @@
-#!/bin/bash
-
-set -eux
-
-: ${WORKSPACE:="$(pwd)"}
-
-export PYTHONDONTWRITEBYTECODE=yep
-export SYNAPSE_CACHE_FACTOR=1
-
-# Output test results as junit xml
-export TRIAL_FLAGS="--reporter=subunit"
-export TOXSUFFIX="| subunit-1to2 | subunit2junitxml --no-passthrough --output-to=results.xml"
-# Write coverage reports to a separate file for each process
-export COVERAGE_OPTS="-p"
-export DUMP_COVERAGE_COMMAND="coverage help"
-
-# Output flake8 violations to violations.flake8.log
-# Don't exit with non-0 status code on Jenkins,
-# so that the build steps continue and a later step can decided whether to
-# UNSTABLE or FAILURE this build.
-export PEP8SUFFIX="--output-file=violations.flake8.log || echo flake8 finished with status code \$?"
-
-rm .coverage* || echo "No coverage files to remove"
-
-tox --notest -e py27
-TOX_BIN=$WORKSPACE/.tox/py27/bin
-python synapse/python_dependencies.py | xargs -n1 $TOX_BIN/pip install
-$TOX_BIN/pip install lxml
-
-tox -e py27

@@ -1,44 +0,0 @@
-#! /bin/bash
-
-# This clones a project from github into a named subdirectory
-# If the project has a branch with the same name as this branch
-# then it will checkout that branch after cloning.
-# Otherwise it will checkout "origin/develop."
-# The first argument is the name of the directory to checkout
-# the branch into.
-# The second argument is the URL of the remote repository to checkout.
-# Usually something like https://github.com/matrix-org/sytest.git
-
-set -eux
-
-NAME=$1
-PROJECT=$2
-BASE=".$NAME-base"
-
-# Update our mirror.
-if [ ! -d ".$NAME-base" ]; then
-  # Create a local mirror of the source repository.
-  # This saves us from having to download the entire repository
-  # when this script is next run.
-  git clone "$PROJECT" "$BASE" --mirror
-else
-  # Fetch any updates from the source repository.
-  (cd "$BASE"; git fetch -p)
-fi
-
-# Remove the existing repository so that we have a clean copy
-rm -rf "$NAME"
-# Cloning with --shared means that we will share portions of the
-# .git directory with our local mirror.
-git clone "$BASE" "$NAME" --shared
-
-# Jenkins may have supplied us with the name of the branch in the
-# environment. Otherwise we will have to guess based on the current
-# commit.
-: ${GIT_BRANCH:="origin/$(git rev-parse --abbrev-ref HEAD)"}
-cd "$NAME"
-# check out the relevant branch
-git checkout "${GIT_BRANCH}" || (
-  echo >&2 "No ref ${GIT_BRANCH} found, falling back to develop"
-  git checkout "origin/develop"
-)

@@ -31,5 +31,5 @@ $TOX_BIN/pip install 'setuptools>=18.5'
 $TOX_BIN/pip install 'pip>=10'

 { python synapse/python_dependencies.py
-  echo lxml psycopg2
+  echo lxml
 } | xargs $TOX_BIN/pip install
@@ -1,21 +1,20 @@
-from synapse.events import FrozenEvent
-from synapse.api.auth import Auth
-
-from mock import Mock
+from __future__ import print_function

 import argparse
 import itertools
 import json
 import sys

+from mock import Mock
+
+from synapse.api.auth import Auth
+from synapse.events import FrozenEvent
+

 def check_auth(auth, auth_chain, events):
     auth_chain.sort(key=lambda e: e.depth)

-    auth_map = {
-        e.event_id: e
-        for e in auth_chain
-    }
+    auth_map = {e.event_id: e for e in auth_chain}

     create_events = {}
     for e in auth_chain:

@@ -25,31 +24,26 @@ def check_auth(auth, auth_chain, events):
     for e in itertools.chain(auth_chain, events):
         auth_events_list = [auth_map[i] for i, _ in e.auth_events]

-        auth_events = {
-            (e.type, e.state_key): e
-            for e in auth_events_list
-        }
+        auth_events = {(e.type, e.state_key): e for e in auth_events_list}

         auth_events[("m.room.create", "")] = create_events[e.room_id]

         try:
             auth.check(e, auth_events=auth_events)
         except Exception as ex:
-            print "Failed:", e.event_id, e.type, e.state_key
-            print "Auth_events:", auth_events
-            print ex
-            print json.dumps(e.get_dict(), sort_keys=True, indent=4)
+            print("Failed:", e.event_id, e.type, e.state_key)
+            print("Auth_events:", auth_events)
+            print(ex)
+            print(json.dumps(e.get_dict(), sort_keys=True, indent=4))
             # raise
-        print "Success:", e.event_id, e.type, e.state_key
+        print("Success:", e.event_id, e.type, e.state_key)


 if __name__ == '__main__':
     parser = argparse.ArgumentParser()

     parser.add_argument(
-        'json',
-        nargs='?',
-        type=argparse.FileType('r'),
-        default=sys.stdin,
+        'json', nargs='?', type=argparse.FileType('r'), default=sys.stdin
     )

     args = parser.parse_args()
@@ -1,10 +1,15 @@
-from synapse.crypto.event_signing import *
-from unpaddedbase64 import encode_base64
-
 import argparse
 import hashlib
-import sys
 import json
+import logging
+import sys
+
+from unpaddedbase64 import encode_base64
+
+from synapse.crypto.event_signing import (
+    check_event_content_hash,
+    compute_event_reference_hash,
+)


 class dictobj(dict):

@@ -24,27 +29,26 @@ class dictobj(dict):

 def main():
     parser = argparse.ArgumentParser()
-    parser.add_argument("input_json", nargs="?", type=argparse.FileType('r'),
-                        default=sys.stdin)
+    parser.add_argument(
+        "input_json", nargs="?", type=argparse.FileType('r'), default=sys.stdin
+    )
     args = parser.parse_args()
     logging.basicConfig()

     event_json = dictobj(json.load(args.input_json))

-    algorithms = {
-        "sha256": hashlib.sha256,
-    }
+    algorithms = {"sha256": hashlib.sha256}

     for alg_name in event_json.hashes:
         if check_event_content_hash(event_json, algorithms[alg_name]):
-            print "PASS content hash %s" % (alg_name,)
+            print("PASS content hash %s" % (alg_name,))
         else:
-            print "FAIL content hash %s" % (alg_name,)
+            print("FAIL content hash %s" % (alg_name,))

     for algorithm in algorithms.values():
         name, h_bytes = compute_event_reference_hash(event_json, algorithm)
-        print "Reference hash %s: %s" % (name, encode_base64(h_bytes))
+        print("Reference hash %s: %s" % (name, encode_base64(h_bytes)))

-if __name__=="__main__":
+
+if __name__ == "__main__":
     main()
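For context, a Matrix event's content hash is the SHA-256 of the canonical
JSON of the event with the ``unsigned``, ``signatures`` and ``hashes`` keys
removed, which is what ``check_event_content_hash`` recomputes. A standalone
sketch, approximating canonical JSON with ``json.dumps`` (the real code uses
the ``canonicaljson`` library)::

    import hashlib
    import json

    def content_hash(event):
        # drop the keys that are excluded from the hash
        pruned = {k: v for k, v in event.items()
                  if k not in ("unsigned", "signatures", "hashes")}
        # canonical-ish JSON: sorted keys, no insignificant whitespace
        canonical = json.dumps(pruned, sort_keys=True, separators=(",", ":"))
        return hashlib.sha256(canonical.encode("utf-8")).digest()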
@@ -1,15 +1,15 @@

-from signedjson.sign import verify_signed_json
+import argparse
+import json
+import logging
+import sys
+import urllib2
+
+import dns.resolver
 from signedjson.key import decode_verify_key_bytes, write_signing_keys
+from signedjson.sign import verify_signed_json
 from unpaddedbase64 import decode_base64

-import urllib2
-import json
-import sys
-import dns.resolver
-import pprint
-import argparse
-import logging
-

 def get_targets(server_name):
     if ":" in server_name:

@@ -23,6 +23,7 @@ def get_targets(server_name):
     except dns.resolver.NXDOMAIN:
         yield (server_name, 8448)

+
 def get_server_keys(server_name, target, port):
     url = "https://%s:%i/_matrix/key/v1" % (target, port)
     keys = json.load(urllib2.urlopen(url))

@@ -33,12 +34,14 @@ def get_server_keys(server_name, target, port):
         verify_keys[key_id] = verify_key
     return verify_keys


 def main():
+
     parser = argparse.ArgumentParser()
     parser.add_argument("signature_name")
-    parser.add_argument("input_json", nargs="?", type=argparse.FileType('r'),
-                        default=sys.stdin)
+    parser.add_argument(
+        "input_json", nargs="?", type=argparse.FileType('r'), default=sys.stdin
+    )

     args = parser.parse_args()
     logging.basicConfig()

@@ -48,24 +51,23 @@ def main():
     for target, port in get_targets(server_name):
         try:
             keys = get_server_keys(server_name, target, port)
-            print "Using keys from https://%s:%s/_matrix/key/v1" % (target, port)
+            print("Using keys from https://%s:%s/_matrix/key/v1" % (target, port))
             write_signing_keys(sys.stdout, keys.values())
             break
-        except:
+        except Exception:
             logging.exception("Error talking to %s:%s", target, port)

     json_to_check = json.load(args.input_json)
-    print "Checking JSON:"
+    print("Checking JSON:")
     for key_id in json_to_check["signatures"][args.signature_name]:
         try:
             key = keys[key_id]
             verify_signed_json(json_to_check, args.signature_name, key)
-            print "PASS %s" % (key_id,)
-        except:
+            print("PASS %s" % (key_id,))
+        except Exception:
             logging.exception("Check for key %s failed" % (key_id,))
-            print "FAIL %s" % (key_id,)
+            print("FAIL %s" % (key_id,))


 if __name__ == '__main__':
     main()
@@ -1,13 +1,21 @@
+import hashlib
+import json
+import sys
+import time
+
+import six
+
 import psycopg2
 import yaml
-import sys
-import json
-import time
-import hashlib
-from unpaddedbase64 import encode_base64
+from canonicaljson import encode_canonical_json
 from signedjson.key import read_signing_keys
 from signedjson.sign import sign_json
-from canonicaljson import encode_canonical_json
+from unpaddedbase64 import encode_base64
+
+if six.PY2:
+    db_type = six.moves.builtins.buffer
+else:
+    db_type = memoryview


 def select_v1_keys(connection):

@@ -39,7 +47,9 @@ def select_v2_json(connection):
     cursor.close()
     results = {}
     for server_name, key_id, key_json in rows:
-        results.setdefault(server_name, {})[key_id] = json.loads(str(key_json).decode("utf-8"))
+        results.setdefault(server_name, {})[key_id] = json.loads(
+            str(key_json).decode("utf-8")
+        )
     return results


@@ -47,10 +57,7 @@ def convert_v1_to_v2(server_name, valid_until, keys, certificate):
     return {
         "old_verify_keys": {},
         "server_name": server_name,
-        "verify_keys": {
-            key_id: {"key": key}
-            for key_id, key in keys.items()
-        },
+        "verify_keys": {key_id: {"key": key} for key_id, key in keys.items()},
         "valid_until_ts": valid_until,
         "tls_fingerprints": [fingerprint(certificate)],
     }

@@ -65,7 +72,7 @@ def rows_v2(server, json):
     valid_until = json["valid_until_ts"]
     key_json = encode_canonical_json(json)
     for key_id in json["verify_keys"]:
-        yield (server, key_id, "-", valid_until, valid_until, buffer(key_json))
+        yield (server, key_id, "-", valid_until, valid_until, db_type(key_json))


 def main():

@@ -87,7 +94,7 @@ def main():

     result = {}
     for server in keys:
-        if not server in json:
+        if server not in json:
             v2_json = convert_v1_to_v2(
                 server, valid_until, keys[server], certificates[server]
             )

@@ -96,10 +103,7 @@ def main():

     yaml.safe_dump(result, sys.stdout, default_flow_style=False)

-    rows = list(
-        row for server, json in result.items()
-        for row in rows_v2(server, json)
-    )
+    rows = list(row for server, json in result.items() for row in rows_v2(server, json))

     cursor = connection.cursor()
     cursor.executemany(

@@ -107,7 +111,7 @@ def main():
         " server_name, key_id, from_server,"
         " ts_added_ms, ts_valid_until_ms, key_json"
         ") VALUES (%s, %s, %s, %s, %s, %s)",
-        rows
+        rows,
     )
     connection.commit()
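The ``db_type`` shim above exists because psycopg2 expects binary parameters
to be wrapped in ``buffer`` on Python 2 and ``memoryview`` on Python 3. The
same pattern in isolation (the byte string stands in for real key JSON)::

    import six

    if six.PY2:
        db_type = six.moves.builtins.buffer  # `buffer` is a py2 builtin
    else:
        db_type = memoryview

    # wrap the canonical key JSON before handing it to cursor.executemany()
    payload = db_type(b'{"server_name": "example.com"}')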
@@ -1,33 +0,0 @@
-#!/usr/bin/perl -pi
-# Copyright 2014-2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-$copyright = <<EOT;
-/* Copyright 2016 OpenMarket Ltd
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-EOT
-
-s/^(# -\*- coding: utf-8 -\*-\n)?/$1$copyright/ if ($. == 1);

@@ -1,33 +0,0 @@
-#!/usr/bin/perl -pi
-# Copyright 2014-2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-$copyright = <<EOT;
-# Copyright 2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-EOT
-
-s/^(# -\*- coding: utf-8 -\*-\n)?/$1$copyright/ if ($. == 1);
@@ -1,8 +1,16 @@
 #! /usr/bin/python

+from __future__ import print_function
+
+import argparse
 import ast
+import os
+import re
+import sys
+
 import yaml


 class DefinitionVisitor(ast.NodeVisitor):
     def __init__(self):
         super(DefinitionVisitor, self).__init__()

@@ -42,15 +50,18 @@ def non_empty(defs):
     functions = {name: non_empty(f) for name, f in defs['def'].items()}
     classes = {name: non_empty(f) for name, f in defs['class'].items()}
     result = {}
-    if functions: result['def'] = functions
-    if classes: result['class'] = classes
+    if functions:
+        result['def'] = functions
+    if classes:
+        result['class'] = classes
     names = defs['names']
     uses = []
     for name in names.get('Load', ()):
         if name not in names.get('Param', ()) and name not in names.get('Store', ()):
             uses.append(name)
     uses.extend(defs['attrs'])
-    if uses: result['uses'] = uses
+    if uses:
+        result['uses'] = uses
     result['names'] = names
     result['attrs'] = defs['attrs']
     return result

@@ -95,7 +106,6 @@ def used_names(prefix, item, defs, names):


 if __name__ == '__main__':
-    import sys, os, argparse, re

     parser = argparse.ArgumentParser(description='Find definitions.')
     parser.add_argument(

@@ -105,24 +115,28 @@ if __name__ == '__main__':
         "--ignore", action="append", metavar="REGEXP", help="Ignore a pattern"
     )
     parser.add_argument(
-        "--pattern", action="append", metavar="REGEXP",
-        help="Search for a pattern"
+        "--pattern", action="append", metavar="REGEXP", help="Search for a pattern"
     )
     parser.add_argument(
-        "directories", nargs='+', metavar="DIR",
-        help="Directories to search for definitions"
+        "directories",
+        nargs='+',
+        metavar="DIR",
+        help="Directories to search for definitions",
     )
     parser.add_argument(
-        "--referrers", default=0, type=int,
-        help="Include referrers up to the given depth"
+        "--referrers",
+        default=0,
+        type=int,
+        help="Include referrers up to the given depth",
     )
     parser.add_argument(
-        "--referred", default=0, type=int,
-        help="Include referred down to the given depth"
+        "--referred",
+        default=0,
+        type=int,
+        help="Include referred down to the given depth",
     )
     parser.add_argument(
-        "--format", default="yaml",
-        help="Output format, one of 'yaml' or 'dot'"
+        "--format", default="yaml", help="Output format, one of 'yaml' or 'dot'"
     )
     args = parser.parse_args()

@@ -162,7 +176,7 @@ if __name__ == '__main__':
         for used_by in entry.get("used", ()):
             referrers.add(used_by)
     for name, definition in names.items():
-        if not name in referrers:
+        if name not in referrers:
             continue
         if ignore and any(pattern.match(name) for pattern in ignore):
             continue

@@ -176,7 +190,7 @@ if __name__ == '__main__':
         for uses in entry.get("uses", ()):
             referred.add(uses)
     for name, definition in names.items():
-        if not name in referred:
+        if name not in referred:
             continue
         if ignore and any(pattern.match(name) for pattern in ignore):
             continue

@@ -185,12 +199,12 @@ if __name__ == '__main__':
     if args.format == 'yaml':
         yaml.dump(result, sys.stdout, default_flow_style=False)
     elif args.format == 'dot':
-        print "digraph {"
+        print("digraph {")
         for name, entry in result.items():
-            print name
+            print(name)
             for used_by in entry.get("used", ()):
                 if used_by in result:
-                    print used_by, "->", name
+                    print(used_by, "->", name)
-        print "}"
+        print("}")
     else:
         raise ValueError("Unknown format %r" % (args.format))
@@ -1,8 +1,11 @@
 #!/usr/bin/env python2

-import pymacaroons
+from __future__ import print_function
+
 import sys

+import pymacaroons
+
 if len(sys.argv) == 1:
     sys.stderr.write("usage: %s macaroon [key]\n" % (sys.argv[0],))
     sys.exit(1)

@@ -11,14 +14,14 @@ macaroon_string = sys.argv[1]
 key = sys.argv[2] if len(sys.argv) > 2 else None

 macaroon = pymacaroons.Macaroon.deserialize(macaroon_string)
-print macaroon.inspect()
+print(macaroon.inspect())

-print ""
+print("")

 verifier = pymacaroons.Verifier()
 verifier.satisfy_general(lambda c: True)
 try:
     verifier.verify(macaroon, key)
-    print "Signature is correct"
+    print("Signature is correct")
 except Exception as e:
-    print e.message
+    print(str(e))
@ -18,21 +18,21 @@
|
||||||
from __future__ import print_function
|
from __future__ import print_function
|
||||||
|
|
||||||
import argparse
|
import argparse
|
||||||
|
import base64
|
||||||
|
import json
|
||||||
|
import sys
|
||||||
from urlparse import urlparse, urlunparse
|
from urlparse import urlparse, urlunparse
|
||||||
|
|
||||||
import nacl.signing
|
import nacl.signing
|
||||||
import json
|
|
||||||
import base64
|
|
||||||
import requests
|
import requests
|
||||||
import sys
|
|
||||||
|
|
||||||
from requests.adapters import HTTPAdapter
|
|
||||||
import srvlookup
|
import srvlookup
|
||||||
import yaml
|
import yaml
|
||||||
|
from requests.adapters import HTTPAdapter
|
||||||
|
|
||||||
# uncomment the following to enable debug logging of http requests
|
# uncomment the following to enable debug logging of http requests
|
||||||
#from httplib import HTTPConnection
|
# from httplib import HTTPConnection
|
||||||
#HTTPConnection.debuglevel = 1
|
# HTTPConnection.debuglevel = 1
|
||||||
|
|
||||||
|
|
||||||
def encode_base64(input_bytes):
|
def encode_base64(input_bytes):
|
||||||
"""Encode bytes as a base64 string without any padding."""
|
"""Encode bytes as a base64 string without any padding."""
|
||||||
|
@ -58,15 +58,15 @@ def decode_base64(input_string):
|
||||||
|
|
||||||
def encode_canonical_json(value):
|
def encode_canonical_json(value):
|
||||||
return json.dumps(
|
return json.dumps(
|
||||||
value,
|
value,
|
||||||
# Encode code-points outside of ASCII as UTF-8 rather than \u escapes
|
# Encode code-points outside of ASCII as UTF-8 rather than \u escapes
|
||||||
ensure_ascii=False,
|
ensure_ascii=False,
|
||||||
# Remove unecessary white space.
|
# Remove unecessary white space.
|
||||||
separators=(',',':'),
|
separators=(',', ':'),
|
||||||
# Sort the keys of dictionaries.
|
# Sort the keys of dictionaries.
|
||||||
sort_keys=True,
|
sort_keys=True,
|
||||||
# Encode the resulting unicode as UTF-8 bytes.
|
# Encode the resulting unicode as UTF-8 bytes.
|
||||||
).encode("UTF-8")
|
).encode("UTF-8")
|
||||||
|
|
||||||
|
|
||||||
def sign_json(json_object, signing_key, signing_name):
|
def sign_json(json_object, signing_key, signing_name):
|
||||||
|
@ -88,6 +88,7 @@ def sign_json(json_object, signing_key, signing_name):
|
||||||
|
|
||||||
NACL_ED25519 = "ed25519"
|
NACL_ED25519 = "ed25519"
|
||||||
|
|
||||||
|
|
||||||
def decode_signing_key_base64(algorithm, version, key_base64):
|
def decode_signing_key_base64(algorithm, version, key_base64):
|
||||||
"""Decode a base64 encoded signing key
|
"""Decode a base64 encoded signing key
|
||||||
Args:
|
Args:
|
||||||
|
@ -143,14 +144,12 @@ def request_json(method, origin_name, origin_key, destination, path, content):
|
||||||
authorization_headers = []
|
authorization_headers = []
|
||||||
|
|
||||||
for key, sig in signed_json["signatures"][origin_name].items():
|
for key, sig in signed_json["signatures"][origin_name].items():
|
||||||
header = "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (
|
header = "X-Matrix origin=%s,key=\"%s\",sig=\"%s\"" % (origin_name, key, sig)
|
||||||
origin_name, key, sig,
|
|
||||||
)
|
|
||||||
authorization_headers.append(bytes(header))
|
authorization_headers.append(bytes(header))
|
||||||
print ("Authorization: %s" % header, file=sys.stderr)
|
print("Authorization: %s" % header, file=sys.stderr)
|
||||||
|
|
||||||
dest = "matrix://%s%s" % (destination, path)
|
dest = "matrix://%s%s" % (destination, path)
|
||||||
print ("Requesting %s" % dest, file=sys.stderr)
|
print("Requesting %s" % dest, file=sys.stderr)
|
||||||
|
|
||||||
s = requests.Session()
|
s = requests.Session()
|
||||||
s.mount("matrix://", MatrixConnectionAdapter())
|
s.mount("matrix://", MatrixConnectionAdapter())
|
||||||
|
@ -158,10 +157,7 @@ def request_json(method, origin_name, origin_key, destination, path, content):
|
||||||
result = s.request(
|
result = s.request(
|
||||||
method=method,
|
method=method,
|
||||||
url=dest,
|
url=dest,
|
||||||
headers={
|
headers={"Host": destination, "Authorization": authorization_headers[0]},
|
||||||
"Host": destination,
|
|
||||||
"Authorization": authorization_headers[0]
|
|
||||||
},
|
|
||||||
verify=False,
|
verify=False,
|
||||||
data=content,
|
data=content,
|
||||||
)
|
)
|
||||||
|
@@ -171,50 +167,50 @@ def request_json(method, origin_name, origin_key, destination, path, content):
 def main():
     parser = argparse.ArgumentParser(
-        description=
-            "Signs and sends a federation request to a matrix homeserver",
+        description="Signs and sends a federation request to a matrix homeserver"
     )

     parser.add_argument(
-        "-N", "--server-name",
+        "-N",
+        "--server-name",
         help="Name to give as the local homeserver. If unspecified, will be "
         "read from the config file.",
     )

     parser.add_argument(
-        "-k", "--signing-key-path",
+        "-k",
+        "--signing-key-path",
         help="Path to the file containing the private ed25519 key to sign the "
         "request with.",
     )

     parser.add_argument(
-        "-c", "--config",
+        "-c",
+        "--config",
         default="homeserver.yaml",
         help="Path to server config file. Ignored if --server-name and "
         "--signing-key-path are both given.",
     )

     parser.add_argument(
-        "-d", "--destination",
+        "-d",
+        "--destination",
         default="matrix.org",
         help="name of the remote homeserver. We will do SRV lookups and "
         "connect appropriately.",
     )

     parser.add_argument(
-        "-X", "--method",
+        "-X",
+        "--method",
         help="HTTP method to use for the request. Defaults to GET if --data is"
-        "unspecified, POST if it is."
+        "unspecified, POST if it is.",
     )

-    parser.add_argument(
-        "--body",
-        help="Data to send as the body of the HTTP request"
-    )
+    parser.add_argument("--body", help="Data to send as the body of the HTTP request")

     parser.add_argument(
-        "path",
-        help="request path. We will add '/_matrix/federation/v1/' to this."
+        "path", help="request path. We will add '/_matrix/federation/v1/' to this."
     )

     args = parser.parse_args()

@@ -227,13 +223,15 @@ def main():
     result = request_json(
         args.method,
-        args.server_name, key, args.destination,
+        args.server_name,
+        key,
+        args.destination,
         "/_matrix/federation/v1/" + args.path,
         content=args.body,
     )

     json.dump(result, sys.stdout)
-    print ("")
+    print("")


 def read_args_from_config(args):

@@ -253,7 +251,7 @@ class MatrixConnectionAdapter(HTTPAdapter):
             return s, 8448

         if ":" in s:
-            out = s.rsplit(":",1)
+            out = s.rsplit(":", 1)
             try:
                 port = int(out[1])
             except ValueError:

@@ -263,7 +261,7 @@ class MatrixConnectionAdapter(HTTPAdapter):
         try:
             srv = srvlookup.lookup("matrix", "tcp", s)[0]
             return srv.host, srv.port
-        except:
+        except Exception:
             return s, 8448

     def get_connection(self, url, proxies=None):

@@ -272,10 +270,9 @@ class MatrixConnectionAdapter(HTTPAdapter):
         (host, port) = self.lookup(parsed.netloc)
         netloc = "%s:%d" % (host, port)
         print("Connecting to %s" % (netloc,), file=sys.stderr)
-        url = urlunparse((
-            "https", netloc, parsed.path, parsed.params, parsed.query,
-            parsed.fragment,
-        ))
+        url = urlunparse(
+            ("https", netloc, parsed.path, parsed.params, parsed.query, parsed.fragment)
+        )
         return super(MatrixConnectionAdapter, self).get_connection(url, proxies)
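
(The resolution order the hunks above reformat is worth seeing in one piece: an
explicit port wins, then a matrix SRV record, then the default federation port
8448. A minimal standalone sketch using the same srvlookup package as the
script; the function name and the fall-through on a bad port are illustrative
only.)

    import srvlookup

    def lookup(server_name, default_port=8448):
        # "host:port" is taken literally, as in MatrixConnectionAdapter.lookup.
        if ":" in server_name:
            host, _, port = server_name.rpartition(":")
            try:
                return host, int(port)
            except ValueError:
                pass
        # Otherwise consult the _matrix._tcp SRV record, with a safe fallback.
        try:
            srv = srvlookup.lookup("matrix", "tcp", server_name)[0]
            return srv.host, srv.port
        except Exception:
            return server_name, default_port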
@@ -1,23 +1,31 @@
-from synapse.storage.pdu import PduStore
-from synapse.storage.signatures import SignatureStore
-from synapse.storage._base import SQLBaseStore
-from synapse.federation.units import Pdu
-from synapse.crypto.event_signing import (
-    add_event_pdu_content_hash, compute_pdu_event_reference_hash
-)
-from synapse.api.events.utils import prune_pdu
-from unpaddedbase64 import encode_base64, decode_base64
-from canonicaljson import encode_canonical_json
+from __future__ import print_function
+
 import sqlite3
 import sys

+from unpaddedbase64 import decode_base64, encode_base64
+
+from synapse.crypto.event_signing import (
+    add_event_pdu_content_hash,
+    compute_pdu_event_reference_hash,
+)
+from synapse.federation.units import Pdu
+from synapse.storage._base import SQLBaseStore
+from synapse.storage.pdu import PduStore
+from synapse.storage.signatures import SignatureStore
+

 class Store(object):
     _get_pdu_tuples = PduStore.__dict__["_get_pdu_tuples"]
     _get_pdu_content_hashes_txn = SignatureStore.__dict__["_get_pdu_content_hashes_txn"]
     _get_prev_pdu_hashes_txn = SignatureStore.__dict__["_get_prev_pdu_hashes_txn"]
-    _get_pdu_origin_signatures_txn = SignatureStore.__dict__["_get_pdu_origin_signatures_txn"]
+    _get_pdu_origin_signatures_txn = SignatureStore.__dict__[
+        "_get_pdu_origin_signatures_txn"
+    ]
     _store_pdu_content_hash_txn = SignatureStore.__dict__["_store_pdu_content_hash_txn"]
-    _store_pdu_reference_hash_txn = SignatureStore.__dict__["_store_pdu_reference_hash_txn"]
+    _store_pdu_reference_hash_txn = SignatureStore.__dict__[
+        "_store_pdu_reference_hash_txn"
+    ]
     _store_prev_pdu_hash_txn = SignatureStore.__dict__["_store_prev_pdu_hash_txn"]
     _simple_insert_txn = SQLBaseStore.__dict__["_simple_insert_txn"]

@@ -26,9 +34,7 @@ store = Store()


 def select_pdus(cursor):
-    cursor.execute(
-        "SELECT pdu_id, origin FROM pdus ORDER BY depth ASC"
-    )
+    cursor.execute("SELECT pdu_id, origin FROM pdus ORDER BY depth ASC")

     ids = cursor.fetchall()

@@ -41,23 +47,30 @@ def select_pdus(cursor):
     for pdu in pdus:
         try:
             if pdu.prev_pdus:
-                print "PROCESS", pdu.pdu_id, pdu.origin, pdu.prev_pdus
+                print("PROCESS", pdu.pdu_id, pdu.origin, pdu.prev_pdus)
                 for pdu_id, origin, hashes in pdu.prev_pdus:
                     ref_alg, ref_hsh = reference_hashes[(pdu_id, origin)]
                     hashes[ref_alg] = encode_base64(ref_hsh)
-                    store._store_prev_pdu_hash_txn(cursor, pdu.pdu_id, pdu.origin, pdu_id, origin, ref_alg, ref_hsh)
-                print "SUCCESS", pdu.pdu_id, pdu.origin, pdu.prev_pdus
+                    store._store_prev_pdu_hash_txn(
+                        cursor, pdu.pdu_id, pdu.origin, pdu_id, origin, ref_alg, ref_hsh
+                    )
+                print("SUCCESS", pdu.pdu_id, pdu.origin, pdu.prev_pdus)
             pdu = add_event_pdu_content_hash(pdu)
             ref_alg, ref_hsh = compute_pdu_event_reference_hash(pdu)
             reference_hashes[(pdu.pdu_id, pdu.origin)] = (ref_alg, ref_hsh)
-            store._store_pdu_reference_hash_txn(cursor, pdu.pdu_id, pdu.origin, ref_alg, ref_hsh)
+            store._store_pdu_reference_hash_txn(
+                cursor, pdu.pdu_id, pdu.origin, ref_alg, ref_hsh
+            )

             for alg, hsh_base64 in pdu.hashes.items():
-                print alg, hsh_base64
-                store._store_pdu_content_hash_txn(cursor, pdu.pdu_id, pdu.origin, alg, decode_base64(hsh_base64))
-
-        except:
-            print "FAILED_", pdu.pdu_id, pdu.origin, pdu.prev_pdus
+                print(alg, hsh_base64)
+                store._store_pdu_content_hash_txn(
+                    cursor, pdu.pdu_id, pdu.origin, alg, decode_base64(hsh_base64)
+                )
+
+        except Exception:
+            print("FAILED_", pdu.pdu_id, pdu.origin, pdu.prev_pdus)


 def main():
     conn = sqlite3.connect(sys.argv[1])

@@ -65,5 +78,6 @@ def main():
     select_pdus(cursor)
     conn.commit()

-if __name__=='__main__':
+
+if __name__ == '__main__':
     main()
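
(Several hunks in this commit replace a bare "except:" with "except Exception:".
The distinction matters: a bare except also traps SystemExit and
KeyboardInterrupt, so a Ctrl-C during the loop above could be silently
swallowed. A tiny illustration, with hypothetical names:)

    def flaky():
        raise ValueError("boom")

    try:
        flaky()
    except Exception as e:  # KeyboardInterrupt / SystemExit still propagate
        print("recovered from:", e)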
@@ -1,18 +1,17 @@
 #! /usr/bin/python

-import ast
 import argparse
+import ast
 import os
 import sys

 import yaml

 PATTERNS_V1 = []
 PATTERNS_V2 = []

-RESULT = {
-    "v1": PATTERNS_V1,
-    "v2": PATTERNS_V2,
-}
+RESULT = {"v1": PATTERNS_V1, "v2": PATTERNS_V2}

 class CallVisitor(ast.NodeVisitor):
     def visit_Call(self, node):

@@ -21,7 +20,6 @@ class CallVisitor(ast.NodeVisitor):
         else:
             return

-
         if name == "client_path_patterns":
             PATTERNS_V1.append(node.args[0].s)
         elif name == "client_v2_patterns":

@@ -42,8 +40,10 @@ def find_patterns_in_file(filepath):
 parser = argparse.ArgumentParser(description='Find url patterns.')

 parser.add_argument(
-    "directories", nargs='+', metavar="DIR",
-    help="Directories to search for definitions"
+    "directories",
+    nargs='+',
+    metavar="DIR",
+    help="Directories to search for definitions",
 )

 args = parser.parse_args()
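
(The pattern scraper above drives an ast.NodeVisitor over each source file and
records the first argument of calls to client_path_patterns /
client_v2_patterns. A self-contained sketch of the same visitor idiom, with
illustrative names:)

    import ast

    class CallNames(ast.NodeVisitor):
        def __init__(self):
            self.called = []

        def visit_Call(self, node):
            # Record plain-name calls such as client_path_patterns("/foo")
            if isinstance(node.func, ast.Name):
                self.called.append(node.func.id)
            self.generic_visit(node)

    visitor = CallNames()
    visitor.visit(ast.parse("client_path_patterns('/foo'); other()"))
    print(visitor.called)  # ['client_path_patterns', 'other']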
 9  scripts-dev/next_github_number.sh  Executable file
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+set -e
+
+# Fetch the current GitHub issue number, add one to it -- presto! The likely
+# next PR number.
+CURRENT_NUMBER=`curl -s "https://api.github.com/repos/matrix-org/synapse/issues?state=all&per_page=1" | jq -r ".[0].number"`
+CURRENT_NUMBER=$((CURRENT_NUMBER+1))
+echo $CURRENT_NUMBER
@@ -1,57 +0,0 @@
-#!/bin/bash
-
-## CAUTION:
-## This script will remove (hopefully) all trace of the given room ID from
-## your homeserver.db
-
-## Do not run it lightly.
-
-set -e
-
-if [ "$1" == "-h" ] || [ "$1" == "" ]; then
-  echo "Call with ROOM_ID as first option and then pipe it into the database. So for instance you might run"
-  echo " nuke-room-from-db.sh <room_id> | sqlite3 homeserver.db"
-  echo "or"
-  echo " nuke-room-from-db.sh <room_id> | psql --dbname=synapse"
-  exit
-fi
-
-ROOMID="$1"
-
-cat <<EOF
-DELETE FROM event_forward_extremities WHERE room_id = '$ROOMID';
-DELETE FROM event_backward_extremities WHERE room_id = '$ROOMID';
-DELETE FROM event_edges WHERE room_id = '$ROOMID';
-DELETE FROM room_depth WHERE room_id = '$ROOMID';
-DELETE FROM state_forward_extremities WHERE room_id = '$ROOMID';
-DELETE FROM events WHERE room_id = '$ROOMID';
-DELETE FROM event_json WHERE room_id = '$ROOMID';
-DELETE FROM state_events WHERE room_id = '$ROOMID';
-DELETE FROM current_state_events WHERE room_id = '$ROOMID';
-DELETE FROM room_memberships WHERE room_id = '$ROOMID';
-DELETE FROM feedback WHERE room_id = '$ROOMID';
-DELETE FROM topics WHERE room_id = '$ROOMID';
-DELETE FROM room_names WHERE room_id = '$ROOMID';
-DELETE FROM rooms WHERE room_id = '$ROOMID';
-DELETE FROM room_hosts WHERE room_id = '$ROOMID';
-DELETE FROM room_aliases WHERE room_id = '$ROOMID';
-DELETE FROM state_groups WHERE room_id = '$ROOMID';
-DELETE FROM state_groups_state WHERE room_id = '$ROOMID';
-DELETE FROM receipts_graph WHERE room_id = '$ROOMID';
-DELETE FROM receipts_linearized WHERE room_id = '$ROOMID';
-DELETE FROM event_search WHERE room_id = '$ROOMID';
-DELETE FROM guest_access WHERE room_id = '$ROOMID';
-DELETE FROM history_visibility WHERE room_id = '$ROOMID';
-DELETE FROM room_tags WHERE room_id = '$ROOMID';
-DELETE FROM room_tags_revisions WHERE room_id = '$ROOMID';
-DELETE FROM room_account_data WHERE room_id = '$ROOMID';
-DELETE FROM event_push_actions WHERE room_id = '$ROOMID';
-DELETE FROM local_invites WHERE room_id = '$ROOMID';
-DELETE FROM pusher_throttle WHERE room_id = '$ROOMID';
-DELETE FROM event_reports WHERE room_id = '$ROOMID';
-DELETE FROM public_room_list_stream WHERE room_id = '$ROOMID';
-DELETE FROM stream_ordering_to_exterm WHERE room_id = '$ROOMID';
-DELETE FROM event_auth WHERE room_id = '$ROOMID';
-DELETE FROM appservice_room_list WHERE room_id = '$ROOMID';
-VACUUM;
-EOF
@@ -1,8 +1,9 @@
-import requests
 import collections
+import json
 import sys
 import time
-import json
+
+import requests

 Entry = collections.namedtuple("Entry", "name position rows")

@@ -30,11 +31,11 @@ def parse_response(content):


 def replicate(server, streams):
-    return parse_response(requests.get(
-        server + "/_synapse/replication",
-        verify=False,
-        params=streams
-    ).content)
+    return parse_response(
+        requests.get(
+            server + "/_synapse/replication", verify=False, params=streams
+        ).content
+    )


 def main():

@@ -45,16 +46,16 @@ def main():
         try:
             streams = {
                 row.name: row.position
-                for row in replicate(server, {"streams":"-1"})["streams"].rows
+                for row in replicate(server, {"streams": "-1"})["streams"].rows
             }
-        except requests.exceptions.ConnectionError as e:
+        except requests.exceptions.ConnectionError:
             time.sleep(0.1)

     while True:
         try:
             results = replicate(server, streams)
-        except:
-            sys.stdout.write("connection_lost("+ repr(streams) + ")\n")
+        except Exception:
+            sys.stdout.write("connection_lost(" + repr(streams) + ")\n")
             break
         for update in results.values():
             for row in update.rows:

@@ -62,6 +63,5 @@ def main():
             streams[update.name] = update.position


-
-if __name__=='__main__':
+if __name__ == '__main__':
     main()
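
(The tailing loop above keeps a dict of stream name -> position and re-requests
anything newer on each poll; "streams": "-1" asks the server to enumerate every
stream's current position. A condensed sketch of one polling step, assuming the
replicate() helper from the script:)

    def poll_step(server, streams):
        results = replicate(server, streams)
        for update in results.values():
            for row in update.rows:
                print(update.name, row)
            # Advance our cursor so the next poll only fetches new rows.
            streams[update.name] = update.position
        return streams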
@@ -1,12 +1,10 @@
 #!/usr/bin/env python

 import argparse
+import getpass
 import sys

 import bcrypt
-import getpass
-
 import yaml

 bcrypt_rounds=12

@@ -52,4 +50,3 @@ if __name__ == "__main__":
         password = prompt_for_pass()

     print bcrypt.hashpw(password + password_pepper, bcrypt.gensalt(bcrypt_rounds))
-
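
(For reference, the same hash on Python 3 -- bcrypt.hashpw wants bytes, and the
optional pepper is appended to the password before hashing, as in the script
above. The pepper value here is a placeholder:)

    import bcrypt

    password = "correct horse battery staple"
    password_pepper = "EXAMPLE_PEPPER"  # placeholder; read from homeserver.yaml

    hashed = bcrypt.hashpw(
        (password + password_pepper).encode("utf8"), bcrypt.gensalt(12)
    )
    print(hashed.decode("ascii"))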
@@ -36,12 +36,9 @@ from __future__ import print_function

 import argparse
 import logging
-
-import sys
-
 import os
-
 import shutil
+import sys

 from synapse.rest.media.v1.filepath import MediaFilePaths

@@ -77,24 +74,23 @@ def move_media(origin_server, file_id, src_paths, dest_paths):
     if not os.path.exists(original_file):
         logger.warn(
             "Original for %s/%s (%s) does not exist",
-            origin_server, file_id, original_file,
+            origin_server,
+            file_id,
+            original_file,
         )
     else:
         mkdir_and_move(
-            original_file,
-            dest_paths.remote_media_filepath(origin_server, file_id),
+            original_file, dest_paths.remote_media_filepath(origin_server, file_id)
         )

     # now look for thumbnails
-    original_thumb_dir = src_paths.remote_media_thumbnail_dir(
-        origin_server, file_id,
-    )
+    original_thumb_dir = src_paths.remote_media_thumbnail_dir(origin_server, file_id)
     if not os.path.exists(original_thumb_dir):
         return

     mkdir_and_move(
         original_thumb_dir,
-        dest_paths.remote_media_thumbnail_dir(origin_server, file_id)
+        dest_paths.remote_media_thumbnail_dir(origin_server, file_id),
     )

@@ -109,24 +105,16 @@ def mkdir_and_move(original_file, dest_file):

 if __name__ == "__main__":
     parser = argparse.ArgumentParser(
-        description=__doc__,
-        formatter_class = argparse.RawDescriptionHelpFormatter,
-    )
-    parser.add_argument(
-        "-v", action='store_true', help='enable debug logging')
-    parser.add_argument(
-        "src_repo",
-        help="Path to source content repo",
-    )
-    parser.add_argument(
-        "dest_repo",
-        help="Path to source content repo",
+        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
     )
+    parser.add_argument("-v", action='store_true', help='enable debug logging')
+    parser.add_argument("src_repo", help="Path to source content repo")
+    parser.add_argument("dest_repo", help="Path to source content repo")
     args = parser.parse_args()

     logging_config = {
         "level": logging.DEBUG if args.v else logging.INFO,
-        "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s"
+        "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s",
     }
     logging.basicConfig(**logging_config)
@@ -14,187 +14,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import argparse
-import getpass
-import hashlib
-import hmac
-import json
-import sys
-import urllib2
-import yaml
-
-
-def request_registration(user, password, server_location, shared_secret, admin=False):
-    req = urllib2.Request(
-        "%s/_matrix/client/r0/admin/register" % (server_location,),
-        headers={'Content-Type': 'application/json'}
-    )
-
-    try:
-        if sys.version_info[:3] >= (2, 7, 9):
-            # As of version 2.7.9, urllib2 now checks SSL certs
-            import ssl
-            f = urllib2.urlopen(req, context=ssl.SSLContext(ssl.PROTOCOL_SSLv23))
-        else:
-            f = urllib2.urlopen(req)
-        body = f.read()
-        f.close()
-        nonce = json.loads(body)["nonce"]
-    except urllib2.HTTPError as e:
-        print "ERROR! Received %d %s" % (e.code, e.reason,)
-        if 400 <= e.code < 500:
-            if e.info().type == "application/json":
-                resp = json.load(e)
-                if "error" in resp:
-                    print resp["error"]
-        sys.exit(1)
-
-    mac = hmac.new(
-        key=shared_secret,
-        digestmod=hashlib.sha1,
-    )
-
-    mac.update(nonce)
-    mac.update("\x00")
-    mac.update(user)
-    mac.update("\x00")
-    mac.update(password)
-    mac.update("\x00")
-    mac.update("admin" if admin else "notadmin")
-
-    mac = mac.hexdigest()
-
-    data = {
-        "nonce": nonce,
-        "username": user,
-        "password": password,
-        "mac": mac,
-        "admin": admin,
-    }
-
-    server_location = server_location.rstrip("/")
-
-    print "Sending registration request..."
-
-    req = urllib2.Request(
-        "%s/_matrix/client/r0/admin/register" % (server_location,),
-        data=json.dumps(data),
-        headers={'Content-Type': 'application/json'}
-    )
-    try:
-        if sys.version_info[:3] >= (2, 7, 9):
-            # As of version 2.7.9, urllib2 now checks SSL certs
-            import ssl
-            f = urllib2.urlopen(req, context=ssl.SSLContext(ssl.PROTOCOL_SSLv23))
-        else:
-            f = urllib2.urlopen(req)
-        f.read()
-        f.close()
-        print "Success."
-    except urllib2.HTTPError as e:
-        print "ERROR! Received %d %s" % (e.code, e.reason,)
-        if 400 <= e.code < 500:
-            if e.info().type == "application/json":
-                resp = json.load(e)
-                if "error" in resp:
-                    print resp["error"]
-        sys.exit(1)
-
-
-def register_new_user(user, password, server_location, shared_secret, admin):
-    if not user:
-        try:
-            default_user = getpass.getuser()
-        except:
-            default_user = None
-
-        if default_user:
-            user = raw_input("New user localpart [%s]: " % (default_user,))
-            if not user:
-                user = default_user
-        else:
-            user = raw_input("New user localpart: ")
-
-    if not user:
-        print "Invalid user name"
-        sys.exit(1)
-
-    if not password:
-        password = getpass.getpass("Password: ")
-
-        if not password:
-            print "Password cannot be blank."
-            sys.exit(1)
-
-        confirm_password = getpass.getpass("Confirm password: ")
-
-        if password != confirm_password:
-            print "Passwords do not match"
-            sys.exit(1)
-
-    if not admin:
-        admin = raw_input("Make admin [no]: ")
-        if admin in ("y", "yes", "true"):
-            admin = True
-        else:
-            admin = False
-
-    request_registration(user, password, server_location, shared_secret, bool(admin))
-
+from __future__ import print_function
+
+from synapse._scripts.register_new_matrix_user import main

 if __name__ == "__main__":
-    parser = argparse.ArgumentParser(
-        description="Used to register new users with a given home server when"
-                    " registration has been disabled. The home server must be"
-                    " configured with the 'registration_shared_secret' option"
-                    " set.",
-    )
-    parser.add_argument(
-        "-u", "--user",
-        default=None,
-        help="Local part of the new user. Will prompt if omitted.",
-    )
-    parser.add_argument(
-        "-p", "--password",
-        default=None,
-        help="New password for user. Will prompt if omitted.",
-    )
-    parser.add_argument(
-        "-a", "--admin",
-        action="store_true",
-        help="Register new user as an admin. Will prompt if omitted.",
-    )
-
-    group = parser.add_mutually_exclusive_group(required=True)
-    group.add_argument(
-        "-c", "--config",
-        type=argparse.FileType('r'),
-        help="Path to server config file. Used to read in shared secret.",
-    )
-
-    group.add_argument(
-        "-k", "--shared-secret",
-        help="Shared secret as defined in server config file.",
-    )
-
-    parser.add_argument(
-        "server_url",
-        default="https://localhost:8448",
-        nargs='?',
-        help="URL to use to talk to the home server. Defaults to "
-             " 'https://localhost:8448'.",
-    )
-
-    args = parser.parse_args()
-
-    if "config" in args and args.config:
-        config = yaml.safe_load(args.config)
-        secret = config.get("registration_shared_secret", None)
-        if not secret:
-            print "No 'registration_shared_secret' defined in config."
-            sys.exit(1)
-    else:
-        secret = args.shared_secret
-
-    register_new_user(args.user, args.password, args.server_url, secret, args.admin)
+    main()
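
(The script gutted above -- and its replacement under synapse/_scripts, shown
in full at the end of this diff -- authenticate the registration request with
an HMAC-SHA1 over the NUL-separated nonce, username, password and admin flag,
keyed on registration_shared_secret. Condensed into one helper:)

    import hashlib
    import hmac

    def registration_mac(shared_secret, nonce, user, password, admin=False):
        mac = hmac.new(key=shared_secret.encode("utf8"), digestmod=hashlib.sha1)
        for part in (nonce, user, password):
            mac.update(part.encode("utf8"))
            mac.update(b"\x00")
        mac.update(b"admin" if admin else b"notadmin")
        return mac.hexdigest()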
@@ -15,23 +15,23 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from twisted.internet import defer, reactor
-from twisted.enterprise import adbapi
-
-from synapse.storage._base import LoggingTransaction, SQLBaseStore
-from synapse.storage.engines import create_engine
-from synapse.storage.prepare_database import prepare_database
-
 import argparse
 import curses
 import logging
 import sys
 import time
 import traceback
-import yaml

 from six import string_types

+import yaml
+
+from twisted.enterprise import adbapi
+from twisted.internet import defer, reactor
+
+from synapse.storage._base import LoggingTransaction, SQLBaseStore
+from synapse.storage.engines import create_engine
+from synapse.storage.prepare_database import prepare_database
+
 logger = logging.getLogger("synapse_port_db")

@@ -105,6 +105,7 @@ class Store(object):

     *All* database interactions should go through this object.
     """
+
     def __init__(self, db_pool, engine):
         self.db_pool = db_pool
         self.database_engine = engine

@@ -135,7 +136,8 @@ class Store(object):
                 txn = conn.cursor()
                 return func(
                     LoggingTransaction(txn, desc, self.database_engine, [], []),
-                    *args, **kwargs
+                    *args,
+                    **kwargs
                 )
             except self.database_engine.module.DatabaseError as e:
                 if self.database_engine.is_deadlock(e):

@@ -158,22 +160,20 @@ class Store(object):
         def r(txn):
             txn.execute(sql, args)
             return txn.fetchall()

         return self.runInteraction("execute_sql", r)

     def insert_many_txn(self, txn, table, headers, rows):
         sql = "INSERT INTO %s (%s) VALUES (%s)" % (
             table,
             ", ".join(k for k in headers),
-            ", ".join("%s" for _ in headers)
+            ", ".join("%s" for _ in headers),
         )

         try:
             txn.executemany(sql, rows)
-        except:
-            logger.exception(
-                "Failed to insert: %s",
-                table,
-            )
+        except Exception:
+            logger.exception("Failed to insert: %s", table)
             raise

@@ -206,7 +206,7 @@ class Porter(object):
                     "table_name": table,
                     "forward_rowid": 1,
                     "backward_rowid": 0,
-                }
+                },
             )

             forward_chunk = 1

@@ -221,10 +221,10 @@ class Porter(object):
                 table, forward_chunk, backward_chunk
             )
         else:
+
             def delete_all(txn):
                 txn.execute(
-                    "DELETE FROM port_from_sqlite3 WHERE table_name = %s",
-                    (table,)
+                    "DELETE FROM port_from_sqlite3 WHERE table_name = %s", (table,)
                 )
                 txn.execute("TRUNCATE %s CASCADE" % (table,))

@@ -232,11 +232,7 @@ class Porter(object):

             yield self.postgres_store._simple_insert(
                 table="port_from_sqlite3",
-                values={
-                    "table_name": table,
-                    "forward_rowid": 1,
-                    "backward_rowid": 0,
-                }
+                values={"table_name": table, "forward_rowid": 1, "backward_rowid": 0},
             )

             forward_chunk = 1

@@ -251,12 +247,16 @@ class Porter(object):
             )

     @defer.inlineCallbacks
-    def handle_table(self, table, postgres_size, table_size, forward_chunk,
-                     backward_chunk):
+    def handle_table(
+        self, table, postgres_size, table_size, forward_chunk, backward_chunk
+    ):
         logger.info(
             "Table %s: %i/%i (rows %i-%i) already ported",
-            table, postgres_size, table_size,
-            backward_chunk+1, forward_chunk-1,
+            table,
+            postgres_size,
+            table_size,
+            backward_chunk + 1,
+            forward_chunk - 1,
         )

         if not table_size:

@@ -271,7 +271,9 @@ class Porter(object):
             return

         if table in (
-            "user_directory", "user_directory_search", "users_who_share_rooms",
+            "user_directory",
+            "user_directory_search",
+            "users_who_share_rooms",
             "users_in_pubic_room",
         ):
             # We don't port these tables, as they're a faff and we can regenreate

@@ -283,37 +285,35 @@ class Porter(object):
             # We need to make sure there is a single row, `(X, null), as that is
             # what synapse expects to be there.
             yield self.postgres_store._simple_insert(
-                table=table,
-                values={"stream_id": None},
+                table=table, values={"stream_id": None}
             )
             self.progress.update(table, table_size)  # Mark table as done
             return

         forward_select = (
-            "SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?"
-            % (table,)
+            "SELECT rowid, * FROM %s WHERE rowid >= ? ORDER BY rowid LIMIT ?" % (table,)
         )

         backward_select = (
-            "SELECT rowid, * FROM %s WHERE rowid <= ? ORDER BY rowid LIMIT ?"
-            % (table,)
+            "SELECT rowid, * FROM %s WHERE rowid <= ? ORDER BY rowid LIMIT ?" % (table,)
         )

         do_forward = [True]
         do_backward = [True]

         while True:
+
             def r(txn):
                 forward_rows = []
                 backward_rows = []
                 if do_forward[0]:
-                    txn.execute(forward_select, (forward_chunk, self.batch_size,))
+                    txn.execute(forward_select, (forward_chunk, self.batch_size))
                     forward_rows = txn.fetchall()
                     if not forward_rows:
                         do_forward[0] = False

                 if do_backward[0]:
-                    txn.execute(backward_select, (backward_chunk, self.batch_size,))
+                    txn.execute(backward_select, (backward_chunk, self.batch_size))
                     backward_rows = txn.fetchall()
                     if not backward_rows:
                         do_backward[0] = False

@@ -325,9 +325,7 @@ class Porter(object):

                 return headers, forward_rows, backward_rows

-            headers, frows, brows = yield self.sqlite_store.runInteraction(
-                "select", r
-            )
+            headers, frows, brows = yield self.sqlite_store.runInteraction("select", r)

             if frows or brows:
                 if frows:

@@ -339,9 +337,7 @@ class Porter(object):
                 rows = self._convert_rows(table, headers, rows)

                 def insert(txn):
-                    self.postgres_store.insert_many_txn(
-                        txn, table, headers[1:], rows
-                    )
+                    self.postgres_store.insert_many_txn(txn, table, headers[1:], rows)

                     self.postgres_store._simple_update_one_txn(
                         txn,

@@ -362,8 +358,9 @@ class Porter(object):
                 return

     @defer.inlineCallbacks
-    def handle_search_table(self, postgres_size, table_size, forward_chunk,
-                            backward_chunk):
+    def handle_search_table(
+        self, postgres_size, table_size, forward_chunk, backward_chunk
+    ):
         select = (
             "SELECT es.rowid, es.*, e.origin_server_ts, e.stream_ordering"
             " FROM event_search as es"

@@ -373,8 +370,9 @@ class Porter(object):
         )

         while True:
+
             def r(txn):
-                txn.execute(select, (forward_chunk, self.batch_size,))
+                txn.execute(select, (forward_chunk, self.batch_size))
                 rows = txn.fetchall()
                 headers = [column[0] for column in txn.description]

@@ -402,18 +400,21 @@ class Porter(object):
                 else:
                     rows_dict.append(d)

-                txn.executemany(sql, [
-                    (
-                        row["event_id"],
-                        row["room_id"],
-                        row["key"],
-                        row["sender"],
-                        row["value"],
-                        row["origin_server_ts"],
-                        row["stream_ordering"],
-                    )
-                    for row in rows_dict
-                ])
+                txn.executemany(
+                    sql,
+                    [
+                        (
+                            row["event_id"],
+                            row["room_id"],
+                            row["key"],
+                            row["sender"],
+                            row["value"],
+                            row["origin_server_ts"],
+                            row["stream_ordering"],
+                        )
+                        for row in rows_dict
+                    ],
+                )

                 self.postgres_store._simple_update_one_txn(
                     txn,

@@ -437,7 +438,8 @@ class Porter(object):
     def setup_db(self, db_config, database_engine):
         db_conn = database_engine.module.connect(
             **{
-                k: v for k, v in db_config.get("args", {}).items()
+                k: v
+                for k, v in db_config.get("args", {}).items()
                 if not k.startswith("cp_")
             }
         )

@@ -450,13 +452,11 @@ class Porter(object):
     def run(self):
         try:
             sqlite_db_pool = adbapi.ConnectionPool(
-                self.sqlite_config["name"],
-                **self.sqlite_config["args"]
+                self.sqlite_config["name"], **self.sqlite_config["args"]
             )

             postgres_db_pool = adbapi.ConnectionPool(
-                self.postgres_config["name"],
-                **self.postgres_config["args"]
+                self.postgres_config["name"], **self.postgres_config["args"]
             )

             sqlite_engine = create_engine(sqlite_config)

@@ -465,9 +465,7 @@ class Porter(object):
             self.sqlite_store = Store(sqlite_db_pool, sqlite_engine)
             self.postgres_store = Store(postgres_db_pool, postgres_engine)

-            yield self.postgres_store.execute(
-                postgres_engine.check_database
-            )
+            yield self.postgres_store.execute(postgres_engine.check_database)

             # Step 1. Set up databases.
             self.progress.set_state("Preparing SQLite3")

@@ -477,6 +475,7 @@ class Porter(object):
             self.setup_db(postgres_config, postgres_engine)

             self.progress.set_state("Creating port tables")
+
             def create_port_table(txn):
                 txn.execute(
                     "CREATE TABLE IF NOT EXISTS port_from_sqlite3 ("

@@ -501,10 +500,9 @@ class Porter(object):
             )

             try:
-                yield self.postgres_store.runInteraction(
-                    "alter_table", alter_table
-                )
-            except Exception as e:
+                yield self.postgres_store.runInteraction("alter_table", alter_table)
+            except Exception:
+                # On Error Resume Next
                 pass

             yield self.postgres_store.runInteraction(

@@ -514,11 +512,7 @@ class Porter(object):
             # Step 2. Get tables.
             self.progress.set_state("Fetching tables")
             sqlite_tables = yield self.sqlite_store._simple_select_onecol(
-                table="sqlite_master",
-                keyvalues={
-                    "type": "table",
-                },
-                retcol="name",
+                table="sqlite_master", keyvalues={"type": "table"}, retcol="name"
             )

             postgres_tables = yield self.postgres_store._simple_select_onecol(

@@ -545,18 +539,14 @@ class Porter(object):
             # Step 4. Do the copying.
             self.progress.set_state("Copying to postgres")
             yield defer.gatherResults(
-                [
-                    self.handle_table(*res)
-                    for res in setup_res
-                ],
-                consumeErrors=True,
+                [self.handle_table(*res) for res in setup_res], consumeErrors=True
             )

             # Step 5. Do final post-processing
             yield self._setup_state_group_id_seq()

             self.progress.done()
-        except:
+        except Exception:
             global end_error_exec_info
             end_error_exec_info = sys.exc_info()
             logger.exception("")

@@ -566,9 +556,7 @@ class Porter(object):
     def _convert_rows(self, table, headers, rows):
        bool_col_names = BOOLEAN_COLUMNS.get(table, [])

-        bool_cols = [
-            i for i, h in enumerate(headers) if h in bool_col_names
-        ]
+        bool_cols = [i for i, h in enumerate(headers) if h in bool_col_names]

         class BadValueException(Exception):
             pass

@@ -577,18 +565,21 @@ class Porter(object):
             if j in bool_cols:
                 return bool(col)
             elif isinstance(col, string_types) and "\0" in col:
-                logger.warn("DROPPING ROW: NUL value in table %s col %s: %r", table, headers[j], col)
-                raise BadValueException();
+                logger.warn(
+                    "DROPPING ROW: NUL value in table %s col %s: %r",
+                    table,
+                    headers[j],
+                    col,
+                )
+                raise BadValueException()
             return col

         outrows = []
         for i, row in enumerate(rows):
             try:
-                outrows.append(tuple(
-                    conv(j, col)
-                    for j, col in enumerate(row)
-                    if j > 0
-                ))
+                outrows.append(
+                    tuple(conv(j, col) for j, col in enumerate(row) if j > 0)
+                )
             except BadValueException:
                 pass

@@ -616,9 +607,7 @@ class Porter(object):

             return headers, [r for r in rows if r[ts_ind] < yesterday]

-        headers, rows = yield self.sqlite_store.runInteraction(
-            "select", r,
-        )
+        headers, rows = yield self.sqlite_store.runInteraction("select", r)

         rows = self._convert_rows("sent_transactions", headers, rows)

@@ -639,7 +628,7 @@ class Porter(object):
             txn.execute(
                 "SELECT rowid FROM sent_transactions WHERE ts >= ?"
                 " ORDER BY rowid ASC LIMIT 1",
-                (yesterday,)
+                (yesterday,),
             )

             rows = txn.fetchall()

@@ -657,21 +646,17 @@ class Porter(object):
                 "table_name": "sent_transactions",
                 "forward_rowid": next_chunk,
                 "backward_rowid": 0,
-            }
+            },
         )

         def get_sent_table_size(txn):
             txn.execute(
-                "SELECT count(*) FROM sent_transactions"
-                " WHERE ts >= ?",
-                (yesterday,)
+                "SELECT count(*) FROM sent_transactions" " WHERE ts >= ?", (yesterday,)
             )
             size, = txn.fetchone()
             return int(size)

-        remaining_count = yield self.sqlite_store.execute(
-            get_sent_table_size
-        )
+        remaining_count = yield self.sqlite_store.execute(get_sent_table_size)

         total_count = remaining_count + inserted_rows

@@ -680,13 +665,11 @@ class Porter(object):
     @defer.inlineCallbacks
     def _get_remaining_count_to_port(self, table, forward_chunk, backward_chunk):
         frows = yield self.sqlite_store.execute_sql(
-            "SELECT count(*) FROM %s WHERE rowid >= ?" % (table,),
-            forward_chunk,
+            "SELECT count(*) FROM %s WHERE rowid >= ?" % (table,), forward_chunk
         )

         brows = yield self.sqlite_store.execute_sql(
-            "SELECT count(*) FROM %s WHERE rowid <= ?" % (table,),
-            backward_chunk,
+            "SELECT count(*) FROM %s WHERE rowid <= ?" % (table,), backward_chunk
         )

         defer.returnValue(frows[0][0] + brows[0][0])

@@ -694,7 +677,7 @@ class Porter(object):
     @defer.inlineCallbacks
     def _get_already_ported_count(self, table):
         rows = yield self.postgres_store.execute_sql(
-            "SELECT count(*) FROM %s" % (table,),
+            "SELECT count(*) FROM %s" % (table,)
         )

         defer.returnValue(rows[0][0])

@@ -717,22 +700,21 @@ class Porter(object):
     def _setup_state_group_id_seq(self):
         def r(txn):
             txn.execute("SELECT MAX(id) FROM state_groups")
-            next_id = txn.fetchone()[0]+1
-            txn.execute(
-                "ALTER SEQUENCE state_group_id_seq RESTART WITH %s",
-                (next_id,),
-            )
+            next_id = txn.fetchone()[0] + 1
+            txn.execute("ALTER SEQUENCE state_group_id_seq RESTART WITH %s", (next_id,))

         return self.postgres_store.runInteraction("setup_state_group_id_seq", r)


 ##############################################
-###### The following is simply UI stuff ######
+# The following is simply UI stuff
 ##############################################


 class Progress(object):
     """Used to report progress of the port
     """
+
     def __init__(self):
         self.tables = {}

@@ -758,6 +740,7 @@ class Progress(object):
 class CursesProgress(Progress):
     """Reports progress to a curses window
     """
+
     def __init__(self, stdscr):
         self.stdscr = stdscr

@@ -801,7 +784,7 @@ class CursesProgress(Progress):
         duration = int(now) - int(self.start_time)

         minutes, seconds = divmod(duration, 60)
-        duration_str = '%02dm %02ds' % (minutes, seconds,)
+        duration_str = '%02dm %02ds' % (minutes, seconds)

         if self.finished:
             status = "Time spent: %s (Done!)" % (duration_str,)

@@ -814,16 +797,12 @@ class CursesProgress(Progress):
             est_remaining_str = '%02dm %02ds remaining' % divmod(est_remaining, 60)
         else:
             est_remaining_str = "Unknown"
-        status = (
-            "Time spent: %s (est. remaining: %s)"
-            % (duration_str, est_remaining_str,)
-        )
+        status = "Time spent: %s (est. remaining: %s)" % (
+            duration_str,
+            est_remaining_str,
+        )

-        self.stdscr.addstr(
-            0, 0,
-            status,
-            curses.A_BOLD,
-        )
+        self.stdscr.addstr(0, 0, status, curses.A_BOLD)

         max_len = max([len(t) for t in self.tables.keys()])

@@ -831,9 +810,7 @@ class CursesProgress(Progress):
         middle_space = 1

         items = self.tables.items()
-        items.sort(
-            key=lambda i: (i[1]["perc"], i[0]),
-        )
+        items.sort(key=lambda i: (i[1]["perc"], i[0]))

         for i, (table, data) in enumerate(items):
             if i + 2 >= rows:

@@ -844,9 +821,7 @@ class CursesProgress(Progress):
             color = curses.color_pair(2) if perc == 100 else curses.color_pair(1)

             self.stdscr.addstr(
-                i + 2, left_margin + max_len - len(table),
-                table,
-                curses.A_BOLD | color,
+                i + 2, left_margin + max_len - len(table), table, curses.A_BOLD | color
             )

             size = 20

@@ -857,15 +832,13 @@ class CursesProgress(Progress):
             )

             self.stdscr.addstr(
-                i + 2, left_margin + max_len + middle_space,
+                i + 2,
+                left_margin + max_len + middle_space,
                 "%s %3d%% (%d/%d)" % (progress, perc, data["num_done"], data["total"]),
             )

         if self.finished:
-            self.stdscr.addstr(
-                rows - 1, 0,
-                "Press any key to exit...",
-            )
+            self.stdscr.addstr(rows - 1, 0, "Press any key to exit...")

         self.stdscr.refresh()
         self.last_update = time.time()

@@ -877,29 +850,25 @@ class CursesProgress(Progress):

     def set_state(self, state):
         self.stdscr.clear()
-        self.stdscr.addstr(
-            0, 0,
-            state + "...",
-            curses.A_BOLD,
-        )
+        self.stdscr.addstr(0, 0, state + "...", curses.A_BOLD)
         self.stdscr.refresh()


 class TerminalProgress(Progress):
     """Just prints progress to the terminal
     """
+
     def update(self, table, num_done):
         super(TerminalProgress, self).update(table, num_done)

         data = self.tables[table]

-        print "%s: %d%% (%d/%d)" % (
-            table, data["perc"],
-            data["num_done"], data["total"],
-        )
+        print(
+            "%s: %d%% (%d/%d)" % (table, data["perc"], data["num_done"], data["total"])
+        )

     def set_state(self, state):
-        print state + "..."
+        print(state + "...")

@@ -909,34 +878,38 @@ class TerminalProgress(Progress):
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(
         description="A script to port an existing synapse SQLite database to"
         " a new PostgreSQL database."
     )
     parser.add_argument("-v", action='store_true')
     parser.add_argument(
-        "--sqlite-database", required=True,
+        "--sqlite-database",
+        required=True,
         help="The snapshot of the SQLite database file. This must not be"
-             " currently used by a running synapse server"
+        " currently used by a running synapse server",
     )
     parser.add_argument(
-        "--postgres-config", type=argparse.FileType('r'), required=True,
-        help="The database config file for the PostgreSQL database"
+        "--postgres-config",
+        type=argparse.FileType('r'),
+        required=True,
+        help="The database config file for the PostgreSQL database",
     )
     parser.add_argument(
-        "--curses", action='store_true',
-        help="display a curses based progress UI"
+        "--curses", action='store_true', help="display a curses based progress UI"
     )

     parser.add_argument(
-        "--batch-size", type=int, default=1000,
+        "--batch-size",
+        type=int,
+        default=1000,
         help="The number of rows to select from the SQLite table each"
-             " iteration [default=1000]",
+        " iteration [default=1000]",
     )

     args = parser.parse_args()

     logging_config = {
         "level": logging.DEBUG if args.v else logging.INFO,
-        "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s"
+        "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s",
     }

     if args.curses:
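
(handle_table above walks each SQLite table outwards from a saved position in
both directions, a batch of rows per transaction, so an interrupted port can
resume where it left off. A stripped-down sketch of that chunking, with
illustrative fetch callbacks:)

    def port_chunks(fetch_forward, fetch_backward, batch_size=1000):
        forward_chunk, backward_chunk = 1, 0
        while True:
            frows = fetch_forward(forward_chunk, batch_size)   # rowid >= forward_chunk
            brows = fetch_backward(backward_chunk, batch_size)  # rowid <= backward_chunk
            if not frows and not brows:
                break
            if frows:
                forward_chunk = max(row[0] for row in frows) + 1  # rowid is column 0
            if brows:
                backward_chunk = min(row[0] for row in brows) - 1
            yield frows + brows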
 16  setup.cfg
@@ -14,17 +14,17 @@ ignore =
     pylint.cfg
     tox.ini

-[pep8]
-max-line-length = 90
-# W503 requires that binary operators be at the end, not start, of lines. Erik
-# doesn't like it. E203 is contrary to PEP8.
-ignore = W503,E203
-
 [flake8]
-# note that flake8 inherits the "ignore" settings from "pep8" (because it uses
-# pep8 to do those checks), but not the "max-line-length" setting
 max-line-length = 90
+
+# see https://pycodestyle.readthedocs.io/en/latest/intro.html#error-codes
+# for error codes. The ones we ignore are:
+#  W503: line break before binary operator
+#  W504: line break after binary operator
+#  E203: whitespace before ':' (which is contrary to pep8?)
+#  E731: do not assign a lambda expression, use a def
+ignore=W503,W504,E203,E731

 [isort]
 line_length = 89
 not_skip = __init__.py
 6  setup.py
@@ -1,6 +1,8 @@
 #!/usr/bin/env python

-# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2014-2017 OpenMarket Ltd
+# Copyright 2017 Vector Creations Ltd
+# Copyright 2017-2018 New Vector Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

@@ -86,7 +88,7 @@ setup(
     name="matrix-synapse",
     version=version,
     packages=find_packages(exclude=["tests", "tests.*"]),
-    description="Reference Synapse Home Server",
+    description="Reference homeserver for the Matrix decentralised comms protocol",
     install_requires=dependencies['requirements'](include_conditional=True).keys(),
     dependency_links=dependencies["DEPENDENCY_LINKS"].values(),
     include_package_data=True,
@@ -17,4 +17,14 @@
 """ This is a reference implementation of a Matrix home server.
 """

-__version__ = "0.33.3"
+try:
+    from twisted.internet import protocol
+    from twisted.internet.protocol import Factory
+    from twisted.names.dns import DNSDatagramProtocol
+    protocol.Factory.noisy = False
+    Factory.noisy = False
+    DNSDatagramProtocol.noisy = False
+except ImportError:
+    pass
+
+__version__ = "0.33.8"
--- /dev/null
+++ b/synapse/_scripts/__init__.py
(new empty file)

--- /dev/null
+++ b/synapse/_scripts/register_new_matrix_user.py
@@ -0,0 +1,215 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015, 2016 OpenMarket Ltd
+# Copyright 2018 New Vector
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import argparse
+import getpass
+import hashlib
+import hmac
+import logging
+import sys
+
+from six.moves import input
+
+import requests as _requests
+import yaml
+
+
+def request_registration(
+    user,
+    password,
+    server_location,
+    shared_secret,
+    admin=False,
+    requests=_requests,
+    _print=print,
+    exit=sys.exit,
+):
+
+    url = "%s/_matrix/client/r0/admin/register" % (server_location,)
+
+    # Get the nonce
+    r = requests.get(url, verify=False)
+
+    if r.status_code is not 200:
+        _print("ERROR! Received %d %s" % (r.status_code, r.reason))
+        if 400 <= r.status_code < 500:
+            try:
+                _print(r.json()["error"])
+            except Exception:
+                pass
+        return exit(1)
+
+    nonce = r.json()["nonce"]
+
+    mac = hmac.new(key=shared_secret.encode('utf8'), digestmod=hashlib.sha1)
+
+    mac.update(nonce.encode('utf8'))
+    mac.update(b"\x00")
+    mac.update(user.encode('utf8'))
+    mac.update(b"\x00")
+    mac.update(password.encode('utf8'))
+    mac.update(b"\x00")
+    mac.update(b"admin" if admin else b"notadmin")
+
+    mac = mac.hexdigest()
+
+    data = {
+        "nonce": nonce,
+        "username": user,
+        "password": password,
+        "mac": mac,
+        "admin": admin,
+    }
+
+    _print("Sending registration request...")
+    r = requests.post(url, json=data, verify=False)
+
+    if r.status_code is not 200:
+        _print("ERROR! Received %d %s" % (r.status_code, r.reason))
+        if 400 <= r.status_code < 500:
+            try:
+                _print(r.json()["error"])
+            except Exception:
+                pass
+        return exit(1)
+
+    _print("Success!")
+
+
+def register_new_user(user, password, server_location, shared_secret, admin):
+    if not user:
+        try:
+            default_user = getpass.getuser()
+        except Exception:
+            default_user = None
+
+        if default_user:
+            user = input("New user localpart [%s]: " % (default_user,))
+            if not user:
+                user = default_user
+        else:
+            user = input("New user localpart: ")
+
+    if not user:
+        print("Invalid user name")
+        sys.exit(1)
+
+    if not password:
+        password = getpass.getpass("Password: ")
+
+        if not password:
+            print("Password cannot be blank.")
+            sys.exit(1)
+
+        confirm_password = getpass.getpass("Confirm password: ")
+
+        if password != confirm_password:
+            print("Passwords do not match")
+            sys.exit(1)
+
+    if admin is None:
+        admin = input("Make admin [no]: ")
+        if admin in ("y", "yes", "true"):
+            admin = True
+        else:
+            admin = False
+
+    request_registration(user, password, server_location, shared_secret, bool(admin))
+
+
+def main():
+
+    logging.captureWarnings(True)
+
+    parser = argparse.ArgumentParser(
+        description="Used to register new users with a given home server when"
+        " registration has been disabled. The home server must be"
+        " configured with the 'registration_shared_secret' option"
+        " set."
+    )
+    parser.add_argument(
+        "-u",
+        "--user",
+        default=None,
+        help="Local part of the new user. Will prompt if omitted.",
+    )
+    parser.add_argument(
+        "-p",
+        "--password",
+        default=None,
+        help="New password for user. Will prompt if omitted.",
+    )
+    admin_group = parser.add_mutually_exclusive_group()
+    admin_group.add_argument(
+        "-a",
+        "--admin",
+        action="store_true",
+        help=(
+            "Register new user as an admin. "
+            "Will prompt if --no-admin is not set either."
+        ),
+    )
+    admin_group.add_argument(
+        "--no-admin",
+        action="store_true",
+        help=(
+            "Register new user as a regular user. "
+            "Will prompt if --admin is not set either."
+        ),
+    )
+
+    group = parser.add_mutually_exclusive_group(required=True)
+    group.add_argument(
+        "-c",
+        "--config",
+        type=argparse.FileType('r'),
+        help="Path to server config file. Used to read in shared secret.",
+    )
+
+    group.add_argument(
+        "-k", "--shared-secret", help="Shared secret as defined in server config file."
+    )
+
+    parser.add_argument(
+        "server_url",
+        default="https://localhost:8448",
+        nargs='?',
+        help="URL to use to talk to the home server. Defaults to "
+        " 'https://localhost:8448'.",
+    )
+
+    args = parser.parse_args()
+
+    if "config" in args and args.config:
+        config = yaml.safe_load(args.config)
+        secret = config.get("registration_shared_secret", None)
+        if not secret:
+            print("No 'registration_shared_secret' defined in config.")
+            sys.exit(1)
+    else:
+        secret = args.shared_secret
+
+    admin = None
+    if args.admin or args.no_admin:
+        admin = args.admin
+
+    register_new_user(args.user, args.password, args.server_url, secret, admin)
+
+
+if __name__ == "__main__":
+    main()
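The new script can be exercised directly; a typical invocation (assuming the usual console-script name, with placeholder values) would be:

    register_new_matrix_user -c homeserver.yaml https://localhost:8448

The registration MAC it sends can also be computed independently. A minimal sketch mirroring the HMAC construction in request_registration() above (the literal secret/nonce values here are made up for illustration):

import hashlib
import hmac

def registration_mac(shared_secret, nonce, user, password, admin=False):
    # HMAC-SHA1 over nonce, localpart and password, NUL-separated,
    # finished with an "admin"/"notadmin" marker -- as in the script above.
    mac = hmac.new(key=shared_secret.encode('utf8'), digestmod=hashlib.sha1)
    for part in (nonce, user, password):
        mac.update(part.encode('utf8'))
        mac.update(b"\x00")
    mac.update(b"admin" if admin else b"notadmin")
    return mac.hexdigest()

print(registration_mac("shared-secret", "nonce-from-server", "alice", "hunter2"))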
--- a/synapse/api/auth.py
+++ b/synapse/api/auth.py
@@ -26,6 +26,7 @@ import synapse.types
 from synapse import event_auth
 from synapse.api.constants import EventTypes, JoinRules, Membership
 from synapse.api.errors import AuthError, Codes, ResourceLimitError
+from synapse.config.server import is_threepid_reserved
 from synapse.types import UserID
 from synapse.util.caches import CACHE_SIZE_FACTOR, register_cache
 from synapse.util.caches.lrucache import LruCache
@@ -775,34 +776,56 @@ class Auth(object):
         )
 
     @defer.inlineCallbacks
-    def check_auth_blocking(self, user_id=None):
+    def check_auth_blocking(self, user_id=None, threepid=None):
         """Checks if the user should be rejected for some external reason,
         such as monthly active user limiting or global disable flag
 
         Args:
             user_id(str|None): If present, checks for presence against existing
                 MAU cohort
+
+            threepid(dict|None): If present, checks for presence against configured
+                reserved threepid. Used in cases where the user is trying register
+                with a MAU blocked server, normally they would be rejected but their
+                threepid is on the reserved list. user_id and
+                threepid should never be set at the same time.
         """
+
+        # Never fail an auth check for the server notices users
+        # This can be a problem where event creation is prohibited due to blocking
+        if user_id == self.hs.config.server_notices_mxid:
+            return
+
         if self.hs.config.hs_disabled:
             raise ResourceLimitError(
                 403, self.hs.config.hs_disabled_message,
-                errcode=Codes.RESOURCE_LIMIT_EXCEED,
-                admin_uri=self.hs.config.admin_uri,
+                errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
+                admin_contact=self.hs.config.admin_contact,
                 limit_type=self.hs.config.hs_disabled_limit_type
             )
         if self.hs.config.limit_usage_by_mau is True:
-            # If the user is already part of the MAU cohort
+            assert not (user_id and threepid)
+
+            # If the user is already part of the MAU cohort or a trial user
             if user_id:
                 timestamp = yield self.store.user_last_seen_monthly_active(user_id)
                 if timestamp:
                     return
+
+                is_trial = yield self.store.is_trial_user(user_id)
+                if is_trial:
+                    return
+            elif threepid:
+                # If the user does not exist yet, but is signing up with a
+                # reserved threepid then pass auth check
+                if is_threepid_reserved(self.hs.config, threepid):
+                    return
             # Else if there is no room in the MAU bucket, bail
             current_mau = yield self.store.get_monthly_active_count()
             if current_mau >= self.hs.config.max_mau_value:
                 raise ResourceLimitError(
                     403, "Monthly Active User Limit Exceeded",
-                    admin_uri=self.hs.config.admin_uri,
-                    errcode=Codes.RESOURCE_LIMIT_EXCEED,
+                    admin_contact=self.hs.config.admin_contact,
+                    errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
                     limit_type="monthly_active_user"
                 )
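The ordering of checks in the rewritten check_auth_blocking matters: the server-notices user is exempted first, then the global disable flag is applied, and only then MAU limiting (with exemptions for existing MAU members, trial users, and reserved threepids). A condensed sketch of that decision order, with abbreviated hypothetical helper names rather than the actual Synapse store API:

def is_blocked(config, store, user_id=None, threepid=None):
    # Server notices user is never blocked.
    if user_id == config.server_notices_mxid:
        return False
    # Global kill switch beats everything else.
    if config.hs_disabled:
        return True
    if config.limit_usage_by_mau:
        # Existing MAU members and trial users stay in.
        if user_id and (store.seen_this_month(user_id) or store.is_trial(user_id)):
            return False
        # Reserved threepids may register even when the server is full.
        if threepid and threepid in config.mau_limits_reserved_threepids:
            return False
        return store.monthly_active_count() >= config.max_mau_value
    return False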
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -79,6 +79,7 @@ class EventTypes(object):
     Name = "m.room.name"
 
     ServerACL = "m.room.server_acl"
+    Pinned = "m.room.pinned_events"
 
 
 class RejectedReason(object):
@@ -98,9 +99,17 @@ class ThirdPartyEntityKind(object):
     LOCATION = "location"
 
 
+class RoomVersions(object):
+    V1 = "1"
+    VDH_TEST = "vdh-test-version"
+
+
 # the version we will give rooms which are created on this server
-DEFAULT_ROOM_VERSION = "1"
+DEFAULT_ROOM_VERSION = RoomVersions.V1
 
 # vdh-test-version is a placeholder to get room versioning support working and tested
 # until we have a working v2.
-KNOWN_ROOM_VERSIONS = {"1", "vdh-test-version"}
+KNOWN_ROOM_VERSIONS = {RoomVersions.V1, RoomVersions.VDH_TEST}
+
+ServerNoticeMsgType = "m.server_notice"
+ServerNoticeLimitReached = "m.server_notice.usage_limit_reached"
--- a/synapse/api/errors.py
+++ b/synapse/api/errors.py
@@ -56,9 +56,10 @@ class Codes(object):
     SERVER_NOT_TRUSTED = "M_SERVER_NOT_TRUSTED"
     CONSENT_NOT_GIVEN = "M_CONSENT_NOT_GIVEN"
     CANNOT_LEAVE_SERVER_NOTICE_ROOM = "M_CANNOT_LEAVE_SERVER_NOTICE_ROOM"
-    RESOURCE_LIMIT_EXCEED = "M_RESOURCE_LIMIT_EXCEED"
+    RESOURCE_LIMIT_EXCEEDED = "M_RESOURCE_LIMIT_EXCEEDED"
     UNSUPPORTED_ROOM_VERSION = "M_UNSUPPORTED_ROOM_VERSION"
     INCOMPATIBLE_ROOM_VERSION = "M_INCOMPATIBLE_ROOM_VERSION"
+    WRONG_ROOM_KEYS_VERSION = "M_WRONG_ROOM_KEYS_VERSION"
 
 
 class CodeMessageException(RuntimeError):
@@ -238,11 +239,11 @@ class ResourceLimitError(SynapseError):
     """
     def __init__(
         self, code, msg,
-        errcode=Codes.RESOURCE_LIMIT_EXCEED,
-        admin_uri=None,
+        errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
+        admin_contact=None,
         limit_type=None,
     ):
-        self.admin_uri = admin_uri
+        self.admin_contact = admin_contact
         self.limit_type = limit_type
         super(ResourceLimitError, self).__init__(code, msg, errcode=errcode)
 
@@ -250,7 +251,7 @@ class ResourceLimitError(SynapseError):
         return cs_error(
             self.msg,
             self.errcode,
-            admin_uri=self.admin_uri,
+            admin_contact=self.admin_contact,
             limit_type=self.limit_type
         )
 
@@ -312,6 +313,20 @@ class LimitExceededError(SynapseError):
         )
 
 
+class RoomKeysVersionError(SynapseError):
+    """A client has tried to upload to a non-current version of the room_keys store
+    """
+    def __init__(self, current_version):
+        """
+        Args:
+            current_version (str): the current version of the store they should have used
+        """
+        super(RoomKeysVersionError, self).__init__(
+            403, "Wrong room_keys version", Codes.WRONG_ROOM_KEYS_VERSION
+        )
+        self.current_version = current_version
+
+
 class IncompatibleRoomVersionError(SynapseError):
     """A server is trying to join a room whose version it does not support."""
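The new RoomKeysVersionError carries the current store version so that a client uploading against a stale version can resynchronise. An illustrative sketch of how a handler might raise it (the handler itself is not part of this hunk):

def upload_room_keys(requested_version, current_version):
    # Reject uploads against anything but the live version, and tell the
    # client which version it should have used.
    if requested_version != current_version:
        raise RoomKeysVersionError(current_version=current_version)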
--- a/synapse/api/filtering.py
+++ b/synapse/api/filtering.py
@@ -172,7 +172,10 @@ USER_FILTER_SCHEMA = {
                 # events a lot easier as we can then use a negative lookbehind
                 # assertion to split '\.' If we allowed \\ then it would
                 # incorrectly split '\\.' See synapse.events.utils.serialize_event
-                "pattern": "^((?!\\\).)*$"
+                #
+                # Note that because this is a regular expression, we have to escape
+                # each backslash in the pattern.
+                "pattern": r"^((?!\\\\).)*$"
             }
         }
     },
@@ -226,7 +229,7 @@ class Filtering(object):
             jsonschema.validate(user_filter_json, USER_FILTER_SCHEMA,
                                 format_checker=FormatChecker())
         except jsonschema.ValidationError as e:
-            raise SynapseError(400, e.message)
+            raise SynapseError(400, str(e))
 
 
 class FilterCollection(object):
@@ -251,6 +254,7 @@ class FilterCollection(object):
             "include_leave", False
         )
         self.event_fields = filter_json.get("event_fields", [])
+        self.event_format = filter_json.get("event_format", "client")
 
     def __repr__(self):
        return "<FilterCollection %s>" % (json.dumps(self._filter_json),)
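The pattern change is easy to misread: each backslash must be escaped once for the regex engine, so the new raw string matches field paths that contain no double backslash, while still permitting a single escaped dot. A quick standalone check of the new pattern (illustrative values):

import re

pattern = r"^((?!\\\\).)*$"  # the "pattern" value in the new USER_FILTER_SCHEMA
assert re.match(pattern, r"content.body")               # plain path: accepted
assert re.match(pattern, r"content.some\.key")          # escaped dot: accepted
assert re.match(pattern, r"content.some\\key") is None  # double backslash: rejected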
--- a/synapse/api/urls.py
+++ b/synapse/api/urls.py
@@ -64,7 +64,7 @@ class ConsentURIBuilder(object):
         """
         mac = hmac.new(
             key=self._hmac_secret,
-            msg=user_id,
+            msg=user_id.encode('ascii'),
             digestmod=sha256,
         ).hexdigest()
         consent_uri = "%s_matrix/consent?%s" % (
--- a/synapse/app/__init__.py
+++ b/synapse/app/__init__.py
@@ -24,7 +24,7 @@ try:
     python_dependencies.check_requirements()
 except python_dependencies.MissingRequirementError as e:
     message = "\n".join([
-        "Missing Requirement: %s" % (e.message,),
+        "Missing Requirement: %s" % (str(e),),
         "To install run:",
         "    pip install --upgrade --force \"%s\"" % (e.dependency,),
         "",
--- a/synapse/app/_base.py
+++ b/synapse/app/_base.py
@@ -17,6 +17,7 @@ import gc
 import logging
 import sys
 
+import psutil
 from daemonize import Daemonize
 
 from twisted.internet import error, reactor
@@ -24,12 +25,6 @@ from twisted.internet import error, reactor
 from synapse.util import PreserveLoggingContext
 from synapse.util.rlimit import change_resource_limit
 
-try:
-    import affinity
-except Exception:
-    affinity = None
-
-
 logger = logging.getLogger(__name__)
 
 
@@ -89,15 +84,20 @@ def start_reactor(
         with PreserveLoggingContext():
             logger.info("Running")
             if cpu_affinity is not None:
-                if not affinity:
-                    quit_with_error(
-                        "Missing package 'affinity' required for cpu_affinity\n"
-                        "option\n\n"
-                        "Install by running:\n\n"
-                        "   pip install affinity\n\n"
-                    )
-                logger.info("Setting CPU affinity to %s" % cpu_affinity)
-                affinity.set_process_affinity_mask(0, cpu_affinity)
+                # Turn the bitmask into bits, reverse it so we go from 0 up
+                mask_to_bits = bin(cpu_affinity)[2:][::-1]
+
+                cpus = []
+                cpu_num = 0
+
+                for i in mask_to_bits:
+                    if i == "1":
+                        cpus.append(cpu_num)
+                    cpu_num += 1
+
+                p = psutil.Process()
+                p.cpu_affinity(cpus)
+
             change_resource_limit(soft_file_limit)
             if gc_thresholds:
                 gc.set_threshold(*gc_thresholds)
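The replacement drops the unmaintained affinity package in favour of psutil, which expects a list of CPU indices rather than a bitmask; e.g. a mask of 0x05 (binary 101) selects CPUs 0 and 2. A standalone check of the same conversion as in the hunk above:

def mask_to_cpus(cpu_affinity):
    # bin(5) == '0b101'; strip the '0b' and reverse so bit 0 comes first.
    mask_to_bits = bin(cpu_affinity)[2:][::-1]
    return [n for n, bit in enumerate(mask_to_bits) if bit == "1"]

assert mask_to_cpus(0x05) == [0, 2]
assert mask_to_cpus(0xFF) == list(range(8))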
--- a/synapse/app/appservice.py
+++ b/synapse/app/appservice.py
@@ -51,10 +51,7 @@ class AppserviceSlaveStore(
 
 
 class AppserviceServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = AppserviceSlaveStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = AppserviceSlaveStore
 
     def _listen_http(self, listener_config):
         port = listener_config["port"]
@@ -139,7 +136,7 @@ def start(config_options):
             "Synapse appservice", config_options
         )
     except ConfigError as e:
-        sys.stderr.write("\n" + e.message + "\n")
+        sys.stderr.write("\n" + str(e) + "\n")
         sys.exit(1)
 
     assert config.worker_app == "synapse.app.appservice"
@@ -175,7 +172,6 @@ def start(config_options):
 
     def start():
         ps.get_datastore().start_profiling()
-        ps.get_state_handler().start_caching()
 
     reactor.callWhenRunning(start)
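The same one-line DATASTORE_CLASS change recurs in every worker diff that follows: instead of each worker overriding setup() with a copy-pasted body, the base HomeServer presumably instantiates whatever class the attribute names. A minimal, self-contained sketch of the idea (simplified stand-ins, not the actual HomeServer implementation):

class HomeServerSketch(object):
    DATASTORE_CLASS = None  # each worker subclass names its store here

    def get_db_conn(self):
        return object()  # stand-in for a real DB connection

    def setup(self):
        # One shared setup() replaces the per-worker copies removed below.
        self.datastore = self.DATASTORE_CLASS(self.get_db_conn(), self)


class FakeSlaveStore(object):
    def __init__(self, db_conn, hs):
        self.db_conn, self.hs = db_conn, hs


class WorkerSketch(HomeServerSketch):
    DATASTORE_CLASS = FakeSlaveStore


hs = WorkerSketch()
hs.setup()
assert isinstance(hs.datastore, FakeSlaveStore)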
--- a/synapse/app/client_reader.py
+++ b/synapse/app/client_reader.py
@@ -74,10 +74,7 @@ class ClientReaderSlavedStore(
 
 
 class ClientReaderServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = ClientReaderSlavedStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = ClientReaderSlavedStore
 
     def _listen_http(self, listener_config):
         port = listener_config["port"]
@@ -156,7 +153,7 @@ def start(config_options):
             "Synapse client reader", config_options
         )
     except ConfigError as e:
-        sys.stderr.write("\n" + e.message + "\n")
+        sys.stderr.write("\n" + str(e) + "\n")
         sys.exit(1)
 
     assert config.worker_app == "synapse.app.client_reader"
@@ -184,7 +181,6 @@ def start(config_options):
     ss.start_listening(config.worker_listeners)
 
     def start():
-        ss.get_state_handler().start_caching()
         ss.get_datastore().start_profiling()
 
     reactor.callWhenRunning(start)
--- a/synapse/app/event_creator.py
+++ b/synapse/app/event_creator.py
@@ -45,6 +45,11 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationStore
 from synapse.replication.slave.storage.room import RoomStore
 from synapse.replication.slave.storage.transactions import SlavedTransactionStore
 from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.rest.client.v1.profile import (
+    ProfileAvatarURLRestServlet,
+    ProfileDisplaynameRestServlet,
+    ProfileRestServlet,
+)
 from synapse.rest.client.v1.room import (
     JoinRoomAliasServlet,
     RoomMembershipRestServlet,
@@ -53,6 +58,7 @@ from synapse.rest.client.v1.room import (
 )
 from synapse.server import HomeServer
 from synapse.storage.engines import create_engine
+from synapse.storage.user_directory import UserDirectoryStore
 from synapse.util.httpresourcetree import create_resource_tree
 from synapse.util.logcontext import LoggingContext
 from synapse.util.manhole import manhole
@@ -62,6 +68,9 @@ logger = logging.getLogger("synapse.app.event_creator")
 
 
 class EventCreatorSlavedStore(
+    # FIXME(#3714): We need to add UserDirectoryStore as we write directly
+    # rather than going via the correct worker.
+    UserDirectoryStore,
     DirectoryStore,
     SlavedTransactionStore,
     SlavedProfileStore,
@@ -81,10 +90,7 @@ class EventCreatorSlavedStore(
 
 
 class EventCreatorServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = EventCreatorSlavedStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = EventCreatorSlavedStore
 
     def _listen_http(self, listener_config):
         port = listener_config["port"]
@@ -101,6 +107,9 @@ class EventCreatorServer(HomeServer):
             RoomMembershipRestServlet(self).register(resource)
             RoomStateEventRestServlet(self).register(resource)
             JoinRoomAliasServlet(self).register(resource)
+            ProfileAvatarURLRestServlet(self).register(resource)
+            ProfileDisplaynameRestServlet(self).register(resource)
+            ProfileRestServlet(self).register(resource)
             resources.update({
                 "/_matrix/client/r0": resource,
                 "/_matrix/client/unstable": resource,
@@ -160,7 +169,7 @@ def start(config_options):
             "Synapse event creator", config_options
         )
     except ConfigError as e:
-        sys.stderr.write("\n" + e.message + "\n")
+        sys.stderr.write("\n" + str(e) + "\n")
         sys.exit(1)
 
     assert config.worker_app == "synapse.app.event_creator"
@@ -169,6 +178,9 @@ def start(config_options):
 
     setup_logging(config, use_worker_options=True)
 
+    # This should only be done on the user directory worker or the master
+    config.update_user_directory = False
+
     events.USE_FROZEN_DICTS = config.use_frozen_dicts
 
     database_engine = create_engine(config.database_config)
@@ -190,7 +202,6 @@ def start(config_options):
     ss.start_listening(config.worker_listeners)
 
     def start():
-        ss.get_state_handler().start_caching()
         ss.get_datastore().start_profiling()
 
     reactor.callWhenRunning(start)
--- a/synapse/app/federation_reader.py
+++ b/synapse/app/federation_reader.py
@@ -72,10 +72,7 @@ class FederationReaderSlavedStore(
 
 
 class FederationReaderServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = FederationReaderSlavedStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = FederationReaderSlavedStore
 
     def _listen_http(self, listener_config):
         port = listener_config["port"]
@@ -143,7 +140,7 @@ def start(config_options):
             "Synapse federation reader", config_options
         )
     except ConfigError as e:
-        sys.stderr.write("\n" + e.message + "\n")
+        sys.stderr.write("\n" + str(e) + "\n")
         sys.exit(1)
 
     assert config.worker_app == "synapse.app.federation_reader"
@@ -171,7 +168,6 @@ def start(config_options):
     ss.start_listening(config.worker_listeners)
 
     def start():
-        ss.get_state_handler().start_caching()
         ss.get_datastore().start_profiling()
 
     reactor.callWhenRunning(start)
--- a/synapse/app/federation_sender.py
+++ b/synapse/app/federation_sender.py
@@ -78,10 +78,7 @@ class FederationSenderSlaveStore(
 
 
 class FederationSenderServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = FederationSenderSlaveStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = FederationSenderSlaveStore
 
     def _listen_http(self, listener_config):
         port = listener_config["port"]
@@ -163,7 +160,7 @@ def start(config_options):
             "Synapse federation sender", config_options
         )
     except ConfigError as e:
-        sys.stderr.write("\n" + e.message + "\n")
+        sys.stderr.write("\n" + str(e) + "\n")
         sys.exit(1)
 
     assert config.worker_app == "synapse.app.federation_sender"
@@ -204,7 +201,6 @@ def start(config_options):
 
     def start():
         ps.get_datastore().start_profiling()
-        ps.get_state_handler().start_caching()
 
     reactor.callWhenRunning(start)
     _base.start_worker_reactor("synapse-federation-sender", config)
--- a/synapse/app/frontend_proxy.py
+++ b/synapse/app/frontend_proxy.py
@@ -68,7 +68,7 @@ class PresenceStatusStubServlet(ClientV1RestServlet):
             "Authorization": auth_headers,
         }
         result = yield self.http_client.get_json(
-            self.main_uri + request.uri,
+            self.main_uri + request.uri.decode('ascii'),
             headers=headers,
         )
         defer.returnValue((200, result))
@@ -125,7 +125,7 @@ class KeyUploadServlet(RestServlet):
             "Authorization": auth_headers,
         }
         result = yield self.http_client.post_json_get_json(
-            self.main_uri + request.uri,
+            self.main_uri + request.uri.decode('ascii'),
             body,
             headers=headers,
         )
@@ -148,10 +148,7 @@ class FrontendProxySlavedStore(
 
 
 class FrontendProxyServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = FrontendProxySlavedStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = FrontendProxySlavedStore
 
     def _listen_http(self, listener_config):
         port = listener_config["port"]
@@ -231,7 +228,7 @@ def start(config_options):
             "Synapse frontend proxy", config_options
         )
     except ConfigError as e:
-        sys.stderr.write("\n" + e.message + "\n")
+        sys.stderr.write("\n" + str(e) + "\n")
         sys.exit(1)
 
     assert config.worker_app == "synapse.app.frontend_proxy"
@@ -261,7 +258,6 @@ def start(config_options):
     ss.start_listening(config.worker_listeners)
 
     def start():
-        ss.get_state_handler().start_caching()
         ss.get_datastore().start_profiling()
 
     reactor.callWhenRunning(start)
--- a/synapse/app/homeserver.py
+++ b/synapse/app/homeserver.py
@@ -20,6 +20,7 @@ import sys
 
 from six import iteritems
 
+import psutil
 from prometheus_client import Gauge
 
 from twisted.application import service
@@ -62,7 +63,7 @@ from synapse.rest.key.v1.server_key_resource import LocalKey
 from synapse.rest.key.v2 import KeyApiV2Resource
 from synapse.rest.media.v0.content_repository import ContentRepoResource
 from synapse.server import HomeServer
-from synapse.storage import are_all_users_on_domain
+from synapse.storage import DataStore, are_all_users_on_domain
 from synapse.storage.engines import IncorrectDatabaseSetup, create_engine
 from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database
 from synapse.util.caches import CACHE_SIZE_FACTOR
@@ -111,6 +112,8 @@ def build_resource_for_web_client(hs):
 
 
 class SynapseHomeServer(HomeServer):
+    DATASTORE_CLASS = DataStore
+
     def _listener_http(self, config, listener_config):
         port = listener_config["port"]
         bind_addresses = listener_config["bind_addresses"]
@@ -299,12 +302,16 @@ class SynapseHomeServer(HomeServer):
         try:
             database_engine.check_database(db_conn.cursor())
         except IncorrectDatabaseSetup as e:
-            quit_with_error(e.message)
+            quit_with_error(str(e))
 
 
 # Gauges to expose monthly active user control metrics
 current_mau_gauge = Gauge("synapse_admin_mau:current", "Current MAU")
 max_mau_gauge = Gauge("synapse_admin_mau:max", "MAU Limit")
+registered_reserved_users_mau_gauge = Gauge(
+    "synapse_admin_mau:registered_reserved_users",
+    "Registered users with reserved threepids"
+)
 
 
 def setup(config_options):
@@ -322,7 +329,7 @@ def setup(config_options):
             config_options,
         )
     except ConfigError as e:
-        sys.stderr.write("\n" + e.message + "\n")
+        sys.stderr.write("\n" + str(e) + "\n")
         sys.exit(1)
 
     if not config:
@@ -356,13 +363,13 @@ def setup(config_options):
     logger.info("Preparing database: %s...", config.database_config['name'])
 
     try:
-        db_conn = hs.get_db_conn(run_new_connection=False)
-        prepare_database(db_conn, database_engine, config=config)
-        database_engine.on_new_connection(db_conn)
-
-        hs.run_startup_checks(db_conn, database_engine)
-
-        db_conn.commit()
+        with hs.get_db_conn(run_new_connection=False) as db_conn:
+            prepare_database(db_conn, database_engine, config=config)
+            database_engine.on_new_connection(db_conn)
+
+            hs.run_startup_checks(db_conn, database_engine)
+
+            db_conn.commit()
     except UpgradeDatabaseException:
         sys.stderr.write(
             "\nFailed to upgrade database.\n"
@@ -378,10 +385,8 @@ def setup(config_options):
 
     def start():
         hs.get_pusherpool().start()
-        hs.get_state_handler().start_caching()
         hs.get_datastore().start_profiling()
         hs.get_datastore().start_doing_background_updates()
-        hs.get_federation_client().start_get_pdu_cache()
 
     reactor.callWhenRunning(start)
 
@@ -451,6 +456,10 @@ def run(hs):
         stats["homeserver"] = hs.config.server_name
         stats["timestamp"] = now
         stats["uptime_seconds"] = uptime
+        version = sys.version_info
+        stats["python_version"] = "{}.{}.{}".format(
+            version.major, version.minor, version.micro
+        )
         stats["total_users"] = yield hs.get_datastore().count_all_users()
 
         total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users()
@@ -494,7 +503,6 @@ def run(hs):
 
     def performance_stats_init():
         try:
-            import psutil
             process = psutil.Process()
             # Ensure we can fetch both, and make the initial request for cpu_percent
             # so the next request will use this as the initial point.
@@ -502,12 +510,9 @@ def run(hs):
             process.cpu_percent(interval=None)
             logger.info("report_stats can use psutil")
             stats_process.append(process)
-        except (ImportError, AttributeError):
-            logger.warn(
-                "report_stats enabled but psutil is not installed or incorrect version."
-                " Disabling reporting of memory/cpu stats."
-                " Ensuring psutil is available will help matrix.org track performance"
-                " changes across releases."
+        except (AttributeError):
+            logger.warning(
+                "Unable to read memory/cpu stats. Disabling reporting."
             )
 
     def generate_user_daily_visit_stats():
@@ -522,25 +527,35 @@ def run(hs):
     clock.looping_call(generate_user_daily_visit_stats, 5 * 60 * 1000)
 
     # monthly active user limiting functionality
-    clock.looping_call(
-        hs.get_datastore().reap_monthly_active_users, 1000 * 60 * 60
-    )
-    hs.get_datastore().reap_monthly_active_users()
+    def reap_monthly_active_users():
+        return run_as_background_process(
+            "reap_monthly_active_users",
+            hs.get_datastore().reap_monthly_active_users,
+        )
+    clock.looping_call(reap_monthly_active_users, 1000 * 60 * 60)
+    reap_monthly_active_users()
 
     @defer.inlineCallbacks
     def generate_monthly_active_users():
-        count = 0
+        current_mau_count = 0
+        reserved_count = 0
+        store = hs.get_datastore()
         if hs.config.limit_usage_by_mau:
-            count = yield hs.get_datastore().get_monthly_active_count()
-        current_mau_gauge.set(float(count))
+            current_mau_count = yield store.get_monthly_active_count()
+            reserved_count = yield store.get_registered_reserved_users_count()
+        current_mau_gauge.set(float(current_mau_count))
+        registered_reserved_users_mau_gauge.set(float(reserved_count))
         max_mau_gauge.set(float(hs.config.max_mau_value))
 
-    hs.get_datastore().initialise_reserved_users(
-        hs.config.mau_limits_reserved_threepids
-    )
-    generate_monthly_active_users()
+    def start_generate_monthly_active_users():
+        return run_as_background_process(
+            "generate_monthly_active_users",
+            generate_monthly_active_users,
+        )
+
+    start_generate_monthly_active_users()
     if hs.config.limit_usage_by_mau:
-        clock.looping_call(generate_monthly_active_users, 5 * 60 * 1000)
+        clock.looping_call(start_generate_monthly_active_users, 5 * 60 * 1000)
     # End of monthly active user settings
 
     if hs.config.report_stats:
@@ -556,7 +571,7 @@ def run(hs):
     clock.call_later(5 * 60, start_phone_stats_home)
 
     if hs.config.daemonize and hs.config.print_pidfile:
-        print (hs.config.pid_file)
+        print(hs.config.pid_file)
 
     _base.start_reactor(
         "synapse-homeserver",
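Wrapping the periodic MAU jobs in run_as_background_process gives each run its own logcontext and metrics rather than firing a bare Deferred from looping_call; the import for the helper is added elsewhere in this commit. The shape of the wrapper, reduced to its essentials (a sketch of the pattern, not the Synapse helper itself):

def run_as_background_process_sketch(desc, func, *args):
    # The real helper also records per-process metrics and sets up a
    # fresh logcontext; the essential shape is: name the job, call it,
    # and hand back its result/Deferred so callers can observe it.
    print("starting background process %s" % (desc,))
    return func(*args)

def reap_monthly_active_users_sketch():
    # stand-in for hs.get_datastore().reap_monthly_active_users
    return run_as_background_process_sketch(
        "reap_monthly_active_users", lambda: None
    )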
--- a/synapse/app/media_repository.py
+++ b/synapse/app/media_repository.py
@@ -60,10 +60,7 @@ class MediaRepositorySlavedStore(
 
 
 class MediaRepositoryServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = MediaRepositorySlavedStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = MediaRepositorySlavedStore
 
     def _listen_http(self, listener_config):
         port = listener_config["port"]
@@ -136,7 +133,7 @@ def start(config_options):
             "Synapse media repository", config_options
         )
     except ConfigError as e:
-        sys.stderr.write("\n" + e.message + "\n")
+        sys.stderr.write("\n" + str(e) + "\n")
         sys.exit(1)
 
     assert config.worker_app == "synapse.app.media_repository"
@@ -171,7 +168,6 @@ def start(config_options):
     ss.start_listening(config.worker_listeners)
 
     def start():
-        ss.get_state_handler().start_caching()
         ss.get_datastore().start_profiling()
 
     reactor.callWhenRunning(start)
--- a/synapse/app/pusher.py
+++ b/synapse/app/pusher.py
@@ -28,6 +28,7 @@ from synapse.config.logger import setup_logging
 from synapse.http.site import SynapseSite
 from synapse.metrics import RegistryProxy
 from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
+from synapse.replication.slave.storage._base import __func__
 from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
 from synapse.replication.slave.storage.events import SlavedEventStore
 from synapse.replication.slave.storage.pushers import SlavedPusherStore
@@ -49,39 +50,36 @@ class PusherSlaveStore(
     SlavedAccountDataStore
 ):
     update_pusher_last_stream_ordering_and_success = (
-        DataStore.update_pusher_last_stream_ordering_and_success.__func__
+        __func__(DataStore.update_pusher_last_stream_ordering_and_success)
     )
 
     update_pusher_failing_since = (
-        DataStore.update_pusher_failing_since.__func__
+        __func__(DataStore.update_pusher_failing_since)
    )
 
     update_pusher_last_stream_ordering = (
-        DataStore.update_pusher_last_stream_ordering.__func__
+        __func__(DataStore.update_pusher_last_stream_ordering)
     )
 
     get_throttle_params_by_room = (
-        DataStore.get_throttle_params_by_room.__func__
+        __func__(DataStore.get_throttle_params_by_room)
     )
 
     set_throttle_params = (
-        DataStore.set_throttle_params.__func__
+        __func__(DataStore.set_throttle_params)
     )
 
     get_time_of_last_push_action_before = (
-        DataStore.get_time_of_last_push_action_before.__func__
+        __func__(DataStore.get_time_of_last_push_action_before)
     )
 
     get_profile_displayname = (
-        DataStore.get_profile_displayname.__func__
+        __func__(DataStore.get_profile_displayname)
     )
 
 
 class PusherServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = PusherSlaveStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = PusherSlaveStore
 
     def remove_pusher(self, app_id, push_key, user_id):
         self.get_tcp_replication().send_remove_pusher(app_id, push_key, user_id)
@@ -163,11 +161,11 @@ class PusherReplicationHandler(ReplicationClientHandler):
             else:
                 yield self.start_pusher(row.user_id, row.app_id, row.pushkey)
         elif stream_name == "events":
-            self.pusher_pool.on_new_notifications(
+            yield self.pusher_pool.on_new_notifications(
                 token, token,
             )
         elif stream_name == "receipts":
-            self.pusher_pool.on_new_receipts(
+            yield self.pusher_pool.on_new_receipts(
                 token, token, set(row.room_id for row in rows)
             )
     except Exception:
@@ -185,7 +183,7 @@ class PusherReplicationHandler(ReplicationClientHandler):
     def start_pusher(self, user_id, app_id, pushkey):
         key = "%s:%s" % (app_id, pushkey)
         logger.info("Starting pusher %r / %r", user_id, key)
-        return self.pusher_pool._refresh_pusher(app_id, pushkey, user_id)
+        return self.pusher_pool.start_pusher_by_id(app_id, pushkey, user_id)
 
 
 def start(config_options):
@@ -194,7 +192,7 @@ def start(config_options):
             "Synapse pusher", config_options
         )
     except ConfigError as e:
-        sys.stderr.write("\n" + e.message + "\n")
+        sys.stderr.write("\n" + str(e) + "\n")
         sys.exit(1)
 
     assert config.worker_app == "synapse.app.pusher"
@@ -231,7 +229,6 @@ def start(config_options):
     def start():
         ps.get_pusherpool().start()
         ps.get_datastore().start_profiling()
-        ps.get_state_handler().start_caching()
 
     reactor.callWhenRunning(start)
 
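The __func__ changes here (and in the synchrotron diff below) are a Python 2/3 compatibility fix: on Python 2, pulling a method off a class yields an unbound method whose underlying function must be extracted via .__func__, while on Python 3 it is already a plain function with no such attribute. A helper of roughly this shape, imported above from synapse/replication/slave/storage/_base.py, makes borrowing DataStore methods work on both (a sketch of the idea, not necessarily the exact implementation):

def __func__(inp):
    # py2: unbound method -> unwrap to the underlying function.
    # py3: already a plain function -> return it unchanged.
    if hasattr(inp, "__func__"):
        return inp.__func__
    return inp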
--- a/synapse/app/synchrotron.py
+++ b/synapse/app/synchrotron.py
@@ -33,7 +33,7 @@ from synapse.http.server import JsonResource
 from synapse.http.site import SynapseSite
 from synapse.metrics import RegistryProxy
 from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
-from synapse.replication.slave.storage._base import BaseSlavedStore
+from synapse.replication.slave.storage._base import BaseSlavedStore, __func__
 from synapse.replication.slave.storage.account_data import SlavedAccountDataStore
 from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
 from synapse.replication.slave.storage.client_ips import SlavedClientIpStore
@@ -147,7 +147,7 @@ class SynchrotronPresence(object):
         and haven't come back yet. If there are poke the master about them.
         """
         now = self.clock.time_msec()
-        for user_id, last_sync_ms in self.users_going_offline.items():
+        for user_id, last_sync_ms in list(self.users_going_offline.items()):
             if now - last_sync_ms > 10 * 1000:
                 self.users_going_offline.pop(user_id, None)
                 self.send_user_sync(user_id, False, last_sync_ms)
@@ -156,9 +156,9 @@ class SynchrotronPresence(object):
         # TODO Hows this supposed to work?
         pass
 
-    get_states = PresenceHandler.get_states.__func__
-    get_state = PresenceHandler.get_state.__func__
-    current_state_for_users = PresenceHandler.current_state_for_users.__func__
+    get_states = __func__(PresenceHandler.get_states)
+    get_state = __func__(PresenceHandler.get_state)
+    current_state_for_users = __func__(PresenceHandler.current_state_for_users)
 
     def user_syncing(self, user_id, affect_presence):
         if affect_presence:
@@ -208,7 +208,7 @@ class SynchrotronPresence(object):
         ) for row in rows]
 
         for state in states:
-            self.user_to_current_state[row.user_id] = state
+            self.user_to_current_state[state.user_id] = state
 
         stream_id = token
         yield self.notify_from_replication(states, stream_id)
@@ -249,10 +249,7 @@ class SynchrotronApplicationService(object):
 
 
 class SynchrotronServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = SynchrotronSlavedStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = SynchrotronSlavedStore
 
     def _listen_http(self, listener_config):
         port = listener_config["port"]
@@ -413,7 +410,7 @@ def start(config_options):
             "Synapse synchrotron", config_options
         )
     except ConfigError as e:
-        sys.stderr.write("\n" + e.message + "\n")
+        sys.stderr.write("\n" + str(e) + "\n")
         sys.exit(1)
 
     assert config.worker_app == "synapse.app.synchrotron"
@@ -438,7 +435,6 @@ def start(config_options):
 
     def start():
         ss.get_datastore().start_profiling()
-        ss.get_state_handler().start_caching()
 
     reactor.callWhenRunning(start)
 
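The list(...) wrapper in the synchrotron hunk above matters on Python 3, where dict.items() is a live view: popping from users_going_offline while iterating it raises RuntimeError. A minimal reproduction (illustrative):

d = {"a": 1, "b": 2}
try:
    for k, v in d.items():
        d.pop(k)  # mutating the dict under a live view
except RuntimeError:
    pass  # py3: "dictionary changed size during iteration"

d = {"a": 1, "b": 2}
for k, v in list(d.items()):  # snapshot first, as in the fix
    d.pop(k)
assert d == {}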
@@ -1,284 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# Copyright 2014-2016 OpenMarket Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import argparse
-import collections
-import errno
-import glob
-import os
-import os.path
-import signal
-import subprocess
-import sys
-import time
-
-from six import iteritems
-
-import yaml
-
-SYNAPSE = [sys.executable, "-B", "-m", "synapse.app.homeserver"]
-
-GREEN = "\x1b[1;32m"
-YELLOW = "\x1b[1;33m"
-RED = "\x1b[1;31m"
-NORMAL = "\x1b[m"
-
-
-def pid_running(pid):
-    try:
-        os.kill(pid, 0)
-        return True
-    except OSError as err:
-        if err.errno == errno.EPERM:
-            return True
-        return False
-
-
-def write(message, colour=NORMAL, stream=sys.stdout):
-    if colour == NORMAL:
-        stream.write(message + "\n")
-    else:
-        stream.write(colour + message + NORMAL + "\n")
-
-
-def abort(message, colour=RED, stream=sys.stderr):
-    write(message, colour, stream)
-    sys.exit(1)
-
-
-def start(configfile):
-    write("Starting ...")
-    args = SYNAPSE
-    args.extend(["--daemonize", "-c", configfile])
-
-    try:
-        subprocess.check_call(args)
-        write("started synapse.app.homeserver(%r)" %
-              (configfile,), colour=GREEN)
-    except subprocess.CalledProcessError as e:
-        write(
-            "error starting (exit code: %d); see above for logs" % e.returncode,
-            colour=RED,
-        )
-
-
-def start_worker(app, configfile, worker_configfile):
-    args = [
-        "python", "-B",
-        "-m", app,
-        "-c", configfile,
-        "-c", worker_configfile
-    ]
-
-    try:
-        subprocess.check_call(args)
-        write("started %s(%r)" % (app, worker_configfile), colour=GREEN)
-    except subprocess.CalledProcessError as e:
-        write(
-            "error starting %s(%r) (exit code: %d); see above for logs" % (
-                app, worker_configfile, e.returncode,
-            ),
-            colour=RED,
-        )
-
-
-def stop(pidfile, app):
-    if os.path.exists(pidfile):
-        pid = int(open(pidfile).read())
-        try:
-            os.kill(pid, signal.SIGTERM)
-            write("stopped %s" % (app,), colour=GREEN)
-        except OSError as err:
-            if err.errno == errno.ESRCH:
-                write("%s not running" % (app,), colour=YELLOW)
-            elif err.errno == errno.EPERM:
-                abort("Cannot stop %s: Operation not permitted" % (app,))
-            else:
-                abort("Cannot stop %s: Unknown error" % (app,))
-
-
-Worker = collections.namedtuple("Worker", [
-    "app", "configfile", "pidfile", "cache_factor"
-])
-
-
-def main():
-
-    parser = argparse.ArgumentParser()
-
-    parser.add_argument(
-        "action",
-        choices=["start", "stop", "restart"],
-        help="whether to start, stop or restart the synapse",
-    )
-    parser.add_argument(
-        "configfile",
-        nargs="?",
-        default="homeserver.yaml",
-        help="the homeserver config file, defaults to homeserver.yaml",
-    )
-    parser.add_argument(
-        "-w", "--worker",
-        metavar="WORKERCONFIG",
-        help="start or stop a single worker",
-    )
-    parser.add_argument(
-        "-a", "--all-processes",
-        metavar="WORKERCONFIGDIR",
-        help="start or stop all the workers in the given directory"
-             " and the main synapse process",
-    )
-
-    options = parser.parse_args()
-
-    if options.worker and options.all_processes:
-        write(
-            'Cannot use "--worker" with "--all-processes"',
-            stream=sys.stderr
-        )
-        sys.exit(1)
-
-    configfile = options.configfile
-
-    if not os.path.exists(configfile):
-        write(
-            "No config file found\n"
-            "To generate a config file, run '%s -c %s --generate-config"
-            " --server-name=<server name>'\n" % (
-                " ".join(SYNAPSE), options.configfile
-            ),
-            stream=sys.stderr,
-        )
-        sys.exit(1)
-
-    with open(configfile) as stream:
-        config = yaml.load(stream)
-
-    pidfile = config["pid_file"]
-    cache_factor = config.get("synctl_cache_factor")
-    start_stop_synapse = True
-
-    if cache_factor:
-        os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor)
-
-    cache_factors = config.get("synctl_cache_factors", {})
-    for cache_name, factor in iteritems(cache_factors):
-        os.environ["SYNAPSE_CACHE_FACTOR_" + cache_name.upper()] = str(factor)
-
-    worker_configfiles = []
-    if options.worker:
-        start_stop_synapse = False
-        worker_configfile = options.worker
-        if not os.path.exists(worker_configfile):
-            write(
-                "No worker config found at %r" % (worker_configfile,),
-                stream=sys.stderr,
-            )
-            sys.exit(1)
-        worker_configfiles.append(worker_configfile)
-
-    if options.all_processes:
-        # To start the main synapse with -a you need to add a worker file
-        # with worker_app == "synapse.app.homeserver"
-        start_stop_synapse = False
-        worker_configdir = options.all_processes
-        if not os.path.isdir(worker_configdir):
-            write(
-                "No worker config directory found at %r" % (worker_configdir,),
-                stream=sys.stderr,
-            )
-            sys.exit(1)
-        worker_configfiles.extend(sorted(glob.glob(
-            os.path.join(worker_configdir, "*.yaml")
-        )))
-
-    workers = []
-    for worker_configfile in worker_configfiles:
-        with open(worker_configfile) as stream:
-            worker_config = yaml.load(stream)
-        worker_app = worker_config["worker_app"]
-        if worker_app == "synapse.app.homeserver":
-            # We need to special case all of this to pick up options that may
-            # be set in the main config file or in this worker config file.
-            worker_pidfile = (
-                worker_config.get("pid_file")
-                or pidfile
-            )
-            worker_cache_factor = worker_config.get("synctl_cache_factor") or cache_factor
-            daemonize = worker_config.get("daemonize") or config.get("daemonize")
-            assert daemonize, "Main process must have daemonize set to true"
-
-            # The master process doesn't support using worker_* config.
-            for key in worker_config:
-                if key == "worker_app":  # But we allow worker_app
-                    continue
-                assert not key.startswith("worker_"), \
-                    "Main process cannot use worker_* config"
-        else:
-            worker_pidfile = worker_config["worker_pid_file"]
-            worker_daemonize = worker_config["worker_daemonize"]
-            assert worker_daemonize, "In config %r: expected '%s' to be True" % (
-                worker_configfile, "worker_daemonize")
-            worker_cache_factor = worker_config.get("synctl_cache_factor")
-        workers.append(Worker(
-            worker_app, worker_configfile, worker_pidfile, worker_cache_factor,
-        ))
-
-    action = options.action
-
-    if action == "stop" or action == "restart":
-        for worker in workers:
-            stop(worker.pidfile, worker.app)
-
-        if start_stop_synapse:
-            stop(pidfile, "synapse.app.homeserver")
-
-    # Wait for synapse to actually shutdown before starting it again
-    if action == "restart":
-        running_pids = []
-        if start_stop_synapse and os.path.exists(pidfile):
-            running_pids.append(int(open(pidfile).read()))
-        for worker in workers:
-            if os.path.exists(worker.pidfile):
-                running_pids.append(int(open(worker.pidfile).read()))
-        if len(running_pids) > 0:
-            write("Waiting for process to exit before restarting...")
-            for running_pid in running_pids:
-                while pid_running(running_pid):
-                    time.sleep(0.2)
-            write("All processes exited; now restarting...")
-
-    if action == "start" or action == "restart":
-        if start_stop_synapse:
-            # Check if synapse is already running
-            if os.path.exists(pidfile) and pid_running(int(open(pidfile).read())):
-                abort("synapse.app.homeserver already running")
-            start(configfile)
-
-        for worker in workers:
-            if worker.cache_factor:
-                os.environ["SYNAPSE_CACHE_FACTOR"] = str(worker.cache_factor)
-
-            start_worker(worker.app, configfile, worker.configfile)
-
-            if cache_factor:
-                os.environ["SYNAPSE_CACHE_FACTOR"] = str(cache_factor)
-            else:
-                os.environ.pop("SYNAPSE_CACHE_FACTOR", None)
-
-
-if __name__ == "__main__":
-    main()
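
The deleted script's pid_running() helper relies on the classic
os.kill(pid, 0) probe: signal 0 delivers nothing, but still performs the
existence and permission checks. The same idiom in isolation, with the
reasoning spelled out:

    import errno
    import os

    def pid_running(pid):
        try:
            # Signal 0 sends no signal; it only checks that the pid exists
            # and that we are allowed to signal it.
            os.kill(pid, 0)
            return True
        except OSError as err:
            # EPERM means the process exists but is owned by another user.
            if err.errno == errno.EPERM:
                return True
            return False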
@@ -94,10 +94,7 @@ class UserDirectorySlaveStore(


 class UserDirectoryServer(HomeServer):
-    def setup(self):
-        logger.info("Setting up.")
-        self.datastore = UserDirectorySlaveStore(self.get_db_conn(), self)
-        logger.info("Finished setting up.")
+    DATASTORE_CLASS = UserDirectorySlaveStore

     def _listen_http(self, listener_config):
         port = listener_config["port"]
@@ -191,7 +188,7 @@ def start(config_options):
             "Synapse user directory", config_options
         )
     except ConfigError as e:
-        sys.stderr.write("\n" + e.message + "\n")
+        sys.stderr.write("\n" + str(e) + "\n")
         sys.exit(1)

     assert config.worker_app == "synapse.app.user_dir"
@@ -232,7 +229,6 @@ def start(config_options):

     def start():
         ps.get_datastore().start_profiling()
-        ps.get_state_handler().start_caching()

     reactor.callWhenRunning(start)
@@ -13,7 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-import urllib
+
+from six.moves import urllib

 from prometheus_client import Counter
@@ -98,7 +99,7 @@ class ApplicationServiceApi(SimpleHttpClient):
     def query_user(self, service, user_id):
         if service.url is None:
             defer.returnValue(False)
-        uri = service.url + ("/users/%s" % urllib.quote(user_id))
+        uri = service.url + ("/users/%s" % urllib.parse.quote(user_id))
         response = None
         try:
             response = yield self.get_json(uri, {
@@ -119,7 +120,7 @@ class ApplicationServiceApi(SimpleHttpClient):
     def query_alias(self, service, alias):
         if service.url is None:
             defer.returnValue(False)
-        uri = service.url + ("/rooms/%s" % urllib.quote(alias))
+        uri = service.url + ("/rooms/%s" % urllib.parse.quote(alias))
         response = None
         try:
             response = yield self.get_json(uri, {
@@ -153,7 +154,7 @@ class ApplicationServiceApi(SimpleHttpClient):
             service.url,
             APP_SERVICE_PREFIX,
             kind,
-            urllib.quote(protocol)
+            urllib.parse.quote(protocol)
         )
         try:
             response = yield self.get_json(uri, fields)
@@ -188,7 +189,7 @@ class ApplicationServiceApi(SimpleHttpClient):
         uri = "%s%s/thirdparty/protocol/%s" % (
             service.url,
             APP_SERVICE_PREFIX,
-            urllib.quote(protocol)
+            urllib.parse.quote(protocol)
         )
         try:
             info = yield self.get_json(uri, {})
@@ -228,7 +229,7 @@ class ApplicationServiceApi(SimpleHttpClient):
         txn_id = str(txn_id)

         uri = service.url + ("/transactions/%s" %
-                             urllib.quote(txn_id))
+                             urllib.parse.quote(txn_id))
         try:
             yield self.put_json(
                 uri=uri,
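
These hunks swap the Python 2-only urllib.quote for urllib.parse.quote via
six.moves, which resolves to the right module under both interpreters. A
quick sketch of the behaviour:

    from six.moves import urllib

    # Resolves to urllib.quote on Python 2 and urllib.parse.quote on Python 3.
    urllib.parse.quote("@user:example.com")  # returns '%40user%3Aexample.com'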
@@ -25,10 +25,10 @@ if __name__ == "__main__":
         try:
             config = HomeServerConfig.load_config("", sys.argv[3:])
         except ConfigError as e:
-            sys.stderr.write("\n" + e.message + "\n")
+            sys.stderr.write("\n" + str(e) + "\n")
             sys.exit(1)

-        print (getattr(config, key))
+        print(getattr(config, key))
         sys.exit(0)
     else:
         sys.stderr.write("Unknown command %r\n" % (action,))
@@ -106,10 +106,7 @@ class Config(object):
     @classmethod
     def check_file(cls, file_path, config_name):
         if file_path is None:
-            raise ConfigError(
-                "Missing config for %s."
-                % (config_name,)
-            )
+            raise ConfigError("Missing config for %s." % (config_name,))
         try:
             os.stat(file_path)
         except OSError as e:
@@ -128,9 +125,7 @@ class Config(object):
             if e.errno != errno.EEXIST:
                 raise
         if not os.path.isdir(dir_path):
-            raise ConfigError(
-                "%s is not a directory" % (dir_path,)
-            )
+            raise ConfigError("%s is not a directory" % (dir_path,))
         return dir_path

     @classmethod
@@ -156,21 +151,20 @@ class Config(object):
         return results

     def generate_config(
-        self,
-        config_dir_path,
-        server_name,
-        is_generating_file,
-        report_stats=None,
+        self, config_dir_path, server_name, is_generating_file, report_stats=None
     ):
         default_config = "# vim:ft=yaml\n"

-        default_config += "\n\n".join(dedent(conf) for conf in self.invoke_all(
-            "default_config",
-            config_dir_path=config_dir_path,
-            server_name=server_name,
-            is_generating_file=is_generating_file,
-            report_stats=report_stats,
-        ))
+        default_config += "\n\n".join(
+            dedent(conf)
+            for conf in self.invoke_all(
+                "default_config",
+                config_dir_path=config_dir_path,
+                server_name=server_name,
+                is_generating_file=is_generating_file,
+                report_stats=report_stats,
+            )
+        )

         config = yaml.load(default_config)
@@ -178,23 +172,22 @@ class Config(object):
     @classmethod
     def load_config(cls, description, argv):
-        config_parser = argparse.ArgumentParser(
-            description=description,
-        )
+        config_parser = argparse.ArgumentParser(description=description)
         config_parser.add_argument(
-            "-c", "--config-path",
+            "-c",
+            "--config-path",
             action="append",
             metavar="CONFIG_FILE",
             help="Specify config file. Can be given multiple times and"
-            " may specify directories containing *.yaml files."
+            " may specify directories containing *.yaml files.",
         )

         config_parser.add_argument(
             "--keys-directory",
             metavar="DIRECTORY",
             help="Where files such as certs and signing keys are stored when"
             " their location is given explicitly in the config."
             " Defaults to the directory containing the last config file",
         )

         config_args = config_parser.parse_args(argv)
@@ -203,9 +196,7 @@ class Config(object):
         obj = cls()
         obj.read_config_files(
-            config_files,
-            keys_directory=config_args.keys_directory,
-            generate_keys=False,
+            config_files, keys_directory=config_args.keys_directory, generate_keys=False
         )
         return obj
@@ -213,38 +204,38 @@ class Config(object):
     def load_or_generate_config(cls, description, argv):
         config_parser = argparse.ArgumentParser(add_help=False)
         config_parser.add_argument(
-            "-c", "--config-path",
+            "-c",
+            "--config-path",
             action="append",
             metavar="CONFIG_FILE",
             help="Specify config file. Can be given multiple times and"
-            " may specify directories containing *.yaml files."
+            " may specify directories containing *.yaml files.",
         )
         config_parser.add_argument(
             "--generate-config",
             action="store_true",
-            help="Generate a config file for the server name"
+            help="Generate a config file for the server name",
         )
         config_parser.add_argument(
             "--report-stats",
             action="store",
             help="Whether the generated config reports anonymized usage statistics",
-            choices=["yes", "no"]
+            choices=["yes", "no"],
         )
         config_parser.add_argument(
             "--generate-keys",
             action="store_true",
-            help="Generate any missing key files then exit"
+            help="Generate any missing key files then exit",
         )
         config_parser.add_argument(
             "--keys-directory",
             metavar="DIRECTORY",
             help="Used with 'generate-*' options to specify where files such as"
             " certs and signing keys should be stored in, unless explicitly"
-            " specified in the config."
+            " specified in the config.",
         )
         config_parser.add_argument(
-            "-H", "--server-name",
-            help="The server name to generate a config file for"
+            "-H", "--server-name", help="The server name to generate a config file for"
         )
         config_args, remaining_args = config_parser.parse_known_args(argv)
@@ -257,8 +248,8 @@ class Config(object):
         if config_args.generate_config:
             if config_args.report_stats is None:
                 config_parser.error(
-                    "Please specify either --report-stats=yes or --report-stats=no\n\n" +
-                    MISSING_REPORT_STATS_SPIEL
+                    "Please specify either --report-stats=yes or --report-stats=no\n\n"
+                    + MISSING_REPORT_STATS_SPIEL
                 )
             if not config_files:
                 config_parser.error(
@@ -287,26 +278,32 @@ class Config(object):
                     config_dir_path=config_dir_path,
                     server_name=server_name,
                     report_stats=(config_args.report_stats == "yes"),
-                    is_generating_file=True
+                    is_generating_file=True,
                 )
                 obj.invoke_all("generate_files", config)
                 config_file.write(config_str)
-                print((
-                    "A config file has been generated in %r for server name"
-                    " %r with corresponding SSL keys and self-signed"
-                    " certificates. Please review this file and customise it"
-                    " to your needs."
-                ) % (config_path, server_name))
+                print(
+                    (
+                        "A config file has been generated in %r for server name"
+                        " %r with corresponding SSL keys and self-signed"
+                        " certificates. Please review this file and customise it"
+                        " to your needs."
+                    )
+                    % (config_path, server_name)
+                )
                 print(
                     "If this server name is incorrect, you will need to"
                     " regenerate the SSL certificates"
                 )
                 return
             else:
-                print((
-                    "Config file %r already exists. Generating any missing key"
-                    " files."
-                ) % (config_path,))
+                print(
+                    (
+                        "Config file %r already exists. Generating any missing key"
+                        " files."
+                    )
+                    % (config_path,)
+                )
                 generate_keys = True

         parser = argparse.ArgumentParser(
@@ -338,8 +335,7 @@ class Config(object):

         return obj

-    def read_config_files(self, config_files, keys_directory=None,
-                          generate_keys=False):
+    def read_config_files(self, config_files, keys_directory=None, generate_keys=False):
         if not keys_directory:
             keys_directory = os.path.dirname(config_files[-1])
@@ -364,8 +360,9 @@ class Config(object):

         if "report_stats" not in config:
             raise ConfigError(
-                MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS + "\n" +
-                MISSING_REPORT_STATS_SPIEL
+                MISSING_REPORT_STATS_CONFIG_INSTRUCTIONS
+                + "\n"
+                + MISSING_REPORT_STATS_SPIEL
             )

         if generate_keys:
@@ -399,16 +396,16 @@ def find_config_files(search_paths):
             for entry in os.listdir(config_path):
                 entry_path = os.path.join(config_path, entry)
                 if not os.path.isfile(entry_path):
-                    print (
-                        "Found subdirectory in config directory: %r. IGNORING."
-                    ) % (entry_path, )
+                    err = "Found subdirectory in config directory: %r. IGNORING."
+                    print(err % (entry_path,))
                     continue

                 if not entry.endswith(".yaml"):
-                    print (
-                        "Found file in config directory that does not"
-                        " end in '.yaml': %r. IGNORING."
-                    ) % (entry_path, )
+                    err = (
+                        "Found file in config directory that does not end in "
+                        "'.yaml': %r. IGNORING."
+                    )
+                    print(err % (entry_path,))
                     continue

                 files.append(entry_path)
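
The find_config_files hunk only restructures the warning messages; the
selection logic is unchanged. Condensed into a standalone sketch (helper
name is illustrative):

    import os

    def yaml_files_in(config_path):
        # Keep only regular *.yaml files directly inside the directory;
        # subdirectories and other extensions are warned about and skipped
        # in the real code.
        files = []
        for entry in sorted(os.listdir(config_path)):
            entry_path = os.path.join(config_path, entry)
            if os.path.isfile(entry_path) and entry.endswith(".yaml"):
                files.append(entry_path)
        return files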
@@ -13,10 +13,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from __future__ import print_function
+
 # This file can't be called email.py because if it is, we cannot:
 import email.utils
+import logging
+import os

-from ._base import Config
+import pkg_resources
+
+from ._base import Config, ConfigError
+
+logger = logging.getLogger(__name__)


 class EmailConfig(Config):
@@ -38,7 +46,6 @@ class EmailConfig(Config):
                 "smtp_host",
                 "smtp_port",
                 "notif_from",
-                "template_dir",
                 "notif_template_html",
                 "notif_template_text",
             ]
@@ -62,9 +69,26 @@ class EmailConfig(Config):
             self.email_smtp_host = email_config["smtp_host"]
             self.email_smtp_port = email_config["smtp_port"]
             self.email_notif_from = email_config["notif_from"]
-            self.email_template_dir = email_config["template_dir"]
             self.email_notif_template_html = email_config["notif_template_html"]
             self.email_notif_template_text = email_config["notif_template_text"]

+            template_dir = email_config.get("template_dir")
+            # we need an absolute path, because we change directory after starting (and
+            # we don't yet know what auxilliary templates like mail.css we will need).
+            # (Note that loading as package_resources with jinja.PackageLoader doesn't
+            # work for the same reason.)
+            if not template_dir:
+                template_dir = pkg_resources.resource_filename(
+                    'synapse', 'res/templates'
+                )
+            template_dir = os.path.abspath(template_dir)
+
+            for f in self.email_notif_template_text, self.email_notif_template_html:
+                p = os.path.join(template_dir, f)
+                if not os.path.isfile(p):
+                    raise ConfigError("Unable to find email template file %s" % (p, ))
+            self.email_template_dir = template_dir
+
             self.email_notif_for_new_users = email_config.get(
                 "notif_for_new_users", True
             )
@@ -113,7 +137,9 @@ class EmailConfig(Config):
         #   require_transport_security: False
         #   notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
         #   app_name: Matrix
-        #   template_dir: res/templates
+        #   # if template_dir is unset, uses the example templates that are part of
+        #   # the Synapse distribution.
+        #   #template_dir: res/templates
         #   notif_template_html: notif_mail.html
         #   notif_template_text: notif_mail.txt
         #   notif_for_new_users: True
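
With this change an explicit template_dir is optional: the bundled templates
are located through pkg_resources instead. The fallback in isolation (the
'synapse' package name and res/templates path come from the hunk above):

    import os
    import pkg_resources

    def resolve_template_dir(configured_dir=None):
        template_dir = configured_dir
        if not template_dir:
            # Locate the templates shipped inside the installed package.
            template_dir = pkg_resources.resource_filename(
                'synapse', 'res/templates'
            )
        # Absolute path, since the daemon changes directory after start-up.
        return os.path.abspath(template_dir)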
@@ -21,7 +21,7 @@ from .consent_config import ConsentConfig
 from .database import DatabaseConfig
 from .emailconfig import EmailConfig
 from .groups import GroupsConfig
-from .jwt import JWTConfig
+from .jwt_config import JWTConfig
 from .key import KeyConfig
 from .logger import LoggingConfig
 from .metrics import MetricsConfig
@@ -31,6 +31,7 @@ from .push import PushConfig
 from .ratelimiting import RatelimitConfig
 from .registration import RegistrationConfig
 from .repository import ContentRepositoryConfig
+from .room_directory import RoomDirectoryConfig
 from .saml2 import SAML2Config
 from .server import ServerConfig
 from .server_notices_config import ServerNoticesConfig
@@ -49,7 +50,7 @@ class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
                        WorkerConfig, PasswordAuthProviderConfig, PushConfig,
                        SpamCheckerConfig, GroupsConfig, UserDirectoryConfig,
                        ConsentConfig,
-                       ServerNoticesConfig,
+                       ServerNoticesConfig, RoomDirectoryConfig,
                        ):
     pass
@@ -227,7 +227,22 @@ def setup_logging(config, use_worker_options=False):
         #
         # However this may not be too much of a problem if we are just writing to a file.
         observer = STDLibLogObserver()

+        def _log(event):
+
+            if "log_text" in event:
+                if event["log_text"].startswith("DNSDatagramProtocol starting on "):
+                    return
+
+                if event["log_text"].startswith("(UDP Port "):
+                    return
+
+                if event["log_text"].startswith("Timing out client"):
+                    return
+
+            return observer(event)
+
         globalLogBeginner.beginLoggingTo(
-            [observer],
+            [_log],
             redirectStandardIO=not config.no_redirect_stdio,
         )
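
The _log wrapper added above is an ordinary Twisted log observer: any
callable taking an event dict, which can drop an event simply by returning
early. The same pattern generalised (function and prefix list are
illustrative):

    def make_filtering_observer(observer, noisy_prefixes):
        # Wrap a log observer, swallowing events whose log_text starts
        # with any of the given prefixes.
        def _log(event):
            text = event.get("log_text", "")
            if any(text.startswith(p) for p in noisy_prefixes):
                return
            return observer(event)
        return _log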
@@ -15,10 +15,10 @@

 from distutils.util import strtobool

+from synapse.config._base import Config, ConfigError
+from synapse.types import RoomAlias
 from synapse.util.stringutils import random_string_with_symbols

-from ._base import Config
-

 class RegistrationConfig(Config):
@@ -52,6 +52,10 @@ class RegistrationConfig(Config):
         )

         self.auto_join_rooms = config.get("auto_join_rooms", [])
+        for room_alias in self.auto_join_rooms:
+            if not RoomAlias.is_valid(room_alias):
+                raise ConfigError('Invalid auto_join_rooms entry %s' % (room_alias,))
+        self.autocreate_auto_join_rooms = config.get("autocreate_auto_join_rooms", True)

         self.disable_set_displayname = config.get("disable_set_displayname", False)
         self.disable_set_avatar_url = config.get("disable_set_avatar_url", False)
@@ -150,6 +154,12 @@ class RegistrationConfig(Config):
         #auto_join_rooms:
         #    - "#example:example.com"

+        # Where auto_join_rooms are specified, setting this flag ensures that the
+        # the rooms exist by creating them when the first user on the
+        # homeserver registers.
+        # Setting to false means that if the rooms are not manually created,
+        # users cannot be auto-joined since they do not exist.
+        autocreate_auto_join_rooms: true
         """ % locals()

     def add_arguments(self, parser):
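
The new check rejects malformed auto_join_rooms entries at startup instead
of failing at the first registration. In isolation (assuming synapse's
RoomAlias type, imported above, which validates strings of the form
"#local:domain"):

    from synapse.config._base import ConfigError
    from synapse.types import RoomAlias

    def check_auto_join_rooms(aliases):
        for room_alias in aliases:
            if not RoomAlias.is_valid(room_alias):
                raise ConfigError('Invalid auto_join_rooms entry %s' % (room_alias,))

    check_auto_join_rooms(["#example:example.com"])  # passes silently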
@@ -178,7 +178,7 @@ class ContentRepositoryConfig(Config):
     def default_config(self, **kwargs):
         media_store = self.default_path("media_store")
         uploads_path = self.default_path("uploads")
-        return """
+        return r"""
         # Directory where uploaded images and attachments are stored.
         media_store_path: "%(media_store)s"
synapse/config/room_directory.py (new file, 102 lines added)
@@ -0,0 +1,102 @@
+# -*- coding: utf-8 -*-
+# Copyright 2018 New Vector Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from synapse.util import glob_to_regex
+
+from ._base import Config, ConfigError
+
+
+class RoomDirectoryConfig(Config):
+    def read_config(self, config):
+        alias_creation_rules = config["alias_creation_rules"]
+
+        self._alias_creation_rules = [
+            _AliasRule(rule)
+            for rule in alias_creation_rules
+        ]
+
+    def default_config(self, config_dir_path, server_name, **kwargs):
+        return """
+        # The `alias_creation` option controls who's allowed to create aliases
+        # on this server.
+        #
+        # The format of this option is a list of rules that contain globs that
+        # match against user_id and the new alias (fully qualified with server
+        # name). The action in the first rule that matches is taken, which can
+        # currently either be "allow" or "deny".
+        #
+        # If no rules match the request is denied.
+        alias_creation_rules:
+            - user_id: "*"
+              alias: "*"
+              action: allow
+        """
+
+    def is_alias_creation_allowed(self, user_id, alias):
+        """Checks if the given user is allowed to create the given alias
+
+        Args:
+            user_id (str)
+            alias (str)
+
+        Returns:
+            boolean: True if user is allowed to crate the alias
+        """
+        for rule in self._alias_creation_rules:
+            if rule.matches(user_id, alias):
+                return rule.action == "allow"
+
+        return False
+
+
+class _AliasRule(object):
+    def __init__(self, rule):
+        action = rule["action"]
+        user_id = rule["user_id"]
+        alias = rule["alias"]
+
+        if action in ("allow", "deny"):
+            self.action = action
+        else:
+            raise ConfigError(
+                "alias_creation_rules rules can only have action of 'allow'"
+                " or 'deny'"
+            )
+
+        try:
+            self._user_id_regex = glob_to_regex(user_id)
+            self._alias_regex = glob_to_regex(alias)
+        except Exception as e:
+            raise ConfigError("Failed to parse glob into regex: %s", e)
+
+    def matches(self, user_id, alias):
+        """Tests if this rule matches the given user_id and alias.
+
+        Args:
+            user_id (str)
+            alias (str)
+
+        Returns:
+            boolean
+        """
+
+        # Note: The regexes are anchored at both ends
+        if not self._user_id_regex.match(user_id):
+            return False
+
+        if not self._alias_regex.match(alias):
+            return False
+
+        return True
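
A hypothetical rule showing how the glob matching behaves (the rule shape
mirrors the YAML documented in default_config above; note that the regexes
are anchored at both ends):

    rule = _AliasRule({
        "user_id": "@admin:*",
        "alias": "#official-*:example.com",
        "action": "allow",
    })
    rule.matches("@admin:example.com", "#official-news:example.com")  # True
    rule.matches("@bob:example.com", "#official-news:example.com")    # False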
@@ -77,10 +77,15 @@ class ServerConfig(Config):
         self.max_mau_value = config.get(
             "max_mau_value", 0,
         )

         self.mau_limits_reserved_threepids = config.get(
             "mau_limit_reserved_threepids", []
         )

+        self.mau_trial_days = config.get(
+            "mau_trial_days", 0,
+        )
+
         # Options to disable HS
         self.hs_disabled = config.get("hs_disabled", False)
         self.hs_disabled_message = config.get("hs_disabled_message", "")
@@ -88,7 +93,7 @@ class ServerConfig(Config):

         # Admin uri to direct users at should their instance become blocked
         # due to resource constraints
-        self.admin_uri = config.get("admin_uri", None)
+        self.admin_contact = config.get("admin_contact", None)

         # FIXME: federation_domain_whitelist needs sytests
         self.federation_domain_whitelist = None
@@ -352,7 +357,7 @@ class ServerConfig(Config):
         # Homeserver blocking
         #
         # How to reach the server admin, used in ResourceLimitError
-        # admin_uri: 'mailto:admin@server.com'
+        # admin_contact: 'mailto:admin@server.com'
         #
         # Global block config
         #
@@ -365,6 +370,7 @@ class ServerConfig(Config):
         # Enables monthly active user checking
         # limit_usage_by_mau: False
         # max_mau_value: 50
+        # mau_trial_days: 2
         #
         # Sometimes the server admin will want to ensure certain accounts are
         # never blocked by mau checking. These accounts are specified here.
@@ -398,6 +404,23 @@ class ServerConfig(Config):
                             " service on the given port.")


+def is_threepid_reserved(config, threepid):
+    """Check the threepid against the reserved threepid config
+    Args:
+        config(ServerConfig) - to access server config attributes
+        threepid(dict) - The threepid to test for
+
+    Returns:
+        boolean Is the threepid undertest reserved_user
+    """
+
+    for tp in config.mau_limits_reserved_threepids:
+        if (threepid['medium'] == tp['medium']
+                and threepid['address'] == tp['address']):
+            return True
+    return False
+
+
 def read_gc_thresholds(thresholds):
     """Reads the three integer thresholds for garbage collection. Ensures that
     the thresholds are integers if thresholds are supplied.
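
A hypothetical config object and lookup showing the new reserved-threepid
helper in use (values invented for the example):

    class FakeServerConfig(object):
        # Stands in for ServerConfig; only the attribute the helper reads.
        mau_limits_reserved_threepids = [
            {"medium": "email", "address": "support@example.com"},
        ]

    threepid = {"medium": "email", "address": "support@example.com"}
    is_threepid_reserved(FakeServerConfig(), threepid)  # returns True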
@@ -123,6 +123,6 @@ class ClientTLSOptionsFactory(object):

     def get_options(self, host):
         return ClientTLSOptions(
-            host.decode('utf-8'),
+            host,
             CertificateOptions(verify=False).getContext()
         )
@@ -18,7 +18,9 @@ import logging
 from canonicaljson import json

 from twisted.internet import defer, reactor
+from twisted.internet.error import ConnectError
 from twisted.internet.protocol import Factory
+from twisted.names.error import DomainError
 from twisted.web.http import HTTPClient

 from synapse.http.endpoint import matrix_federation_endpoint
@@ -47,12 +49,14 @@ def fetch_server_key(server_name, tls_client_options_factory, path=KEY_API_V1):
             server_response, server_certificate = yield protocol.remote_key
             defer.returnValue((server_response, server_certificate))
         except SynapseKeyClientError as e:
-            logger.exception("Error getting key for %r" % (server_name,))
-            if e.status.startswith("4"):
+            logger.warn("Error getting key for %r: %s", server_name, e)
+            if e.status.startswith(b"4"):
                 # Don't retry for 4xx responses.
                 raise IOError("Cannot get key for %r" % server_name)
-        except Exception as e:
-            logger.exception(e)
+        except (ConnectError, DomainError) as e:
+            logger.warn("Error getting key for %r: %s", server_name, e)
+        except Exception:
+            logger.exception("Error getting key for %r", server_name)
     raise IOError("Cannot get key for %r" % server_name)
@@ -78,6 +82,12 @@ class SynapseKeyClientProtocol(HTTPClient):
         self._peer = self.transport.getPeer()
         logger.debug("Connected to %s", self._peer)

+        if not isinstance(self.path, bytes):
+            self.path = self.path.encode('ascii')
+
+        if not isinstance(self.host, bytes):
+            self.host = self.host.encode('ascii')
+
         self.sendCommand(b"GET", self.path)
         if self.host:
             self.sendHeader(b"Host", self.host)
@@ -16,9 +16,10 @@

 import hashlib
 import logging
-import urllib
 from collections import namedtuple

+from six.moves import urllib
+
 from signedjson.key import (
     decode_verify_key_bytes,
     encode_verify_key_base64,
@@ -40,6 +41,7 @@ from synapse.api.errors import Codes, SynapseError
 from synapse.crypto.keyclient import fetch_server_key
 from synapse.util import logcontext, unwrapFirstError
 from synapse.util.logcontext import (
+    LoggingContext,
     PreserveLoggingContext,
     preserve_fn,
     run_in_background,
@@ -216,23 +218,34 @@ class Keyring(object):
         servers have completed. Follows the synapse rules of logcontext
         preservation.
         """
+        loop_count = 1
         while True:
             wait_on = [
-                self.key_downloads[server_name]
+                (server_name, self.key_downloads[server_name])
                 for server_name in server_names
                 if server_name in self.key_downloads
             ]
-            if wait_on:
-                with PreserveLoggingContext():
-                    yield defer.DeferredList(wait_on)
-            else:
+            if not wait_on:
                 break
+            logger.info(
+                "Waiting for existing lookups for %s to complete [loop %i]",
+                [w[0] for w in wait_on], loop_count,
+            )
+            with PreserveLoggingContext():
+                yield defer.DeferredList((w[1] for w in wait_on))
+
+            loop_count += 1
+
+        ctx = LoggingContext.current_context()

         def rm(r, server_name_):
-            self.key_downloads.pop(server_name_, None)
+            with PreserveLoggingContext(ctx):
+                logger.debug("Releasing key lookup lock on %s", server_name_)
+                self.key_downloads.pop(server_name_, None)
             return r

         for server_name, deferred in server_to_deferred.items():
+            logger.debug("Got key lookup lock on %s", server_name)
             self.key_downloads[server_name] = deferred
             deferred.addBoth(rm, server_name)
@@ -432,7 +445,7 @@ class Keyring(object):
             # an incoming request.
             query_response = yield self.client.post_json(
                 destination=perspective_name,
-                path=b"/_matrix/key/v2/query",
+                path="/_matrix/key/v2/query",
                 data={
                     u"server_keys": {
                         server_name: {
@@ -513,8 +526,8 @@ class Keyring(object):

         (response, tls_certificate) = yield fetch_server_key(
             server_name, self.hs.tls_client_options_factory,
-            path=(b"/_matrix/key/v2/server/%s" % (
-                urllib.quote(requested_key_id),
+            path=("/_matrix/key/v2/server/%s" % (
+                urllib.parse.quote(requested_key_id),
             )).encode("ascii"),
         )
@@ -98,9 +98,9 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):
     creation_event = auth_events.get((EventTypes.Create, ""), None)

     if not creation_event:
-        raise SynapseError(
+        raise AuthError(
             403,
-            "Room %r does not exist" % (event.room_id,)
+            "No create event in auth events",
         )

     creating_domain = get_domain_from_id(event.room_id)
@@ -155,10 +155,7 @@ def check(event, auth_events, do_sig_check=True, do_size_check=True):

         if user_level < invite_level:
             raise AuthError(
-                403, (
-                    "You cannot issue a third party invite for %s." %
-                    (event.content.display_name,)
-                )
+                403, "You don't have permission to invite users",
             )
         else:
             logger.debug("Allowing! %s", event)
@@ -305,7 +302,7 @@ def _is_membership_change_allowed(event, auth_events):

         if user_level < invite_level:
             raise AuthError(
-                403, "You cannot invite user %s." % target_user_id
+                403, "You don't have permission to invite users",
             )
     elif Membership.JOIN == membership:
         # Joins are valid iff caller == target and they were:
@@ -693,7 +690,7 @@ def auth_types_for_event(event):
     auth_types = []

     auth_types.append((EventTypes.PowerLevels, "", ))
-    auth_types.append((EventTypes.Member, event.user_id, ))
+    auth_types.append((EventTypes.Member, event.sender, ))
     auth_types.append((EventTypes.Create, "", ))

     if event.type == EventTypes.Member:
@@ -13,13 +13,22 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import os
+from distutils.util import strtobool
+
+import six
+
 from synapse.util.caches import intern_dict
 from synapse.util.frozenutils import freeze

 # Whether we should use frozen_dict in FrozenEvent. Using frozen_dicts prevents
-# bugs where we accidentally share e.g. signature dicts. However, converting
-# a dict to frozen_dicts is expensive.
-USE_FROZEN_DICTS = True
+# bugs where we accidentally share e.g. signature dicts. However, converting a
+# dict to frozen_dicts is expensive.
+#
+# NOTE: This is overridden by the configuration by the Synapse worker apps, but
+# for the sake of tests, it is set here while it cannot be configured on the
+# homeserver object itself.
+USE_FROZEN_DICTS = strtobool(os.environ.get("SYNAPSE_USE_FROZEN_DICTS", "0"))


 class _EventInternalMetadata(object):
@@ -147,6 +156,9 @@ class EventBase(object):
     def items(self):
         return list(self._event_dict.items())

+    def keys(self):
+        return six.iterkeys(self._event_dict)
+

 class FrozenEvent(EventBase):
     def __init__(self, event_dict, internal_metadata_dict={}, rejected_reason=None):
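
strtobool() accepts the usual boolean spellings ("y", "yes", "true", "on",
"1" and their negatives), so the flag can now be flipped per process from
the environment. The pattern in isolation:

    import os
    from distutils.util import strtobool

    # Unset or "0" leaves frozen dicts disabled; e.g. a test runner can
    # export SYNAPSE_USE_FROZEN_DICTS=1 to turn them back on.
    USE_FROZEN_DICTS = strtobool(os.environ.get("SYNAPSE_USE_FROZEN_DICTS", "0"))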
@ -13,17 +13,20 @@
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
import logging
|
import logging
|
||||||
|
from collections import namedtuple
|
||||||
|
|
||||||
import six
|
import six
|
||||||
|
|
||||||
from twisted.internet import defer
|
from twisted.internet import defer
|
||||||
|
from twisted.internet.defer import DeferredList
|
||||||
|
|
||||||
from synapse.api.constants import MAX_DEPTH
|
from synapse.api.constants import MAX_DEPTH, EventTypes, Membership
|
||||||
from synapse.api.errors import Codes, SynapseError
|
from synapse.api.errors import Codes, SynapseError
|
||||||
from synapse.crypto.event_signing import check_event_content_hash
|
from synapse.crypto.event_signing import check_event_content_hash
|
||||||
from synapse.events import FrozenEvent
|
from synapse.events import FrozenEvent
|
||||||
from synapse.events.utils import prune_event
|
from synapse.events.utils import prune_event
|
||||||
from synapse.http.servlet import assert_params_in_dict
|
from synapse.http.servlet import assert_params_in_dict
|
||||||
|
from synapse.types import get_domain_from_id
|
||||||
from synapse.util import logcontext, unwrapFirstError
|
from synapse.util import logcontext, unwrapFirstError
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
@@ -133,34 +136,45 @@ class FederationBase(object):
           * throws a SynapseError if the signature check failed.
 
         The deferreds run their callbacks in the sentinel logcontext.
         """
-        redacted_pdus = [
-            prune_event(pdu)
-            for pdu in pdus
-        ]
-
-        deferreds = self.keyring.verify_json_objects_for_server([
-            (p.origin, p.get_pdu_json())
-            for p in redacted_pdus
-        ])
+        deferreds = _check_sigs_on_pdus(self.keyring, pdus)
 
         ctx = logcontext.LoggingContext.current_context()
 
-        def callback(_, pdu, redacted):
+        def callback(_, pdu):
             with logcontext.PreserveLoggingContext(ctx):
                 if not check_event_content_hash(pdu):
-                    logger.warn(
-                        "Event content has been tampered, redacting %s: %s",
-                        pdu.event_id, pdu.get_pdu_json()
-                    )
-                    return redacted
+                    # let's try to distinguish between failures because the event was
+                    # redacted (which are somewhat expected) vs actual ball-tampering
+                    # incidents.
+                    #
+                    # This is just a heuristic, so we just assume that if the keys are
+                    # about the same between the redacted and received events, then the
+                    # received event was probably a redacted copy (but we then use our
+                    # *actual* redacted copy to be on the safe side.)
+                    redacted_event = prune_event(pdu)
+                    if (
+                        set(redacted_event.keys()) == set(pdu.keys()) and
+                        set(six.iterkeys(redacted_event.content))
+                        == set(six.iterkeys(pdu.content))
+                    ):
+                        logger.info(
+                            "Event %s seems to have been redacted; using our redacted "
+                            "copy",
+                            pdu.event_id,
+                        )
+                    else:
+                        logger.warning(
+                            "Event %s content has been tampered, redacting",
+                            pdu.event_id, pdu.get_pdu_json(),
+                        )
+                    return redacted_event
 
                 if self.spam_checker.check_event_for_spam(pdu):
                     logger.warn(
                         "Event contains spam, redacting %s: %s",
                         pdu.event_id, pdu.get_pdu_json()
                     )
-                    return redacted
+                    return prune_event(pdu)
 
                 return pdu
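The "probably a redacted copy" heuristic above boils down to a key-set comparison between the received event and our own redacted copy. A minimal sketch with plain dicts (hypothetical event data, not Synapse's EventBase API):

    def looks_like_redacted_copy(received, redacted):
        # If the top-level keys and the content keys of the received event
        # match those of our own redacted copy, the hash mismatch was most
        # likely caused by a legitimate redaction rather than tampering.
        return (
            set(received.keys()) == set(redacted.keys())
            and set(received["content"].keys()) == set(redacted["content"].keys())
        )

    received = {"type": "m.room.message", "content": {}, "signatures": {}}
    redacted = {"type": "m.room.message", "content": {}, "signatures": {}}
    assert looks_like_redacted_copy(received, redacted)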
@@ -168,21 +182,121 @@ class FederationBase(object):
             failure.trap(SynapseError)
             with logcontext.PreserveLoggingContext(ctx):
                 logger.warn(
-                    "Signature check failed for %s",
-                    pdu.event_id,
+                    "Signature check failed for %s: %s",
+                    pdu.event_id, failure.getErrorMessage(),
                 )
             return failure
 
-        for deferred, pdu, redacted in zip(deferreds, pdus, redacted_pdus):
+        for deferred, pdu in zip(deferreds, pdus):
             deferred.addCallbacks(
                 callback, errback,
-                callbackArgs=[pdu, redacted],
+                callbackArgs=[pdu],
                 errbackArgs=[pdu],
             )
 
         return deferreds
 
 
+class PduToCheckSig(namedtuple("PduToCheckSig", [
+    "pdu", "redacted_pdu_json", "event_id_domain", "sender_domain", "deferreds",
+])):
+    pass
+
+
+def _check_sigs_on_pdus(keyring, pdus):
+    """Check that the given events are correctly signed
+
+    Args:
+        keyring (synapse.crypto.Keyring): keyring object to do the checks
+        pdus (Collection[EventBase]): the events to be checked
+
+    Returns:
+        List[Deferred]: a Deferred for each event in pdus, which will either succeed if
+           the signatures are valid, or fail (with a SynapseError) if not.
+    """
+
+    # (currently this is written assuming the v1 room structure; we'll probably want a
+    # separate function for checking v2 rooms)
+
+    # we want to check that the event is signed by:
+    #
+    # (a) the server which created the event_id
+    #
+    # (b) the sender's server.
+    #
+    #     - except in the case of invites created from a 3pid invite, which are exempt
+    #     from this check, because the sender has to match that of the original 3pid
+    #     invite, but the event may come from a different HS, for reasons that I don't
+    #     entirely grok (why do the senders have to match? and if they do, why doesn't the
+    #     joining server ask the inviting server to do the switcheroo with
+    #     exchange_third_party_invite?).
+    #
+    #     That's pretty awful, since redacting such an invite will render it invalid
+    #     (because it will then look like a regular invite without a valid signature),
+    #     and signatures are *supposed* to be valid whether or not an event has been
+    #     redacted. But this isn't the worst of the ways that 3pid invites are broken.
+    #
+    # let's start by getting the domain for each pdu, and flattening the event back
+    # to JSON.
+    pdus_to_check = [
+        PduToCheckSig(
+            pdu=p,
+            redacted_pdu_json=prune_event(p).get_pdu_json(),
+            event_id_domain=get_domain_from_id(p.event_id),
+            sender_domain=get_domain_from_id(p.sender),
+            deferreds=[],
+        )
+        for p in pdus
+    ]
+
+    # first make sure that the event is signed by the event_id's domain
+    deferreds = keyring.verify_json_objects_for_server([
+        (p.event_id_domain, p.redacted_pdu_json)
+        for p in pdus_to_check
+    ])
+
+    for p, d in zip(pdus_to_check, deferreds):
+        p.deferreds.append(d)
+
+    # now let's look for events where the sender's domain is different to the
+    # event id's domain (normally only the case for joins/leaves), and add additional
+    # checks.
+    pdus_to_check_sender = [
+        p for p in pdus_to_check
+        if p.sender_domain != p.event_id_domain and not _is_invite_via_3pid(p.pdu)
+    ]
+
+    more_deferreds = keyring.verify_json_objects_for_server([
+        (p.sender_domain, p.redacted_pdu_json)
+        for p in pdus_to_check_sender
+    ])
+
+    for p, d in zip(pdus_to_check_sender, more_deferreds):
+        p.deferreds.append(d)
+
+    # replace lists of deferreds with single Deferreds
+    return [_flatten_deferred_list(p.deferreds) for p in pdus_to_check]
+
+
+def _flatten_deferred_list(deferreds):
+    """Given a list of one or more deferreds, either return the single deferred, or
+    combine into a DeferredList.
+    """
+    if len(deferreds) > 1:
+        return DeferredList(deferreds, fireOnOneErrback=True, consumeErrors=True)
+    else:
+        assert len(deferreds) == 1
+        return deferreds[0]
+
+
+def _is_invite_via_3pid(event):
+    return (
+        event.type == EventTypes.Member
+        and event.membership == Membership.INVITE
+        and "third_party_invite" in event.content
+    )
+
+
 def event_from_pdu_json(pdu_json, outlier=False):
     """Construct a FrozenEvent from an event json received over federation
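_check_sigs_on_pdus gives each PDU a list of one or two signature-check deferreds and then collapses that list into a single Deferred per event. A rough sketch of the combining step, using only Twisted primitives (the two check results are stand-ins):

    from twisted.internet import defer
    from twisted.internet.defer import DeferredList

    def flatten_deferred_list(deferreds):
        # A single deferred can be returned as-is; several are combined with
        # fireOnOneErrback so the result fails as soon as any check fails,
        # and consumeErrors so individual failures aren't reported twice.
        if len(deferreds) > 1:
            return DeferredList(deferreds, fireOnOneErrback=True, consumeErrors=True)
        assert len(deferreds) == 1
        return deferreds[0]

    checks = [defer.succeed("event-id domain ok"), defer.succeed("sender domain ok")]
    d = flatten_deferred_list(checks)
    d.addCallback(lambda results: print(results))  # list of (success, value) pairs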
@@ -66,6 +66,14 @@ class FederationClient(FederationBase):
         self.state = hs.get_state_handler()
         self.transport_layer = hs.get_federation_transport_client()
 
+        self._get_pdu_cache = ExpiringCache(
+            cache_name="get_pdu_cache",
+            clock=self._clock,
+            max_len=1000,
+            expiry_ms=120 * 1000,
+            reset_expiry_on_get=False,
+        )
+
     def _clear_tried_cache(self):
         """Clear pdu_destination_tried cache"""
         now = self._clock.time_msec()
@@ -82,17 +90,6 @@ class FederationClient(FederationBase):
         if destination_dict:
             self.pdu_destination_tried[event_id] = destination_dict
 
-    def start_get_pdu_cache(self):
-        self._get_pdu_cache = ExpiringCache(
-            cache_name="get_pdu_cache",
-            clock=self._clock,
-            max_len=1000,
-            expiry_ms=120 * 1000,
-            reset_expiry_on_get=False,
-        )
-
-        self._get_pdu_cache.start()
-
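Constructing the cache unconditionally in the constructor lets the later code drop its `if self._get_pdu_cache` guards. As a rough sketch of the semantics this code appears to assume of ExpiringCache (this is not Synapse's implementation): entries are dropped expiry_ms after insertion, and with reset_expiry_on_get=False a read does not refresh the timer.

    import time

    class TTLCache(object):
        def __init__(self, max_len=1000, expiry_ms=120 * 1000):
            self._max_len = max_len
            self._expiry_ms = expiry_ms
            self._entries = {}  # key -> (inserted_at_ms, value)

        def __setitem__(self, key, value):
            if len(self._entries) >= self._max_len:
                # evict the oldest entry to stay within max_len
                oldest = min(self._entries, key=lambda k: self._entries[k][0])
                del self._entries[oldest]
            self._entries[key] = (time.time() * 1000, value)

        def get(self, key, default=None):
            entry = self._entries.get(key)
            if entry is None:
                return default
            inserted_at, value = entry
            if time.time() * 1000 - inserted_at > self._expiry_ms:
                # expired: drop it rather than hand back stale data
                del self._entries[key]
                return default
            return value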
     @log_function
     def make_query(self, destination, query_type, args,
                    retry_on_dns_fail=False, ignore_backoff=False):
@@ -212,8 +209,6 @@ class FederationClient(FederationBase):
         Will attempt to get the PDU from each destination in the list until
         one succeeds.
 
-        This will persist the PDU locally upon receipt.
-
         Args:
             destinations (list): Which home servers to query
             event_id (str): event to fetch
@@ -229,10 +224,9 @@ class FederationClient(FederationBase):
 
         # TODO: Rate limit the number of times we try and get the same event.
 
-        if self._get_pdu_cache:
-            ev = self._get_pdu_cache.get(event_id)
-            if ev:
-                defer.returnValue(ev)
+        ev = self._get_pdu_cache.get(event_id)
+        if ev:
+            defer.returnValue(ev)
 
         pdu_attempts = self.pdu_destination_tried.setdefault(event_id, {})
 
@@ -271,10 +265,10 @@ class FederationClient(FederationBase):
                     event_id, destination, e,
                 )
             except NotRetryingDestination as e:
-                logger.info(e.message)
+                logger.info(str(e))
                 continue
             except FederationDeniedError as e:
-                logger.info(e.message)
+                logger.info(str(e))
                 continue
             except Exception as e:
                 pdu_attempts[destination] = now
@@ -285,7 +279,7 @@ class FederationClient(FederationBase):
                 )
                 continue
 
-        if self._get_pdu_cache is not None and signed_pdu:
+        if signed_pdu:
             self._get_pdu_cache[event_id] = signed_pdu
 
         defer.returnValue(signed_pdu)
@@ -293,8 +287,7 @@ class FederationClient(FederationBase):
     @defer.inlineCallbacks
     @log_function
     def get_state_for_room(self, destination, room_id, event_id):
-        """Requests all of the `current` state PDUs for a given room from
-        a remote home server.
+        """Requests all of the room state at a given event from a remote home server.
 
         Args:
             destination (str): The remote homeserver to query for the state.
@@ -302,9 +295,10 @@ class FederationClient(FederationBase):
             event_id (str): The id of the event we want the state at.
 
         Returns:
-            Deferred: Results in a list of PDUs.
+            Deferred[Tuple[List[EventBase], List[EventBase]]]:
+                A list of events in the state, and a list of events in the auth chain
+                for the given event.
         """
 
         try:
             # First we try and ask for just the IDs, as thats far quicker if
             # we have most of the state and auth_chain already.
@@ -510,7 +504,7 @@ class FederationClient(FederationBase):
             else:
                 logger.warn(
                     "Failed to %s via %s: %i %s",
-                    description, destination, e.code, e.message,
+                    description, destination, e.code, e.args[0],
                 )
         except Exception:
             logger.warn(
@@ -875,7 +869,7 @@ class FederationClient(FederationBase):
             except Exception as e:
                 logger.exception(
                     "Failed to send_third_party_invite via %s: %s",
-                    destination, e.message
+                    destination, str(e)
                 )
 
         raise RuntimeError("Failed to send to any server.")
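The e.message to str(e) substitutions through this file are Python 3 groundwork: BaseException.message was a Python 2-ism that no longer exists on Python 3, whereas str(e) and e.args work on both. For example:

    try:
        raise ValueError("destination is not retrying")
    except ValueError as e:
        print(str(e))     # "destination is not retrying" on Python 2 and 3
        print(e.args[0])  # same value, via the args tuple
        # e.message would raise AttributeError on Python 3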
@@ -14,7 +14,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-import re
 
 import six
 from six import iteritems
@@ -44,8 +43,10 @@ from synapse.replication.http.federation import (
     ReplicationGetQueryRestServlet,
 )
 from synapse.types import get_domain_from_id
+from synapse.util import glob_to_regex
 from synapse.util.async_helpers import Linearizer, concurrently_execute
 from synapse.util.caches.response_cache import ResponseCache
+from synapse.util.logcontext import nested_logging_context
 from synapse.util.logutils import log_function
 
 # when processing incoming transactions, we try to handle multiple rooms in
@@ -99,7 +100,7 @@ class FederationServer(FederationBase):
 
     @defer.inlineCallbacks
     @log_function
-    def on_incoming_transaction(self, transaction_data):
+    def on_incoming_transaction(self, origin, transaction_data):
         # keep this as early as possible to make the calculated origin ts as
         # accurate as possible.
         request_time = self._clock.time_msec()
@@ -108,34 +109,33 @@ class FederationServer(FederationBase):
 
         if not transaction.transaction_id:
             raise Exception("Transaction missing transaction_id")
-        if not transaction.origin:
-            raise Exception("Transaction missing origin")
 
         logger.debug("[%s] Got transaction", transaction.transaction_id)
 
         # use a linearizer to ensure that we don't process the same transaction
         # multiple times in parallel.
         with (yield self._transaction_linearizer.queue(
-            (transaction.origin, transaction.transaction_id),
+            (origin, transaction.transaction_id),
         )):
             result = yield self._handle_incoming_transaction(
-                transaction, request_time,
+                origin, transaction, request_time,
             )
 
         defer.returnValue(result)
 
     @defer.inlineCallbacks
-    def _handle_incoming_transaction(self, transaction, request_time):
+    def _handle_incoming_transaction(self, origin, transaction, request_time):
         """ Process an incoming transaction and return the HTTP response
 
         Args:
+            origin (unicode): the server making the request
             transaction (Transaction): incoming transaction
             request_time (int): timestamp that the HTTP request arrived at
 
         Returns:
             Deferred[(int, object)]: http response code and body
         """
-        response = yield self.transaction_actions.have_responded(transaction)
+        response = yield self.transaction_actions.have_responded(origin, transaction)
 
         if response:
             logger.debug(
@@ -149,7 +149,7 @@ class FederationServer(FederationBase):
 
         received_pdus_counter.inc(len(transaction.pdus))
 
-        origin_host, _ = parse_server_name(transaction.origin)
+        origin_host, _ = parse_server_name(origin)
 
         pdus_by_room = {}
 
@@ -188,21 +188,22 @@ class FederationServer(FederationBase):
 
             for pdu in pdus_by_room[room_id]:
                 event_id = pdu.event_id
-                try:
-                    yield self._handle_received_pdu(
-                        transaction.origin, pdu
-                    )
-                    pdu_results[event_id] = {}
-                except FederationError as e:
-                    logger.warn("Error handling PDU %s: %s", event_id, e)
-                    pdu_results[event_id] = {"error": str(e)}
-                except Exception as e:
-                    f = failure.Failure()
-                    pdu_results[event_id] = {"error": str(e)}
-                    logger.error(
-                        "Failed to handle PDU %s: %s",
-                        event_id, f.getTraceback().rstrip(),
-                    )
+                with nested_logging_context(event_id):
+                    try:
+                        yield self._handle_received_pdu(
+                            origin, pdu
+                        )
+                        pdu_results[event_id] = {}
+                    except FederationError as e:
+                        logger.warn("Error handling PDU %s: %s", event_id, e)
+                        pdu_results[event_id] = {"error": str(e)}
+                    except Exception as e:
+                        f = failure.Failure()
+                        pdu_results[event_id] = {"error": str(e)}
+                        logger.error(
+                            "Failed to handle PDU %s: %s",
+                            event_id, f.getTraceback().rstrip(),
+                        )
 
         yield concurrently_execute(
             process_pdus_for_room, pdus_by_room.keys(),
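Wrapping the per-PDU work in nested_logging_context means every log line for an event carries that event's id, which matters once several rooms' PDUs are being processed concurrently. A rough stdlib approximation of the idea (Synapse's LoggingContext machinery does considerably more, e.g. resource accounting, so treat this purely as a sketch):

    import contextlib
    import logging

    logger = logging.getLogger("federation")

    @contextlib.contextmanager
    def nested_log_scope(suffix):
        # Hand back a logger whose records are tagged with the scope, so
        # interleaved log lines can be attributed to the right event.
        yield logging.LoggerAdapter(logger, {"scope": suffix})

    def process_pdus(event_ids):
        for event_id in event_ids:
            with nested_log_scope(event_id) as log:
                log.info("handling PDU")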
@@ -212,7 +213,7 @@ class FederationServer(FederationBase):
         if hasattr(transaction, "edus"):
             for edu in (Edu(**x) for x in transaction.edus):
                 yield self.received_edu(
-                    transaction.origin,
+                    origin,
                     edu.edu_type,
                     edu.content
                 )
@@ -224,6 +225,7 @@ class FederationServer(FederationBase):
         logger.debug("Returning: %s", str(response))
 
         yield self.transaction_actions.set_response(
+            origin,
             transaction,
             200, response
         )
@@ -505,19 +507,19 @@ class FederationServer(FederationBase):
     @defer.inlineCallbacks
     @log_function
     def on_get_missing_events(self, origin, room_id, earliest_events,
-                              latest_events, limit, min_depth):
+                              latest_events, limit):
         with (yield self._server_linearizer.queue((origin, room_id))):
             origin_host, _ = parse_server_name(origin)
             yield self.check_server_matches_acl(origin_host, room_id)
 
             logger.info(
                 "on_get_missing_events: earliest_events: %r, latest_events: %r,"
-                " limit: %d, min_depth: %d",
-                earliest_events, latest_events, limit, min_depth
+                " limit: %d",
+                earliest_events, latest_events, limit,
             )
 
             missing_events = yield self.handler.on_get_missing_events(
-                origin, room_id, earliest_events, latest_events, limit, min_depth
+                origin, room_id, earliest_events, latest_events, limit,
             )
 
             if len(missing_events) < 5:
@@ -618,7 +620,7 @@ class FederationServer(FederationBase):
         )
 
         yield self.handler.on_receive_pdu(
-            origin, pdu, get_missing=True, sent_to_us_directly=True,
+            origin, pdu, sent_to_us_directly=True,
         )
 
     def __str__(self):
@@ -727,22 +729,10 @@ def _acl_entry_matches(server_name, acl_entry):
     if not isinstance(acl_entry, six.string_types):
         logger.warn("Ignoring non-str ACL entry '%s' (is %s)", acl_entry, type(acl_entry))
         return False
-    regex = _glob_to_regex(acl_entry)
+    regex = glob_to_regex(acl_entry)
     return regex.match(server_name)
 
 
-def _glob_to_regex(glob):
-    res = ''
-    for c in glob:
-        if c == '*':
-            res = res + '.*'
-        elif c == '?':
-            res = res + '.'
-        else:
-            res = res + re.escape(c)
-    return re.compile(res + "\\Z", re.IGNORECASE)
-
-
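The deleted helper has not gone away; it moves to synapse.util as glob_to_regex so other callers can share it. Its behaviour, restated as a standalone sketch of the same conversion: '*' becomes '.*', '?' becomes '.', everything else is escaped, and the match is case-insensitive and anchored to the whole server name.

    import re

    def glob_to_regex(glob):
        # Same conversion as the removed helper above.
        res = ''.join(
            '.*' if c == '*' else '.' if c == '?' else re.escape(c)
            for c in glob
        )
        return re.compile(res + r"\Z", re.IGNORECASE)

    assert glob_to_regex("*.example.com").match("Matrix.Example.Com")
    assert not glob_to_regex("*.example.com").match("example.org")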
 class FederationHandlerRegistry(object):
     """Allows classes to register themselves as handlers for a given EDU or
     query type for incoming federation traffic.
@@ -798,7 +788,7 @@ class FederationHandlerRegistry(object):
             yield handler(origin, content)
         except SynapseError as e:
             logger.info("Failed to handle edu %r: %r", edu_type, e)
-        except Exception as e:
+        except Exception:
             logger.exception("Failed to handle edu %r", edu_type)
 
     def on_query(self, query_type, args):
@@ -838,9 +828,9 @@ class ReplicationFederationHandlerRegistry(FederationHandlerRegistry):
         )
 
         return self._send_edu(
             edu_type=edu_type,
             origin=origin,
             content=content,
         )
 
     def on_query(self, query_type, args):
@@ -851,6 +841,6 @@ class ReplicationFederationHandlerRegistry(FederationHandlerRegistry):
         return handler(args)
 
         return self._get_query_client(
             query_type=query_type,
             args=args,
         )
@@ -36,7 +36,7 @@ class TransactionActions(object):
         self.store = datastore
 
     @log_function
-    def have_responded(self, transaction):
+    def have_responded(self, origin, transaction):
         """ Have we already responded to a transaction with the same id and
         origin?
 
@@ -50,11 +50,11 @@ class TransactionActions(object):
                               "transaction_id")
 
         return self.store.get_received_txn_response(
-            transaction.transaction_id, transaction.origin
+            transaction.transaction_id, origin
         )
 
     @log_function
-    def set_response(self, transaction, code, response):
+    def set_response(self, origin, transaction, code, response):
         """ Persist how we responded to a transaction.
 
         Returns:
@@ -66,7 +66,7 @@ class TransactionActions(object):
 
         return self.store.set_received_txn_response(
             transaction.transaction_id,
-            transaction.origin,
+            origin,
             code,
             response,
         )
@@ -32,7 +32,7 @@ Events are replicated via a separate events stream.
 import logging
 from collections import namedtuple
 
-from six import iteritems, itervalues
+from six import iteritems
 
 from sortedcontainers import SortedDict
 
@@ -117,7 +117,7 @@ class FederationRemoteSendQueue(object):
 
         user_ids = set(
             user_id
-            for uids in itervalues(self.presence_changed)
+            for uids in self.presence_changed.values()
             for user_id in uids
         )
@@ -137,26 +137,6 @@ class TransactionQueue(object):
 
         self._processing_pending_presence = False
 
-    def can_send_to(self, destination):
-        """Can we send messages to the given server?
-
-        We can't send messages to ourselves. If we are running on localhost
-        then we can only federation with other servers running on localhost.
-        Otherwise we only federate with servers on a public domain.
-
-        Args:
-            destination(str): The server we are possibly trying to send to.
-        Returns:
-            bool: True if we can send to the server.
-        """
-
-        if destination == self.server_name:
-            return False
-        if self.server_name.startswith("localhost"):
-            return destination.startswith("localhost")
-        else:
-            return not destination.startswith("localhost")
-
     def notify_new_events(self, current_id):
         """This gets called when we have some new events we might want to
         send out to other servers.
@@ -279,10 +259,7 @@ class TransactionQueue(object):
         self._order += 1
 
         destinations = set(destinations)
-        destinations = set(
-            dest for dest in destinations if self.can_send_to(dest)
-        )
+        destinations.discard(self.server_name)
 
         logger.debug("Sending to: %s", str(destinations))
 
         if not destinations:
@@ -358,7 +335,7 @@ class TransactionQueue(object):
 
         for destinations, states in hosts_and_states:
             for destination in destinations:
-                if not self.can_send_to(destination):
+                if destination == self.server_name:
                     continue
 
                 self.pending_presence_by_dest.setdefault(
@@ -377,7 +354,8 @@ class TransactionQueue(object):
             content=content,
         )
 
-        if not self.can_send_to(destination):
+        if destination == self.server_name:
+            logger.info("Not sending EDU to ourselves")
             return
 
         sent_edus_counter.inc()
@@ -392,10 +370,8 @@ class TransactionQueue(object):
         self._attempt_new_transaction(destination)
 
     def send_device_messages(self, destination):
-        if destination == self.server_name or destination == "localhost":
-            return
-
-        if not self.can_send_to(destination):
+        if destination == self.server_name:
+            logger.info("Not sending device update to ourselves")
             return
 
         self._attempt_new_transaction(destination)
@@ -463,7 +439,19 @@ class TransactionQueue(object):
             # pending_transactions flag.
 
             pending_pdus = self.pending_pdus_by_dest.pop(destination, [])
+
+            # We can only include at most 50 PDUs per transactions
+            pending_pdus, leftover_pdus = pending_pdus[:50], pending_pdus[50:]
+            if leftover_pdus:
+                self.pending_pdus_by_dest[destination] = leftover_pdus
+
             pending_edus = self.pending_edus_by_dest.pop(destination, [])
+
+            # We can only include at most 100 EDUs per transactions
+            pending_edus, leftover_edus = pending_edus[:100], pending_edus[100:]
+            if leftover_edus:
+                self.pending_edus_by_dest[destination] = leftover_edus
+
             pending_presence = self.pending_presence_by_dest.pop(destination, {})
 
             pending_edus.extend(
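The slice-and-put-back pattern above caps a transaction at 50 PDUs and 100 EDUs while keeping the overflow queued for the next attempt. The same pattern in isolation:

    def take_batch(queue, max_items):
        # Send at most max_items now; leave the remainder queued for the
        # next transaction to the same destination.
        batch, leftover = queue[:max_items], queue[max_items:]
        return batch, leftover

    pending = list(range(130))
    batch, pending = take_batch(pending, 50)
    assert len(batch) == 50 and len(pending) == 80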
@@ -645,14 +633,6 @@ class TransactionQueue(object):
                 transaction, json_data_cb
             )
             code = 200
-
-            if response:
-                for e_id, r in response.get("pdus", {}).items():
-                    if "error" in r:
-                        logger.warn(
-                            "Transaction returned error for %s: %s",
-                            e_id, r,
-                        )
         except HttpResponseException as e:
             code = e.code
             response = e.response
@@ -669,19 +649,24 @@ class TransactionQueue(object):
                 destination, txn_id, code
             )
 
-        logger.debug("TX [%s] Sent transaction", destination)
-        logger.debug("TX [%s] Marking as delivered...", destination)
-
         yield self.transaction_actions.delivered(
             transaction, code, response
         )
 
-        logger.debug("TX [%s] Marked as delivered", destination)
+        logger.debug("TX [%s] {%s} Marked as delivered", destination, txn_id)
 
-        if code != 200:
+        if code == 200:
+            for e_id, r in response.get("pdus", {}).items():
+                if "error" in r:
+                    logger.warn(
+                        "TX [%s] {%s} Remote returned error for %s: %s",
+                        destination, txn_id, e_id, r,
+                    )
+        else:
             for p in pdus:
-                logger.info(
-                    "Failed to send event %s to %s", p.event_id, destination
-                )
+                logger.warn(
+                    "TX [%s] {%s} Failed to send event %s",
+                    destination, txn_id, p.event_id,
+                )
             success = False
@@ -15,7 +15,8 @@
 # limitations under the License.
 
 import logging
-import urllib
+
+from six.moves import urllib
 
 from twisted.internet import defer
 
@@ -106,7 +107,7 @@ class TransportLayerClient(object):
             dest (str)
             room_id (str)
             event_tuples (list)
-            limt (int)
+            limit (int)
 
         Returns:
             Deferred: Results in a dict received from the remote homeserver.
@@ -142,9 +143,17 @@ class TransportLayerClient(object):
             transaction (Transaction)
 
         Returns:
-            Deferred: Results of the deferred is a tuple in the form of
-                (response_code, response_body) where the response_body is a
-                python dict decoded from json
+            Deferred: Succeeds when we get a 2xx HTTP response. The result
+                will be the decoded JSON body.
+
+            Fails with ``HTTPRequestException`` if we get an HTTP response
+            code >= 300.
+
+            Fails with ``NotRetryingDestination`` if we are not yet ready
+            to retry this server.
+
+            Fails with ``FederationDeniedError`` if this destination
+            is not on our federation whitelist
         """
         logger.debug(
             "send_data dest=%s, txid=%s",
@@ -169,11 +178,6 @@ class TransportLayerClient(object):
             backoff_on_404=True,  # If we get a 404 the other side has gone
         )
 
-        logger.debug(
-            "send_data dest=%s, txid=%s, got response: 200",
-            transaction.destination, transaction.transaction_id,
-        )
-
         defer.returnValue(response)
 
     @defer.inlineCallbacks
@@ -951,4 +955,4 @@ def _create_path(prefix, path, *args):
     Returns:
         str
     """
-    return prefix + path % tuple(urllib.quote(arg, "") for arg in args)
+    return prefix + path % tuple(urllib.parse.quote(arg, "") for arg in args)
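six.moves.urllib papers over the urllib/urllib.parse split between Python 2 and 3, which is why the quote call gains a .parse. The empty "safe" string matters: it forces characters like '/' and ':' in identifiers such as room aliases to be percent-encoded, so they cannot alter the path structure. A sketch with a hypothetical path template:

    from six.moves import urllib

    def create_path(prefix, path, *args):
        # Percent-encode every argument with an empty safe set, mirroring
        # _create_path above.
        return prefix + path % tuple(urllib.parse.quote(arg, "") for arg in args)

    print(create_path("/_matrix/federation/v1", "/state/%s", "!room:example.com"))
    # -> /_matrix/federation/v1/state/%21room%3Aexample.com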
@@ -90,8 +90,8 @@ class Authenticator(object):
     @defer.inlineCallbacks
     def authenticate_request(self, request, content):
         json_request = {
-            "method": request.method,
-            "uri": request.uri,
+            "method": request.method.decode('ascii'),
+            "uri": request.uri.decode('ascii'),
             "destination": self.server_name,
             "signatures": {},
         }
@@ -252,7 +252,7 @@ class BaseFederationServlet(object):
             by the callback method. None if the request has already been handled.
         """
         content = None
-        if request.method in ["PUT", "POST"]:
+        if request.method in [b"PUT", b"POST"]:
             # TODO: Handle other method types? other content types?
             content = parse_json_object_from_request(request)
 
@@ -261,10 +261,10 @@ class BaseFederationServlet(object):
         except NoAuthenticationError:
             origin = None
             if self.REQUIRE_AUTH:
-                logger.exception("authenticate_request failed")
+                logger.warn("authenticate_request failed: missing authentication")
                 raise
-        except Exception:
-            logger.exception("authenticate_request failed")
+        except Exception as e:
+            logger.warn("authenticate_request failed: %s", e)
             raise
 
         if origin:
@@ -353,7 +353,7 @@ class FederationSendServlet(BaseFederationServlet):
 
         try:
             code, response = yield self.handler.on_incoming_transaction(
-                transaction_data
+                origin, transaction_data,
             )
         except Exception:
             logger.exception("on_incoming_transaction failed")
@@ -386,7 +386,7 @@ class FederationStateServlet(BaseFederationServlet):
         return self.handler.on_context_state_request(
             origin,
             context,
-            query.get("event_id", [None])[0],
+            parse_string_from_args(query, "event_id", None),
         )
 
 
@@ -397,7 +397,7 @@ class FederationStateIdsServlet(BaseFederationServlet):
         return self.handler.on_state_ids_request(
             origin,
             room_id,
-            query.get("event_id", [None])[0],
+            parse_string_from_args(query, "event_id", None),
         )
 
 
@@ -405,14 +405,12 @@ class FederationBackfillServlet(BaseFederationServlet):
     PATH = "/backfill/(?P<context>[^/]*)/"
 
     def on_GET(self, origin, content, query, context):
-        versions = query["v"]
-        limits = query["limit"]
+        versions = [x.decode('ascii') for x in query[b"v"]]
+        limit = parse_integer_from_args(query, "limit", None)
 
-        if not limits:
+        if not limit:
             return defer.succeed((400, {"error": "Did not include limit param"}))
 
-        limit = int(limits[-1])
-
         return self.handler.on_backfill_request(origin, context, versions, limit)
 
 
@@ -423,7 +421,7 @@ class FederationQueryServlet(BaseFederationServlet):
     def on_GET(self, origin, content, query, query_type):
         return self.handler.on_query_request(
             query_type,
-            {k: v[0].decode("utf-8") for k, v in query.items()}
+            {k.decode('utf8'): v[0].decode("utf-8") for k, v in query.items()}
         )
 
 
@@ -562,7 +560,6 @@ class FederationGetMissingEventsServlet(BaseFederationServlet):
     @defer.inlineCallbacks
     def on_POST(self, origin, content, query, room_id):
         limit = int(content.get("limit", 10))
-        min_depth = int(content.get("min_depth", 0))
         earliest_events = content.get("earliest_events", [])
         latest_events = content.get("latest_events", [])
 
@@ -571,7 +568,6 @@ class FederationGetMissingEventsServlet(BaseFederationServlet):
             room_id=room_id,
             earliest_events=earliest_events,
             latest_events=latest_events,
-            min_depth=min_depth,
             limit=limit,
         )
 
@@ -630,14 +626,14 @@ class OpenIdUserInfo(BaseFederationServlet):
 
     @defer.inlineCallbacks
     def on_GET(self, origin, content, query):
-        token = query.get("access_token", [None])[0]
+        token = query.get(b"access_token", [None])[0]
         if token is None:
             defer.returnValue((401, {
                 "errcode": "M_MISSING_TOKEN", "error": "Access Token required"
             }))
             return
 
-        user_id = yield self.handler.on_openid_userinfo(token)
+        user_id = yield self.handler.on_openid_userinfo(token.decode('ascii'))
 
         if user_id is None:
             defer.returnValue((401, {
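Most of the servlet changes above deal with the same Python 3 fact: Twisted's request.args maps bytes keys to lists of bytes values, so lookups need b"..." keys and the values need decoding. A sketch of what a helper like parse_integer_from_args is assumed to do, with the names and behaviour inferred from the call sites rather than from Synapse's actual implementation:

    def parse_integer_from_args(args, name, default=None):
        # Look up the encoded name, then decode/convert the last value.
        values = args.get(name.encode("ascii"))
        if not values:
            return default
        return int(values[-1])

    query = {b"v": [b"$abc:example.com"], b"limit": [b"10"]}
    assert parse_integer_from_args(query, "limit") == 10
    versions = [x.decode("ascii") for x in query[b"v"]]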
@@ -28,6 +28,7 @@ from synapse.metrics import (
     event_processing_loop_room_count,
 )
 from synapse.metrics.background_process_metrics import run_as_background_process
+from synapse.util import log_failure
 from synapse.util.logcontext import make_deferred_yieldable, run_in_background
 from synapse.util.metrics import Measure
 
@@ -36,17 +37,6 @@ logger = logging.getLogger(__name__)
 events_processed_counter = Counter("synapse_handlers_appservice_events_processed", "")
 
 
-def log_failure(failure):
-    logger.error(
-        "Application Services Failure",
-        exc_info=(
-            failure.type,
-            failure.value,
-            failure.getTracebackObject()
-        )
-    )
-
-
 class ApplicationServicesHandler(object):
 
     def __init__(self, hs):
@@ -112,7 +102,10 @@ class ApplicationServicesHandler(object):
 
                 if not self.started_scheduler:
                     def start_scheduler():
-                        return self.scheduler.start().addErrback(log_failure)
+                        return self.scheduler.start().addErrback(
+                            log_failure, "Application Services Failure",
+                        )
 
                     run_as_background_process("as_scheduler", start_scheduler)
                     self.started_scheduler = True
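The relocated log_failure in synapse.util takes the message as a second argument, which works because Twisted's addErrback forwards extra positional arguments to the errback after the Failure. A minimal demonstration with a stand-in logger:

    from twisted.internet import defer

    def log_failure(failure, msg):
        # Twisted passes the Failure first, then any extra positional
        # arguments given to addErrback, so one generic handler can be
        # reused with a caller-supplied message.
        print("%s: %s" % (msg, failure.getErrorMessage()))

    d = defer.Deferred()
    d.addErrback(log_failure, "Application Services Failure")
    d.errback(RuntimeError("scheduler died"))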
@@ -22,7 +22,7 @@ import bcrypt
 import pymacaroons
 from canonicaljson import json
 
-from twisted.internet import defer, threads
+from twisted.internet import defer
 from twisted.web.client import PartialDownloadError
 
 import synapse.util.stringutils as stringutils
@@ -37,8 +37,8 @@ from synapse.api.errors import (
 )
 from synapse.module_api import ModuleApi
 from synapse.types import UserID
+from synapse.util import logcontext
 from synapse.util.caches.expiringcache import ExpiringCache
-from synapse.util.logcontext import make_deferred_yieldable
 
 from ._base import BaseHandler
 
@@ -884,40 +884,32 @@ class AuthHandler(BaseHandler):
                 bcrypt.gensalt(self.bcrypt_rounds),
             ).decode('ascii')
 
-        return make_deferred_yieldable(
-            threads.deferToThreadPool(
-                self.hs.get_reactor(), self.hs.get_reactor().getThreadPool(), _do_hash
-            ),
-        )
+        return logcontext.defer_to_thread(self.hs.get_reactor(), _do_hash)
 
     def validate_hash(self, password, stored_hash):
         """Validates that self.hash(password) == stored_hash.
 
         Args:
             password (unicode): Password to hash.
-            stored_hash (unicode): Expected hash value.
+            stored_hash (bytes): Expected hash value.
 
         Returns:
             Deferred(bool): Whether self.hash(password) == stored_hash.
         """
 
         def _do_validate_hash():
             # Normalise the Unicode in the password
             pw = unicodedata.normalize("NFKC", password)
 
             return bcrypt.checkpw(
                 pw.encode('utf8') + self.hs.config.password_pepper.encode("utf8"),
-                stored_hash.encode('utf8')
+                stored_hash
             )
 
         if stored_hash:
-            return make_deferred_yieldable(
-                threads.deferToThreadPool(
-                    self.hs.get_reactor(),
-                    self.hs.get_reactor().getThreadPool(),
-                    _do_validate_hash,
-                ),
-            )
+            if not isinstance(stored_hash, bytes):
+                stored_hash = stored_hash.encode('ascii')
+
+            return logcontext.defer_to_thread(self.hs.get_reactor(), _do_validate_hash)
         else:
             return defer.succeed(False)
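The hashing itself is unchanged: NFKC-normalise the password, append the server's pepper, and hand the bytes to bcrypt. What changed is that stored_hash is now expected as bytes (hence the encode('ascii') fallback) and the blocking work is pushed to a thread via logcontext.defer_to_thread. The synchronous core, as a sketch run inline rather than on the reactor's thread pool:

    import unicodedata
    import bcrypt

    def hash_password(password, pepper, rounds=12):
        pw = unicodedata.normalize("NFKC", password)
        return bcrypt.hashpw(pw.encode("utf8") + pepper.encode("utf8"),
                             bcrypt.gensalt(rounds))

    def check_password(password, pepper, stored_hash):
        # bcrypt.checkpw requires bytes, hence the encode fallback above.
        if not isinstance(stored_hash, bytes):
            stored_hash = stored_hash.encode("ascii")
        pw = unicodedata.normalize("NFKC", password)
        return bcrypt.checkpw(pw.encode("utf8") + pepper.encode("utf8"), stored_hash)

    h = hash_password(u"s3cret", "pepper")
    assert check_password(u"s3cret", "pepper", h)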
@@ -17,8 +17,8 @@ import logging
 from twisted.internet import defer
 
 from synapse.api.errors import SynapseError
+from synapse.metrics.background_process_metrics import run_as_background_process
 from synapse.types import UserID, create_requester
-from synapse.util.logcontext import run_in_background
 
 from ._base import BaseHandler
 
@@ -125,7 +125,7 @@ class DeactivateAccountHandler(BaseHandler):
             None
         """
         if not self._user_parter_running:
-            run_in_background(self._user_parter_loop)
+            run_as_background_process("user_parter_loop", self._user_parter_loop)
 
     @defer.inlineCallbacks
     def _user_parter_loop(self):
@@ -20,7 +20,14 @@ import string
 from twisted.internet import defer
 
 from synapse.api.constants import EventTypes
-from synapse.api.errors import AuthError, CodeMessageException, Codes, SynapseError
+from synapse.api.errors import (
+    AuthError,
+    CodeMessageException,
+    Codes,
+    NotFoundError,
+    StoreError,
+    SynapseError,
+)
 from synapse.types import RoomAlias, UserID, get_domain_from_id
 
 from ._base import BaseHandler
 
@@ -36,6 +43,7 @@ class DirectoryHandler(BaseHandler):
         self.state = hs.get_state_handler()
         self.appservice_handler = hs.get_application_service_handler()
         self.event_creation_handler = hs.get_event_creation_handler()
+        self.config = hs.config
 
         self.federation = hs.get_federation_client()
         hs.get_federation_registry().register_query_handler(
@@ -73,43 +81,75 @@ class DirectoryHandler(BaseHandler):
         )
 
     @defer.inlineCallbacks
-    def create_association(self, user_id, room_alias, room_id, servers=None):
-        # association creation for human users
-        # TODO(erikj): Do user auth.
+    def create_association(self, requester, room_alias, room_id, servers=None,
+                           send_event=True):
+        """Attempt to create a new alias
 
-        if not self.spam_checker.user_may_create_room_alias(user_id, room_alias):
-            raise SynapseError(
-                403, "This user is not permitted to create this alias",
-            )
+        Args:
+            requester (Requester)
+            room_alias (RoomAlias)
+            room_id (str)
+            servers (list[str]|None): List of servers that others servers
+                should try and join via
+            send_event (bool): Whether to send an updated m.room.aliases event
 
-        can_create = yield self.can_modify_alias(
-            room_alias,
-            user_id=user_id
-        )
-        if not can_create:
-            raise SynapseError(
-                400, "This alias is reserved by an application service.",
-                errcode=Codes.EXCLUSIVE
-            )
-        yield self._create_association(room_alias, room_id, servers, creator=user_id)
+        Returns:
+            Deferred
+        """
 
-    @defer.inlineCallbacks
-    def create_appservice_association(self, service, room_alias, room_id,
-                                      servers=None):
-        if not service.is_interested_in_alias(room_alias.to_string()):
-            raise SynapseError(
-                400, "This application service has not reserved"
-                " this kind of alias.", errcode=Codes.EXCLUSIVE
-            )
+        user_id = requester.user.to_string()
+
+        service = requester.app_service
+        if service:
+            if not service.is_interested_in_alias(room_alias.to_string()):
+                raise SynapseError(
+                    400, "This application service has not reserved"
+                    " this kind of alias.", errcode=Codes.EXCLUSIVE
+                )
+        else:
+            if not self.spam_checker.user_may_create_room_alias(user_id, room_alias):
+                raise AuthError(
+                    403, "This user is not permitted to create this alias",
+                )
+
+            if not self.config.is_alias_creation_allowed(user_id, room_alias.to_string()):
+                # Lets just return a generic message, as there may be all sorts of
+                # reasons why we said no. TODO: Allow configurable error messages
+                # per alias creation rule?
+                raise SynapseError(
+                    403, "Not allowed to create alias",
+                )
+
+            can_create = yield self.can_modify_alias(
+                room_alias,
+                user_id=user_id
+            )
+            if not can_create:
+                raise AuthError(
+                    400, "This alias is reserved by an application service.",
+                    errcode=Codes.EXCLUSIVE
+                )
 
-        # association creation for app services
-        yield self._create_association(room_alias, room_id, servers)
+        yield self._create_association(room_alias, room_id, servers, creator=user_id)
+        if send_event:
+            yield self.send_room_alias_update_event(
+                requester,
+                room_id
+            )
 
     @defer.inlineCallbacks
-    def delete_association(self, requester, user_id, room_alias):
+    def delete_association(self, requester, room_alias):
         # association deletion for human users
 
-        can_delete = yield self._user_can_delete_alias(room_alias, user_id)
+        user_id = requester.user.to_string()
+
+        try:
+            can_delete = yield self._user_can_delete_alias(room_alias, user_id)
+        except StoreError as e:
+            if e.code == 404:
+                raise NotFoundError("Unknown room alias")
+            raise
 
         if not can_delete:
             raise AuthError(
                 403, "You don't have permission to delete the alias.",
@@ -130,7 +170,6 @@ class DirectoryHandler(BaseHandler):
         try:
             yield self.send_room_alias_update_event(
                 requester,
-                requester.user.to_string(),
                 room_id
             )
 
@@ -248,7 +287,7 @@ class DirectoryHandler(BaseHandler):
         )
 
     @defer.inlineCallbacks
-    def send_room_alias_update_event(self, requester, user_id, room_id):
+    def send_room_alias_update_event(self, requester, room_id):
         aliases = yield self.store.get_aliases_for_room(room_id)
 
         yield self.event_creation_handler.create_and_send_nonmember_event(
@@ -257,7 +296,7 @@ class DirectoryHandler(BaseHandler):
                 "type": EventTypes.Aliases,
                 "state_key": self.hs.hostname,
                 "room_id": room_id,
-                "sender": user_id,
+                "sender": requester.user.to_string(),
                 "content": {"aliases": aliases},
             },
             ratelimit=False
@@ -320,7 +359,7 @@ class DirectoryHandler(BaseHandler):
     def _user_can_delete_alias(self, alias, user_id):
         creator = yield self.store.get_room_alias_creator(alias.to_string())
 
-        if creator and creator == user_id:
+        if creator is not None and creator == user_id:
             defer.returnValue(True)
 
         is_admin = yield self.auth.is_server_admin(UserID.from_string(user_id))
@@ -330,7 +330,8 @@ class E2eKeysHandler(object):
                     (algorithm, key_id, ex_json, key)
                 )
             else:
-                new_keys.append((algorithm, key_id, encode_canonical_json(key)))
+                new_keys.append((
+                    algorithm, key_id, encode_canonical_json(key).decode('ascii')))
 
         yield self.store.add_e2e_one_time_keys(
             user_id, device_id, time_now, new_keys
@@ -340,7 +341,7 @@ class E2eKeysHandler(object):
 def _exception_to_failure(e):
     if isinstance(e, CodeMessageException):
         return {
-            "status": e.code, "message": e.message,
+            "status": e.code, "message": str(e),
         }
 
     if isinstance(e, NotRetryingDestination):
@@ -358,7 +359,7 @@ def _exception_to_failure(e):
     # Note that some Exceptions (notably twisted's ResponseFailed etc) don't
     # give a string for e.message, which json then fails to serialize.
     return {
-        "status": 503, "message": str(e.message),
+        "status": 503, "message": str(e),
     }
289
synapse/handlers/e2e_room_keys.py
Normal file
289
synapse/handlers/e2e_room_keys.py
Normal file
|
@ -0,0 +1,289 @@
# -*- coding: utf-8 -*-
# Copyright 2017, 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from six import iteritems

from twisted.internet import defer

from synapse.api.errors import RoomKeysVersionError, StoreError, SynapseError
from synapse.util.async_helpers import Linearizer

logger = logging.getLogger(__name__)


class E2eRoomKeysHandler(object):
    """
    Implements an optional realtime backup mechanism for encrypted E2E megolm room keys.
    This gives a way for users to store and recover their megolm keys if they lose all
    their clients. It should also extend easily to future room key mechanisms.
    The actual payload of the encrypted keys is completely opaque to the handler.
    """

    def __init__(self, hs):
        self.store = hs.get_datastore()

        # Used to lock whenever a client is uploading key data.  This prevents collisions
        # between clients trying to upload the details of a new session, given all
        # clients belonging to a user will receive and try to upload a new session at
        # roughly the same time.  Also used to lock out uploads when the key is being
        # changed.
        self._upload_linearizer = Linearizer("upload_room_keys_lock")

    @defer.inlineCallbacks
    def get_room_keys(self, user_id, version, room_id=None, session_id=None):
        """Bulk get the E2E room keys for a given backup, optionally filtered to a given
        room, or a given session.
        See EndToEndRoomKeyStore.get_e2e_room_keys for full details.

        Args:
            user_id(str): the user whose keys we're getting
            version(str): the version ID of the backup we're getting keys from
            room_id(string): room ID to get keys for, or None to get keys for all rooms
            session_id(string): session ID to get keys for, or None to get keys for all
                sessions
        Returns:
            A deferred list of dicts giving the session_data and message metadata for
            these room keys.
        """

        # we deliberately take the lock to get keys so that changing the version
        # works atomically
        with (yield self._upload_linearizer.queue(user_id)):
            results = yield self.store.get_e2e_room_keys(
                user_id, version, room_id, session_id
            )

            if results['rooms'] == {}:
                raise SynapseError(404, "No room_keys found")

            defer.returnValue(results)

    @defer.inlineCallbacks
    def delete_room_keys(self, user_id, version, room_id=None, session_id=None):
        """Bulk delete the E2E room keys for a given backup, optionally filtered to a
        given room or a given session.
        See EndToEndRoomKeyStore.delete_e2e_room_keys for full details.

        Args:
            user_id(str): the user whose backup we're deleting
            version(str): the version ID of the backup we're deleting
            room_id(string): room ID to delete keys for, or None to delete keys for all
                rooms
            session_id(string): session ID to delete keys for, or None to delete keys
                for all sessions
        Returns:
            A deferred of the deletion transaction
        """

        # lock for consistency with uploading
        with (yield self._upload_linearizer.queue(user_id)):
            yield self.store.delete_e2e_room_keys(user_id, version, room_id, session_id)

    @defer.inlineCallbacks
    def upload_room_keys(self, user_id, version, room_keys):
        """Bulk upload a list of room keys into a given backup version, asserting
        that the given version is the current backup version.  room_keys are merged
        into the current backup as described in RoomKeysServlet.on_PUT().

        Args:
            user_id(str): the user whose backup we're setting
            version(str): the version ID of the backup we're updating
            room_keys(dict): a nested dict describing the room_keys we're setting:

        {
            "rooms": {
                "!abc:matrix.org": {
                    "sessions": {
                        "c0ff33": {
                            "first_message_index": 1,
                            "forwarded_count": 1,
                            "is_verified": false,
                            "session_data": "SSBBTSBBIEZJU0gK"
                        }
                    }
                }
            }
        }

        Raises:
            SynapseError: with code 404 if there are no versions defined
            RoomKeysVersionError: if the uploaded version is not the current version
        """

        # TODO: Validate the JSON to make sure it has the right keys.

        # XXX: perhaps we should use a finer grained lock here?
        with (yield self._upload_linearizer.queue(user_id)):

            # Check that the version we're trying to upload is the current version
            try:
                version_info = yield self.store.get_e2e_room_keys_version_info(user_id)
            except StoreError as e:
                if e.code == 404:
                    raise SynapseError(404, "Version '%s' not found" % (version,))
                else:
                    raise

            if version_info['version'] != version:
                # Check that the version we're trying to upload actually exists
                try:
                    version_info = yield self.store.get_e2e_room_keys_version_info(
                        user_id, version,
                    )
                    # if we get this far, the version must exist
                    raise RoomKeysVersionError(current_version=version_info['version'])
                except StoreError as e:
                    if e.code == 404:
                        raise SynapseError(404, "Version '%s' not found" % (version,))
                    else:
                        raise

            # go through the room_keys.
            # XXX: this should/could be done concurrently, given we're in a lock.
            for room_id, room in iteritems(room_keys['rooms']):
                for session_id, session in iteritems(room['sessions']):
                    yield self._upload_room_key(
                        user_id, version, room_id, session_id, session
                    )

    @defer.inlineCallbacks
    def _upload_room_key(self, user_id, version, room_id, session_id, room_key):
        """Upload a given room_key for a given room and session into a given
        version of the backup.  Merges the key with any which might already exist.

        Args:
            user_id(str): the user whose backup we're setting
            version(str): the version ID of the backup we're updating
            room_id(str): the ID of the room whose keys we're setting
            session_id(str): the session whose room_key we're setting
            room_key(dict): the room_key being set
        """

        # get the room_key for this particular row
        current_room_key = None
        try:
            current_room_key = yield self.store.get_e2e_room_key(
                user_id, version, room_id, session_id
            )
        except StoreError as e:
            if e.code == 404:
                pass
            else:
                raise

        if self._should_replace_room_key(current_room_key, room_key):
            yield self.store.set_e2e_room_key(
                user_id, version, room_id, session_id, room_key
            )

    @staticmethod
    def _should_replace_room_key(current_room_key, room_key):
        """
        Determine whether to replace a given current_room_key (if any)
        with a newly uploaded room_key backup

        Args:
            current_room_key (dict): Optional, the current room_key dict if any
            room_key (dict): The new room_key dict which may or may not be fit to
                replace the current_room_key

        Returns:
            True if current_room_key should be replaced by room_key in the backup
        """

        if current_room_key:
            # spelt out with if/elifs rather than nested boolean expressions
            # purely for legibility.

            if room_key['is_verified'] and not current_room_key['is_verified']:
                return True
            elif (
                room_key['first_message_index'] <
                current_room_key['first_message_index']
            ):
                return True
            elif room_key['forwarded_count'] < current_room_key['forwarded_count']:
                return True
            else:
                return False
        return True

    @defer.inlineCallbacks
    def create_version(self, user_id, version_info):
        """Create a new backup version.  This automatically becomes the new
        backup version for the user's keys; previous backups will no longer be
        writeable.

        Args:
            user_id(str): the user whose backup version we're creating
            version_info(dict): metadata about the new version being created

        {
            "algorithm": "m.megolm_backup.v1",
            "auth_data": "dGhpcyBzaG91bGQgYWN0dWFsbHkgYmUgZW5jcnlwdGVkIGpzb24K"
        }

        Returns:
            A deferred of a string that gives the new version number.
        """

        # TODO: Validate the JSON to make sure it has the right keys.

        # lock everyone out until we've switched version
        with (yield self._upload_linearizer.queue(user_id)):
            new_version = yield self.store.create_e2e_room_keys_version(
                user_id, version_info
            )
            defer.returnValue(new_version)

    @defer.inlineCallbacks
    def get_version_info(self, user_id, version=None):
        """Get the info about a given version of the user's backup

        Args:
            user_id(str): the user whose current backup version we're querying
            version(str): Optional; if None gives the most recent version
                otherwise a historical one.
        Raises:
            StoreError: code 404 if the requested backup version doesn't exist
        Returns:
            A deferred of an info dict that gives the info about the new version.

        {
            "version": "1234",
            "algorithm": "m.megolm_backup.v1",
            "auth_data": "dGhpcyBzaG91bGQgYWN0dWFsbHkgYmUgZW5jcnlwdGVkIGpzb24K"
        }
        """

        with (yield self._upload_linearizer.queue(user_id)):
            res = yield self.store.get_e2e_room_keys_version_info(user_id, version)
            defer.returnValue(res)

    @defer.inlineCallbacks
    def delete_version(self, user_id, version=None):
        """Deletes a given version of the user's e2e_room_keys backup

        Args:
            user_id(str): the user whose current backup version we're deleting
            version(str): the version id of the backup being deleted
        Raises:
            StoreError: code 404 if this backup version doesn't exist
        """

        with (yield self._upload_linearizer.queue(user_id)):
            yield self.store.delete_e2e_room_keys_version(user_id, version)
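The replace-or-keep rule in _should_replace_room_key prefers verified keys, then keys covering earlier messages, then keys with fewer forwardings. A standalone restatement of the same decision table (illustrative data, not the handler's actual code path):

    def should_replace(current, new):
        # Mirrors the handler's precedence: verification wins, then a lower
        # first_message_index, then a lower forwarded_count.
        if current is None:
            return True
        if new['is_verified'] and not current['is_verified']:
            return True
        if new['first_message_index'] < current['first_message_index']:
            return True
        if new['forwarded_count'] < current['forwarded_count']:
            return True
        return False

    current = {'is_verified': False, 'first_message_index': 3, 'forwarded_count': 2}
    better = {'is_verified': False, 'first_message_index': 1, 'forwarded_count': 2}
    assert should_replace(current, better)       # covers earlier messages
    assert not should_replace(current, current)  # an identical key is kept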
synapse/handlers/federation.py

@@ -18,7 +18,6 @@

 import itertools
 import logging
-import sys

 import six
 from six import iteritems, itervalues
@@ -54,7 +53,7 @@ from synapse.replication.http.federation import (
     ReplicationFederationSendEventsRestServlet,
 )
 from synapse.replication.http.membership import ReplicationUserJoinedLeftRoomRestServlet
-from synapse.state import resolve_events_with_factory
+from synapse.state import StateResolutionStore, resolve_events_with_store
 from synapse.types import UserID, get_domain_from_id
 from synapse.util import logcontext, unwrapFirstError
 from synapse.util.async_helpers import Linearizer
@@ -69,6 +68,27 @@ from ._base import BaseHandler
 logger = logging.getLogger(__name__)


+def shortstr(iterable, maxitems=5):
+    """If iterable has maxitems or fewer, return the stringification of a list
+    containing those items.
+
+    Otherwise, return the stringification of a list with the first maxitems items,
+    followed by "...".
+
+    Args:
+        iterable (Iterable): iterable to truncate
+        maxitems (int): number of items to return before truncating
+
+    Returns:
+        unicode
+    """
+
+    items = list(itertools.islice(iterable, maxitems + 1))
+    if len(items) <= maxitems:
+        return str(items)
+    return u"[" + u", ".join(repr(r) for r in items[:maxitems]) + u", ...]"
+
+
 class FederationHandler(BaseHandler):
     """Handles events that originated from federation.
     Responsible for:
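shortstr keeps log lines bounded when an event references many prev_events. Roughly, under the definition added above:

    import itertools

    def shortstr(iterable, maxitems=5):  # same definition as in the hunk above
        items = list(itertools.islice(iterable, maxitems + 1))
        if len(items) <= maxitems:
            return str(items)
        return u"[" + u", ".join(repr(r) for r in items[:maxitems]) + u", ...]"

    assert shortstr(["a", "b"]) == "['a', 'b']"          # short: printed in full
    assert shortstr(range(10), maxitems=3) == "[0, 1, 2, ...]"  # long: truncated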
@@ -85,7 +105,7 @@ class FederationHandler(BaseHandler):

         self.hs = hs

-        self.store = hs.get_datastore()
+        self.store = hs.get_datastore()  # type: synapse.storage.DataStore
         self.federation_client = hs.get_federation_client()
         self.state_handler = hs.get_state_handler()
         self.server_name = hs.hostname
@@ -114,9 +134,8 @@ class FederationHandler(BaseHandler):
         self._room_pdu_linearizer = Linearizer("fed_room_pdu")

     @defer.inlineCallbacks
-    @log_function
     def on_receive_pdu(
-            self, origin, pdu, get_missing=True, sent_to_us_directly=False,
+            self, origin, pdu, sent_to_us_directly=False,
     ):
         """ Process a PDU received via a federation /send/ transaction, or
         via backfill of missing prev_events
@@ -125,14 +144,23 @@ class FederationHandler(BaseHandler):
             origin (str): server which initiated the /send/ transaction. Will
                 be used to fetch missing events or state.
             pdu (FrozenEvent): received PDU
-            get_missing (bool): True if we should fetch missing prev_events
+            sent_to_us_directly (bool): True if this event was pushed to us; False if
+                we pulled it as the result of a missing prev_event.

         Returns (Deferred): completes with None
         """

+        room_id = pdu.room_id
+        event_id = pdu.event_id
+
+        logger.info(
+            "[%s %s] handling received PDU: %s",
+            room_id, event_id, pdu,
+        )
+
         # We reprocess pdus when we have seen them only as outliers
         existing = yield self.store.get_event(
-            pdu.event_id,
+            event_id,
             allow_none=True,
             allow_rejected=True,
         )
@@ -147,7 +175,7 @@ class FederationHandler(BaseHandler):
             )
         )
         if already_seen:
-            logger.debug("Already seen pdu %s", pdu.event_id)
+            logger.debug("[%s %s]: Already seen pdu", room_id, event_id)
             return

         # do some initial sanity-checking of the event. In particular, make
@@ -156,6 +184,7 @@ class FederationHandler(BaseHandler):
         try:
             self._sanity_check_event(pdu)
         except SynapseError as err:
+            logger.warn("[%s %s] Received event failed sanity checks", room_id, event_id)
             raise FederationError(
                 "ERROR",
                 err.code,
@@ -165,10 +194,12 @@ class FederationHandler(BaseHandler):

         # If we are currently in the process of joining this room, then we
         # queue up events for later processing.
-        if pdu.room_id in self.room_queues:
-            logger.info("Ignoring PDU %s for room %s from %s for now; join "
-                        "in progress", pdu.event_id, pdu.room_id, origin)
-            self.room_queues[pdu.room_id].append((pdu, origin))
+        if room_id in self.room_queues:
+            logger.info(
+                "[%s %s] Queuing PDU from %s for now: join in progress",
+                room_id, event_id, origin,
+            )
+            self.room_queues[room_id].append((pdu, origin))
             return

         # If we're no longer in the room just ditch the event entirely. This
@@ -179,7 +210,7 @@ class FederationHandler(BaseHandler):
         # we should check if we *are* in fact in the room. If we are then we
         # can magically rejoin the room.
         is_in_room = yield self.auth.check_host_in_room(
-            pdu.room_id,
+            room_id,
             self.server_name
         )
         if not is_in_room:
@@ -188,8 +219,8 @@ class FederationHandler(BaseHandler):
             )
             if was_in_room:
                 logger.info(
-                    "Ignoring PDU %s for room %s from %s as we've left the room!",
-                    pdu.event_id, pdu.room_id, origin,
+                    "[%s %s] Ignoring PDU from %s as we've left the room",
+                    room_id, event_id, origin,
                 )
                 defer.returnValue(None)
@@ -204,8 +235,8 @@ class FederationHandler(BaseHandler):
         )

         logger.debug(
-            "_handle_new_pdu min_depth for %s: %d",
-            pdu.room_id, min_depth
+            "[%s %s] min_depth: %d",
+            room_id, event_id, min_depth,
         )

         prevs = {e_id for e_id, _ in pdu.prev_events}
|
||||||
# send to the clients.
|
# send to the clients.
|
||||||
pdu.internal_metadata.outlier = True
|
pdu.internal_metadata.outlier = True
|
||||||
elif min_depth and pdu.depth > min_depth:
|
elif min_depth and pdu.depth > min_depth:
|
||||||
if get_missing and prevs - seen:
|
missing_prevs = prevs - seen
|
||||||
|
if sent_to_us_directly and missing_prevs:
|
||||||
# If we're missing stuff, ensure we only fetch stuff one
|
# If we're missing stuff, ensure we only fetch stuff one
|
||||||
# at a time.
|
# at a time.
|
||||||
logger.info(
|
logger.info(
|
||||||
"Acquiring lock for room %r to fetch %d missing events: %r...",
|
"[%s %s] Acquiring room lock to fetch %d missing prev_events: %s",
|
||||||
pdu.room_id, len(prevs - seen), list(prevs - seen)[:5],
|
room_id, event_id, len(missing_prevs), shortstr(missing_prevs),
|
||||||
)
|
)
|
||||||
with (yield self._room_pdu_linearizer.queue(pdu.room_id)):
|
with (yield self._room_pdu_linearizer.queue(pdu.room_id)):
|
||||||
logger.info(
|
logger.info(
|
||||||
"Acquired lock for room %r to fetch %d missing events",
|
"[%s %s] Acquired room lock to fetch %d missing prev_events",
|
||||||
pdu.room_id, len(prevs - seen),
|
room_id, event_id, len(missing_prevs),
|
||||||
)
|
)
|
||||||
|
|
||||||
yield self._get_missing_events_for_pdu(
|
yield self._get_missing_events_for_pdu(
|
||||||
|
@ -241,68 +273,150 @@ class FederationHandler(BaseHandler):
|
||||||
|
|
||||||
if not prevs - seen:
|
if not prevs - seen:
|
||||||
logger.info(
|
logger.info(
|
||||||
"Found all missing prev events for %s", pdu.event_id
|
"[%s %s] Found all missing prev_events",
|
||||||
|
room_id, event_id,
|
||||||
)
|
)
|
||||||
elif prevs - seen:
|
elif missing_prevs:
|
||||||
logger.info(
|
logger.info(
|
||||||
"Not fetching %d missing events for room %r,event %s: %r...",
|
"[%s %s] Not recursively fetching %d missing prev_events: %s",
|
||||||
len(prevs - seen), pdu.room_id, pdu.event_id,
|
room_id, event_id, len(missing_prevs), shortstr(missing_prevs),
|
||||||
list(prevs - seen)[:5],
|
|
||||||
)
|
)
|
||||||
|
|
||||||
if sent_to_us_directly and prevs - seen:
|
if prevs - seen:
|
||||||
# If they have sent it to us directly, and the server
|
# We've still not been able to get all of the prev_events for this event.
|
||||||
# isn't telling us about the auth events that it's
|
#
|
||||||
# made a message referencing, we explode
|
# In this case, we need to fall back to asking another server in the
|
||||||
raise FederationError(
|
# federation for the state at this event. That's ok provided we then
|
||||||
"ERROR",
|
# resolve the state against other bits of the DAG before using it (which
|
||||||
403,
|
# will ensure that you can't just take over a room by sending an event,
|
||||||
(
|
# withholding its prev_events, and declaring yourself to be an admin in
|
||||||
"Your server isn't divulging details about prev_events "
|
# the subsequent state request).
|
||||||
"referenced in this event."
|
#
|
||||||
),
|
# Now, if we're pulling this event as a missing prev_event, then clearly
|
||||||
affected=pdu.event_id,
|
# this event is not going to become the only forward-extremity and we are
|
||||||
)
|
# guaranteed to resolve its state against our existing forward
|
||||||
elif prevs - seen:
|
# extremities, so that should be fine.
|
||||||
# Calculate the state of the previous events, and
|
#
|
||||||
# de-conflict them to find the current state.
|
# On the other hand, if this event was pushed to us, it is possible for
|
||||||
state_groups = []
|
# it to become the only forward-extremity in the room, and we would then
|
||||||
|
# trust its state to be the state for the whole room. This is very bad.
|
||||||
|
# Further, if the event was pushed to us, there is no excuse for us not to
|
||||||
|
# have all the prev_events. We therefore reject any such events.
|
||||||
|
#
|
||||||
|
# XXX this really feels like it could/should be merged with the above,
|
||||||
|
# but there is an interaction with min_depth that I'm not really
|
||||||
|
# following.
|
||||||
|
|
||||||
|
if sent_to_us_directly:
|
||||||
|
logger.warn(
|
||||||
|
"[%s %s] Rejecting: failed to fetch %d prev events: %s",
|
||||||
|
room_id, event_id, len(prevs - seen), shortstr(prevs - seen)
|
||||||
|
)
|
||||||
|
raise FederationError(
|
||||||
|
"ERROR",
|
||||||
|
403,
|
||||||
|
(
|
||||||
|
"Your server isn't divulging details about prev_events "
|
||||||
|
"referenced in this event."
|
||||||
|
),
|
||||||
|
affected=pdu.event_id,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Calculate the state after each of the previous events, and
|
||||||
|
# resolve them to find the correct state at the current event.
|
||||||
auth_chains = set()
|
auth_chains = set()
|
||||||
|
event_map = {
|
||||||
|
event_id: pdu,
|
||||||
|
}
|
||||||
try:
|
try:
|
||||||
# Get the state of the events we know about
|
# Get the state of the events we know about
|
||||||
ours = yield self.store.get_state_groups(pdu.room_id, list(seen))
|
ours = yield self.store.get_state_groups_ids(room_id, seen)
|
||||||
state_groups.append(ours)
|
|
||||||
|
# state_maps is a list of mappings from (type, state_key) to event_id
|
||||||
|
# type: list[dict[tuple[str, str], str]]
|
||||||
|
state_maps = list(ours.values())
|
||||||
|
|
||||||
|
# we don't need this any more, let's delete it.
|
||||||
|
del ours
|
||||||
|
|
||||||
# Ask the remote server for the states we don't
|
# Ask the remote server for the states we don't
|
||||||
# know about
|
# know about
|
||||||
for p in prevs - seen:
|
for p in prevs - seen:
|
||||||
state, got_auth_chain = (
|
logger.info(
|
||||||
yield self.federation_client.get_state_for_room(
|
"[%s %s] Requesting state at missing prev_event %s",
|
||||||
origin, pdu.room_id, p
|
room_id, event_id, p,
|
||||||
|
)
|
||||||
|
|
||||||
|
with logcontext.nested_logging_context(p):
|
||||||
|
# note that if any of the missing prevs share missing state or
|
||||||
|
# auth events, the requests to fetch those events are deduped
|
||||||
|
# by the get_pdu_cache in federation_client.
|
||||||
|
remote_state, got_auth_chain = (
|
||||||
|
yield self.federation_client.get_state_for_room(
|
||||||
|
origin, room_id, p,
|
||||||
|
)
|
||||||
)
|
)
|
||||||
)
|
|
||||||
auth_chains.update(got_auth_chain)
|
|
||||||
state_group = {(x.type, x.state_key): x.event_id for x in state}
|
|
||||||
state_groups.append(state_group)
|
|
||||||
|
|
||||||
# Resolve any conflicting state
|
# we want the state *after* p; get_state_for_room returns the
|
||||||
def fetch(ev_ids):
|
# state *before* p.
|
||||||
return self.store.get_events(
|
remote_event = yield self.federation_client.get_pdu(
|
||||||
ev_ids, get_prev_content=False, check_redacted=False
|
[origin], p, outlier=True,
|
||||||
)
|
)
|
||||||
|
|
||||||
state_map = yield resolve_events_with_factory(
|
if remote_event is None:
|
||||||
state_groups, {pdu.event_id: pdu}, fetch
|
raise Exception(
|
||||||
|
"Unable to get missing prev_event %s" % (p, )
|
||||||
|
)
|
||||||
|
|
||||||
|
if remote_event.is_state():
|
||||||
|
remote_state.append(remote_event)
|
||||||
|
|
||||||
|
# XXX hrm I'm not convinced that duplicate events will compare
|
||||||
|
# for equality, so I'm not sure this does what the author
|
||||||
|
# hoped.
|
||||||
|
auth_chains.update(got_auth_chain)
|
||||||
|
|
||||||
|
remote_state_map = {
|
||||||
|
(x.type, x.state_key): x.event_id for x in remote_state
|
||||||
|
}
|
||||||
|
state_maps.append(remote_state_map)
|
||||||
|
|
||||||
|
for x in remote_state:
|
||||||
|
event_map[x.event_id] = x
|
||||||
|
|
||||||
|
room_version = yield self.store.get_room_version(room_id)
|
||||||
|
state_map = yield resolve_events_with_store(
|
||||||
|
room_version, state_maps, event_map,
|
||||||
|
state_res_store=StateResolutionStore(self.store),
|
||||||
)
|
)
|
||||||
|
|
||||||
state = (yield self.store.get_events(state_map.values())).values()
|
# We need to give _process_received_pdu the actual state events
|
||||||
|
# rather than event ids, so generate that now.
|
||||||
|
|
||||||
|
# First though we need to fetch all the events that are in
|
||||||
|
# state_map, so we can build up the state below.
|
||||||
|
evs = yield self.store.get_events(
|
||||||
|
list(state_map.values()),
|
||||||
|
get_prev_content=False,
|
||||||
|
check_redacted=False,
|
||||||
|
)
|
||||||
|
event_map.update(evs)
|
||||||
|
|
||||||
|
state = [
|
||||||
|
event_map[e] for e in six.itervalues(state_map)
|
||||||
|
]
|
||||||
auth_chain = list(auth_chains)
|
auth_chain = list(auth_chains)
|
||||||
except Exception:
|
except Exception:
|
||||||
|
logger.warn(
|
||||||
|
"[%s %s] Error attempting to resolve state at missing "
|
||||||
|
"prev_events",
|
||||||
|
room_id, event_id, exc_info=True,
|
||||||
|
)
|
||||||
raise FederationError(
|
raise FederationError(
|
||||||
"ERROR",
|
"ERROR",
|
||||||
403,
|
403,
|
||||||
"We can't get valid state history.",
|
"We can't get valid state history.",
|
||||||
affected=pdu.event_id,
|
affected=event_id,
|
||||||
)
|
)
|
||||||
|
|
||||||
yield self._process_received_pdu(
|
yield self._process_received_pdu(
|
||||||
|
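Each entry in state_maps is a mapping from (event type, state key) to an event ID, and the resolver reconciles those maps into a single view. A toy sketch of the shape of the data, with hypothetical IDs; the trivial "last map wins" merge below is NOT what Synapse's resolver does:

    # Two conflicting state maps, keyed by (event_type, state_key)
    ours = {("m.room.name", ""): "$name1:hs", ("m.room.member", "@a:hs"): "$m1:hs"}
    theirs = {("m.room.name", ""): "$name2:hs"}
    state_maps = [ours, theirs]

    # Stand-in merge purely to show the data shape; the real resolver
    # applies the room version's state-resolution rules instead.
    resolved = {}
    for m in state_maps:
        resolved.update(m)
    assert resolved[("m.room.name", "")] == "$name2:hs"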
@@ -321,15 +435,16 @@ class FederationHandler(BaseHandler):
             prevs (set(str)): List of event ids which we are missing
             min_depth (int): Minimum depth of events to return.
         """
-        # We recalculate seen, since it may have changed.
+
+        room_id = pdu.room_id
+        event_id = pdu.event_id
+
         seen = yield self.store.have_seen_events(prevs)

         if not prevs - seen:
             return

-        latest = yield self.store.get_latest_event_ids_in_room(
-            pdu.room_id
-        )
+        latest = yield self.store.get_latest_event_ids_in_room(room_id)

         # We add the prev events that we have seen to the latest
         # list to ensure the remote server doesn't give them to us
@@ -337,8 +452,8 @@ class FederationHandler(BaseHandler):
         latest |= seen

         logger.info(
-            "Missing %d events for room %r pdu %s: %r...",
-            len(prevs - seen), pdu.room_id, pdu.event_id, list(prevs - seen)[:5]
+            "[%s %s]: Requesting missing events between %s and %s",
+            room_id, event_id, shortstr(latest), event_id,
         )

         # XXX: we set timeout to 10s to help workaround
@@ -359,49 +474,88 @@ class FederationHandler(BaseHandler):
         # apparently.
         #
         # see https://github.com/matrix-org/synapse/pull/1744
+        #
+        # ----
+        #
+        # Update richvdh 2018/09/18: There are a number of problems with timing this
+        # request out aggressively on the client side:
+        #
+        # - it plays badly with the server-side rate-limiter, which starts tarpitting you
+        #   if you send too many requests at once, so you end up with the server carefully
+        #   working through the backlog of your requests, which you have already timed
+        #   out.
+        #
+        # - for this request in particular, we now (as of
+        #   https://github.com/matrix-org/synapse/pull/3456) reject any PDUs where the
+        #   server can't produce a plausible-looking set of prev_events - so we become
+        #   much more likely to reject the event.
+        #
+        # - contrary to what it says above, we do *not* fall back to fetching fresh state
+        #   for the room if get_missing_events times out. Rather, we give up processing
+        #   the PDU whose prevs we are missing, which then makes it much more likely that
+        #   we'll end up back here for the *next* PDU in the list, which exacerbates the
+        #   problem.
+        #
+        # - the aggressive 10s timeout was introduced to deal with incoming federation
+        #   requests taking 8 hours to process. It's not entirely clear why that was going
+        #   on; certainly there were other issues causing traffic storms which are now
+        #   resolved, and I think in any case we may be more sensible about our locking
+        #   now. We're *certainly* more sensible about our logging.
+        #
+        # All that said: Let's try increasing the timeout to 60s and see what happens.
+
         missing_events = yield self.federation_client.get_missing_events(
             origin,
-            pdu.room_id,
+            room_id,
             earliest_events_ids=list(latest),
             latest_events=[pdu],
             limit=10,
             min_depth=min_depth,
-            timeout=10000,
+            timeout=60000,
         )

         logger.info(
-            "Got %d events: %r...",
-            len(missing_events), [e.event_id for e in missing_events[:5]]
+            "[%s %s]: Got %d prev_events: %s",
+            room_id, event_id, len(missing_events), shortstr(missing_events),
         )

         # We want to sort these by depth so we process them and
         # tell clients about them in order.
         missing_events.sort(key=lambda x: x.depth)

-        for e in missing_events:
-            logger.info("Handling found event %s", e.event_id)
-            try:
-                yield self.on_receive_pdu(
-                    origin,
-                    e,
-                    get_missing=False
-                )
-            except FederationError as e:
-                if e.code == 403:
-                    logger.warn("Event %s failed history check.")
-                else:
-                    raise
+        for ev in missing_events:
+            logger.info(
+                "[%s %s] Handling received prev_event %s",
+                room_id, event_id, ev.event_id,
+            )
+            with logcontext.nested_logging_context(ev.event_id):
+                try:
+                    yield self.on_receive_pdu(
+                        origin,
+                        ev,
+                        sent_to_us_directly=False,
+                    )
+                except FederationError as e:
+                    if e.code == 403:
+                        logger.warn(
+                            "[%s %s] Received prev_event %s failed history check.",
+                            room_id, event_id, ev.event_id,
+                        )
+                    else:
+                        raise

-    @log_function
     @defer.inlineCallbacks
-    def _process_received_pdu(self, origin, pdu, state, auth_chain):
+    def _process_received_pdu(self, origin, event, state, auth_chain):
         """ Called when we have a new pdu. We need to do auth checks and put it
         through the StateHandler.
         """
-        event = pdu
+        room_id = event.room_id
+        event_id = event.event_id

-        logger.debug("Processing event: %s", event)
+        logger.debug(
+            "[%s %s] Processing event: %s",
+            room_id, event_id, event,
+        )

         # FIXME (erikj): Awful hack to make the case where we are not currently
         # in the room work
@@ -410,15 +564,16 @@ class FederationHandler(BaseHandler):
         # event.
         if state and auth_chain and not event.internal_metadata.is_outlier():
             is_in_room = yield self.auth.check_host_in_room(
-                event.room_id,
+                room_id,
                 self.server_name
             )
         else:
             is_in_room = True

         if not is_in_room:
             logger.info(
-                "Got event for room we're not in: %r %r",
-                event.room_id, event.event_id
+                "[%s %s] Got event for room we're not in",
+                room_id, event_id,
             )

             try:
@@ -430,7 +585,7 @@ class FederationHandler(BaseHandler):
                     "ERROR",
                     e.code,
                     e.msg,
-                    affected=event.event_id,
+                    affected=event_id,
                 )

         else:
@@ -463,6 +618,10 @@ class FederationHandler(BaseHandler):
                 })
                 seen_ids.add(e.event_id)

+        logger.info(
+            "[%s %s] persisting newly-received auth/state events %s",
+            room_id, event_id, [e["event"].event_id for e in event_infos]
+        )
         yield self._handle_new_events(origin, event_infos)

         try:
@@ -479,12 +638,12 @@ class FederationHandler(BaseHandler):
                 affected=event.event_id,
             )

-        room = yield self.store.get_room(event.room_id)
+        room = yield self.store.get_room(room_id)

         if not room:
             try:
                 yield self.store.store_room(
-                    room_id=event.room_id,
+                    room_id=room_id,
                     room_creator_user_id="",
                     is_public=False,
                 )
@@ -512,7 +671,7 @@ class FederationHandler(BaseHandler):

             if newly_joined:
                 user = UserID.from_string(event.state_key)
-                yield self.user_joined_room(user, event.room_id)
+                yield self.user_joined_room(user, room_id)

     @log_function
     @defer.inlineCallbacks
@@ -593,7 +752,7 @@ class FederationHandler(BaseHandler):

         required_auth = set(
             a_id
-            for event in events + state_events.values() + auth_events.values()
+            for event in events + list(state_events.values()) + list(auth_events.values())
             for a_id, _ in event.auth_events
         )
         auth_events.update({
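On Python 3, dict.values() returns a view that cannot be concatenated to a list with +, so the views must be materialised first. A minimal illustration:

    events = ["ev1"]
    state_events = {"k": "ev2"}

    # events + state_events.values() raises TypeError on Python 3:
    #   can only concatenate list (not "dict_values") to list
    combined = events + list(state_events.values())
    assert combined == ["ev1", "ev2"]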
@@ -801,7 +960,7 @@ class FederationHandler(BaseHandler):
                 )
                 continue
             except NotRetryingDestination as e:
-                logger.info(e.message)
+                logger.info(str(e))
                 continue
             except FederationDeniedError as e:
                 logger.info(e)
@@ -1026,7 +1185,8 @@ class FederationHandler(BaseHandler):
             try:
                 logger.info("Processing queued PDU %s which was received "
                             "while we were joining %s", p.event_id, p.room_id)
-                yield self.on_receive_pdu(origin, p)
+                with logcontext.nested_logging_context(p.event_id):
+                    yield self.on_receive_pdu(origin, p, sent_to_us_directly=True)
             except Exception as e:
                 logger.warn(
                     "Error handling queued PDU %s from %s: %s",
@@ -1357,7 +1517,7 @@ class FederationHandler(BaseHandler):
         )

         if state_groups:
-            _, state = state_groups.items().pop()
+            _, state = list(state_groups.items()).pop()
             results = state

             if event.is_state():
@@ -1429,12 +1589,10 @@ class FederationHandler(BaseHandler):
         else:
             defer.returnValue(None)

-    @log_function
     def get_min_depth_for_context(self, context):
         return self.store.get_min_depth(context)

     @defer.inlineCallbacks
-    @log_function
     def _handle_new_event(self, origin, event, state=None, auth_events=None,
                           backfilled=False):
         context = yield self._prep_event(
@@ -1443,6 +1601,9 @@ class FederationHandler(BaseHandler):
             auth_events=auth_events,
         )

+        # reraise does not allow inlineCallbacks to preserve the stacktrace, so we
+        # hack around with a try/finally instead.
+        success = False
         try:
             if not event.internal_metadata.is_outlier() and not backfilled:
                 yield self.action_generator.handle_push_actions_for_event(
@@ -1453,15 +1614,13 @@ class FederationHandler(BaseHandler):
                 [(event, context)],
                 backfilled=backfilled,
             )
-        except:  # noqa: E722, as we reraise the exception this is fine.
-            tp, value, tb = sys.exc_info()
-
-            logcontext.run_in_background(
-                self.store.remove_push_actions_from_staging,
-                event.event_id,
-            )
-
-            six.reraise(tp, value, tb)
+            success = True
+        finally:
+            if not success:
+                logcontext.run_in_background(
+                    self.store.remove_push_actions_from_staging,
+                    event.event_id,
+                )

         defer.returnValue(context)
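The try/finally-with-flag pattern gives the same cleanup-on-error behaviour as re-raising from an except block, without disturbing the traceback that @defer.inlineCallbacks reconstructs. A self-contained sketch of the pattern:

    def do_work(cleanup, risky):
        # Cleanup runs only on failure; the original traceback is untouched
        # because we never catch and re-raise the exception ourselves.
        success = False
        try:
            result = risky()
            success = True
            return result
        finally:
            if not success:
                cleanup()

    calls = []
    try:
        do_work(lambda: calls.append("cleaned"), lambda: 1 / 0)
    except ZeroDivisionError:
        pass
    assert calls == ["cleaned"]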
@@ -1474,15 +1633,22 @@ class FederationHandler(BaseHandler):

         Notifies about the events where appropriate.
         """
-        contexts = yield logcontext.make_deferred_yieldable(defer.gatherResults(
-            [
-                logcontext.run_in_background(
-                    self._prep_event,
+
+        @defer.inlineCallbacks
+        def prep(ev_info):
+            event = ev_info["event"]
+            with logcontext.nested_logging_context(suffix=event.event_id):
+                res = yield self._prep_event(
                     origin,
-                    ev_info["event"],
+                    event,
                     state=ev_info.get("state"),
                     auth_events=ev_info.get("auth_events"),
                 )
+            defer.returnValue(res)
+
+        contexts = yield logcontext.make_deferred_yieldable(defer.gatherResults(
+            [
+                logcontext.run_in_background(prep, ev_info)
                 for ev_info in event_infos
             ], consumeErrors=True,
         ))
@@ -1634,8 +1800,8 @@ class FederationHandler(BaseHandler):
             )
         except AuthError as e:
             logger.warn(
-                "Rejecting %s because %s",
-                event.event_id, e.msg
+                "[%s %s] Rejecting: %s",
+                event.room_id, event.event_id, e.msg
             )

             context.rejected = RejectedReason.AUTH_ERROR
@@ -1686,7 +1852,7 @@ class FederationHandler(BaseHandler):

     @defer.inlineCallbacks
     def on_get_missing_events(self, origin, room_id, earliest_events,
-                              latest_events, limit, min_depth):
+                              latest_events, limit):
         in_room = yield self.auth.check_host_in_room(
             room_id,
             origin
@@ -1695,14 +1861,12 @@ class FederationHandler(BaseHandler):
             raise AuthError(403, "Host not in room.")

         limit = min(limit, 20)
-        min_depth = max(min_depth, 0)

         missing_events = yield self.store.get_missing_events(
             room_id=room_id,
             earliest_events=earliest_events,
             latest_events=latest_events,
             limit=limit,
-            min_depth=min_depth,
         )

         missing_events = yield filter_events_for_server(
@@ -1828,7 +1992,10 @@ class FederationHandler(BaseHandler):
             (d.type, d.state_key): d for d in different_events if d
         })

-        new_state = self.state_handler.resolve_events(
+        room_version = yield self.store.get_room_version(event.room_id)
+
+        new_state = yield self.state_handler.resolve_events(
+            room_version,
             [list(local_view.values()), list(remote_view.values())],
             event
         )
@@ -2353,7 +2520,7 @@ class FederationHandler(BaseHandler):

         if not backfilled:  # Never notify for backfilled events
             for event, _ in event_and_contexts:
-                self._notify_persisted_event(event, max_stream_id)
+                yield self._notify_persisted_event(event, max_stream_id)

     def _notify_persisted_event(self, event, max_stream_id):
         """Checks to see if notifier/pushers should be notified about the
@@ -2386,7 +2553,7 @@ class FederationHandler(BaseHandler):
             extra_users=extra_users
         )

-        self.pusher_pool.on_new_notifications(
+        return self.pusher_pool.on_new_notifications(
             event_stream_id, max_stream_id,
         )
Some files were not shown because too many files have changed in this diff.