Merge branch 'develop' of github.com:matrix-org/synapse into neilj/readme-wellknown

Neil Johnson 2019-02-28 15:28:15 +00:00
commit de1c6cb736
91 changed files with 902 additions and 284 deletions

13
.buildkite/.env Normal file

@@ -0,0 +1,13 @@
CI
BUILDKITE
BUILDKITE_BUILD_NUMBER
BUILDKITE_BRANCH
BUILDKITE_BUILD_NUMBER
BUILDKITE_JOB_ID
BUILDKITE_BUILD_URL
BUILDKITE_PROJECT_SLUG
BUILDKITE_COMMIT
BUILDKITE_PULL_REQUEST
BUILDKITE_TAG
CODECOV_TOKEN
TRIAL_FLAGS

21
.buildkite/docker-compose.py27.pg94.yaml Normal file

@@ -0,0 +1,21 @@
version: '3.1'
services:
postgres:
image: postgres:9.4
environment:
POSTGRES_PASSWORD: postgres
testenv:
image: python:2.7
depends_on:
- postgres
env_file: .env
environment:
SYNAPSE_POSTGRES_HOST: postgres
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
working_dir: /app
volumes:
- ..:/app

21
.buildkite/docker-compose.py27.pg95.yaml Normal file

@@ -0,0 +1,21 @@
version: '3.1'
services:
postgres:
image: postgres:9.5
environment:
POSTGRES_PASSWORD: postgres
testenv:
image: python:2.7
depends_on:
- postgres
env_file: .env
environment:
SYNAPSE_POSTGRES_HOST: postgres
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
working_dir: /app
volumes:
- ..:/app

21
.buildkite/docker-compose.py35.pg94.yaml Normal file

@@ -0,0 +1,21 @@
version: '3.1'
services:
postgres:
image: postgres:9.4
environment:
POSTGRES_PASSWORD: postgres
testenv:
image: python:3.5
depends_on:
- postgres
env_file: .env
environment:
SYNAPSE_POSTGRES_HOST: postgres
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
working_dir: /app
volumes:
- ..:/app

21
.buildkite/docker-compose.py35.pg95.yaml Normal file

@@ -0,0 +1,21 @@
version: '3.1'
services:
postgres:
image: postgres:9.5
environment:
POSTGRES_PASSWORD: postgres
testenv:
image: python:3.5
depends_on:
- postgres
env_file: .env
environment:
SYNAPSE_POSTGRES_HOST: postgres
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
working_dir: /app
volumes:
- ..:/app

21
.buildkite/docker-compose.py37.pg11.yaml Normal file

@@ -0,0 +1,21 @@
version: '3.1'
services:
postgres:
image: postgres:11
environment:
POSTGRES_PASSWORD: postgres
testenv:
image: python:3.7
depends_on:
- postgres
env_file: .env
environment:
SYNAPSE_POSTGRES_HOST: postgres
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
working_dir: /app
volumes:
- ..:/app

21
.buildkite/docker-compose.py37.pg95.yaml Normal file

@@ -0,0 +1,21 @@
version: '3.1'
services:
postgres:
image: postgres:9.5
environment:
POSTGRES_PASSWORD: postgres
testenv:
image: python:3.7
depends_on:
- postgres
env_file: .env
environment:
SYNAPSE_POSTGRES_HOST: postgres
SYNAPSE_POSTGRES_USER: postgres
SYNAPSE_POSTGRES_PASSWORD: postgres
working_dir: /app
volumes:
- ..:/app
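These compose files only start the database; the test container is expected to pick up the SYNAPSE_POSTGRES_* variables defined above. A rough sketch of how a test harness might consume them (the variable names come from these files; the connection code itself is illustrative, not Synapse's):

import os

import psycopg2

# Connect using the environment injected by the compose files above.
conn = psycopg2.connect(
    host=os.environ.get("SYNAPSE_POSTGRES_HOST", "localhost"),
    user=os.environ.get("SYNAPSE_POSTGRES_USER", "postgres"),
    password=os.environ.get("SYNAPSE_POSTGRES_PASSWORD", "postgres"),
    dbname="postgres",
)
cur = conn.cursor()
cur.execute("SELECT version();")
print(cur.fetchone()[0])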

149
.buildkite/pipeline.yml Normal file

@@ -0,0 +1,149 @@
env:
CODECOV_TOKEN: "2dd7eb9b-0eda-45fe-a47c-9b5ac040045f"
steps:
- command:
- "python -m pip install tox"
- "tox -e pep8"
label: "\U0001F9F9 PEP-8"
plugins:
- docker#v3.0.1:
image: "python:3.6"
- command:
- "python -m pip install tox"
- "tox -e packaging"
label: "\U0001F9F9 packaging"
plugins:
- docker#v3.0.1:
image: "python:3.6"
- command:
- "python -m pip install tox"
- "tox -e check_isort"
label: "\U0001F9F9 isort"
plugins:
- docker#v3.0.1:
image: "python:3.6"
- command:
- "python -m pip install tox"
- "scripts-dev/check-newsfragment"
label: ":newspaper: Newsfile"
branches: "!master !develop !release-*"
plugins:
- docker#v3.0.1:
image: "python:3.6"
propagate-environment: true
- wait
- command:
- "python -m pip install tox"
- "tox -e py27,codecov"
label: ":python: 2.7 / SQLite"
env:
TRIAL_FLAGS: "-j 2"
plugins:
- docker#v3.0.1:
image: "python:2.7"
propagate-environment: true
- command:
- "python -m pip install tox"
- "tox -e py35,codecov"
label: ":python: 3.5 / SQLite"
env:
TRIAL_FLAGS: "-j 2"
plugins:
- docker#v3.0.1:
image: "python:3.5"
propagate-environment: true
- command:
- "python -m pip install tox"
- "tox -e py36,codecov"
label: ":python: 3.6 / SQLite"
env:
TRIAL_FLAGS: "-j 2"
plugins:
- docker#v3.0.1:
image: "python:3.6"
propagate-environment: true
- command:
- "python -m pip install tox"
- "tox -e py37,codecov"
label: ":python: 3.7 / SQLite"
env:
TRIAL_FLAGS: "-j 2"
plugins:
- docker#v3.0.1:
image: "python:3.7"
propagate-environment: true
- label: ":python: 2.7 / :postgres: 9.4"
env:
TRIAL_FLAGS: "-j 4"
command:
- "bash -c 'python -m pip install tox && python -m tox -e py27-postgres,codecov'"
plugins:
- docker-compose#v2.1.0:
run: testenv
config:
- .buildkite/docker-compose.py27.pg94.yaml
- label: ":python: 2.7 / :postgres: 9.5"
env:
TRIAL_FLAGS: "-j 4"
command:
- "bash -c 'python -m pip install tox && python -m tox -e py27-postgres,codecov'"
plugins:
- docker-compose#v2.1.0:
run: testenv
config:
- .buildkite/docker-compose.py27.pg95.yaml
- label: ":python: 3.5 / :postgres: 9.4"
env:
TRIAL_FLAGS: "-j 4"
command:
- "bash -c 'python -m pip install tox && python -m tox -e py35-postgres,codecov'"
plugins:
- docker-compose#v2.1.0:
run: testenv
config:
- .buildkite/docker-compose.py35.pg94.yaml
- label: ":python: 3.5 / :postgres: 9.5"
env:
TRIAL_FLAGS: "-j 4"
command:
- "bash -c 'python -m pip install tox && python -m tox -e py35-postgres,codecov'"
plugins:
- docker-compose#v2.1.0:
run: testenv
config:
- .buildkite/docker-compose.py35.pg95.yaml
- label: ":python: 3.7 / :postgres: 9.5"
env:
TRIAL_FLAGS: "-j 4"
command:
- "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,codecov'"
plugins:
- docker-compose#v2.1.0:
run: testenv
config:
- .buildkite/docker-compose.py37.pg95.yaml
- label: ":python: 3.7 / :postgres: 11"
env:
TRIAL_FLAGS: "-j 4"
command:
- "bash -c 'python -m pip install tox && python -m tox -e py37-postgres,codecov'"
plugins:
- docker-compose#v2.1.0:
run: testenv
config:
- .buildkite/docker-compose.py37.pg11.yaml

97
.travis.yml

@@ -1,97 +0,0 @@
dist: xenial
language: python
cache:
directories:
# we only bother to cache the wheels; parts of the http cache get
# invalidated every build (because they get served with a max-age of 600
# seconds), which means that we end up re-uploading the whole cache for
# every build, which is time-consuming. In any case, it's not obvious that
# downloading the cache from S3 would be much faster than downloading the
# originals from pypi.
#
- $HOME/.cache/pip/wheels
# don't clone the whole repo history, one commit will do
git:
depth: 1
# only build branches we care about (PRs are built separately)
branches:
only:
- master
- develop
- /^release-v/
- rav/pg95
# When running the tox environments that call Twisted Trial, we can pass the -j
# flag to run the tests concurrently. We set this to 2 for CPU bound tests
# (SQLite) and 4 for I/O bound tests (PostgreSQL).
matrix:
fast_finish: true
include:
- name: "pep8"
python: 3.6
env: TOX_ENV="pep8,check_isort,packaging"
- name: "py2.7 / sqlite"
python: 2.7
env: TOX_ENV=py27,codecov TRIAL_FLAGS="-j 2"
- name: "py2.7 / sqlite / olddeps"
python: 2.7
env: TOX_ENV=py27-old TRIAL_FLAGS="-j 2"
- name: "py2.7 / postgres9.5"
python: 2.7
addons:
postgresql: "9.5"
env: TOX_ENV=py27-postgres,codecov TRIAL_FLAGS="-j 4"
services:
- postgresql
- name: "py3.5 / sqlite"
python: 3.5
env: TOX_ENV=py35,codecov TRIAL_FLAGS="-j 2"
- name: "py3.7 / sqlite"
python: 3.7
env: TOX_ENV=py37,codecov TRIAL_FLAGS="-j 2"
- name: "py3.7 / postgres9.4"
python: 3.7
addons:
postgresql: "9.4"
env: TOX_ENV=py37-postgres TRIAL_FLAGS="-j 4"
services:
- postgresql
- name: "py3.7 / postgres9.5"
python: 3.7
addons:
postgresql: "9.5"
env: TOX_ENV=py37-postgres,codecov TRIAL_FLAGS="-j 4"
services:
- postgresql
- # we only need to check for the newsfragment if it's a PR build
if: type = pull_request
name: "check-newsfragment"
python: 3.6
script: scripts-dev/check-newsfragment
install:
# this just logs the postgres version we will be testing against (if any)
- psql -At -U postgres -c 'select version();' || true
- pip install tox
# if we don't have python3.6 in this environment, travis unhelpfully gives us
# a `python3.6` on our path which does nothing but spit out a warning. Tox
# tries to run it (even if we're not running a py36 env), so the build logs
# then have warnings which look like errors. To reduce the noise, remove the
# non-functional python3.6.
- ( ! command -v python3.6 || python3.6 --version ) &>/dev/null || rm -f $(command -v python3.6)
script:
- tox -e $TOX_ENV

CHANGES.md

@@ -1,3 +1,62 @@
Synapse 0.99.2rc1 (2019-02-27)
==============================

Features
--------

- Added an HAProxy example in the reverse proxy documentation. Contributed by Benoît S. (“Benpro”). ([\#4541](https://github.com/matrix-org/synapse/issues/4541))
- Add basic optional sentry integration. ([\#4632](https://github.com/matrix-org/synapse/issues/4632), [\#4694](https://github.com/matrix-org/synapse/issues/4694))
- Transfer bans on room upgrade. ([\#4642](https://github.com/matrix-org/synapse/issues/4642))
- Add configurable room list publishing rules. ([\#4647](https://github.com/matrix-org/synapse/issues/4647))
- Support .well-known delegation when issuing certificates through ACME. ([\#4652](https://github.com/matrix-org/synapse/issues/4652))
- Allow registration and login to be handled by a worker instance. ([\#4666](https://github.com/matrix-org/synapse/issues/4666), [\#4670](https://github.com/matrix-org/synapse/issues/4670), [\#4682](https://github.com/matrix-org/synapse/issues/4682))
- Reduce the overhead of creating outbound federation connections over TLS by caching the TLS client options. ([\#4674](https://github.com/matrix-org/synapse/issues/4674))
- Add prometheus metrics for number of outgoing EDUs, by type. ([\#4695](https://github.com/matrix-org/synapse/issues/4695))
- Return correct error code when inviting a remote user to a room whose homeserver does not support the room version. ([\#4721](https://github.com/matrix-org/synapse/issues/4721))
- Prevent showing rooms to other servers that were set to not federate. ([\#4746](https://github.com/matrix-org/synapse/issues/4746))

Bugfixes
--------

- Fix possible exception when paginating. ([\#4263](https://github.com/matrix-org/synapse/issues/4263))
- The dependency checker now correctly reports a version mismatch for optional
dependencies, instead of reporting the dependency missing. ([\#4450](https://github.com/matrix-org/synapse/issues/4450))
- Set CORS headers on .well-known requests. ([\#4651](https://github.com/matrix-org/synapse/issues/4651))
- Fix kicking guest users on guest access revocation in worker mode. ([\#4667](https://github.com/matrix-org/synapse/issues/4667))
- Fix an issue in the database migration script where the
`e2e_room_keys.is_verified` column wasn't considered as
a boolean. ([\#4680](https://github.com/matrix-org/synapse/issues/4680))
- Fix TaskStopped exceptions in logs when outbound requests time out. ([\#4690](https://github.com/matrix-org/synapse/issues/4690))
- Fix ACME config for python 2. ([\#4717](https://github.com/matrix-org/synapse/issues/4717))
- Fix paginating over federation persisting incorrect state. ([\#4718](https://github.com/matrix-org/synapse/issues/4718))

Internal Changes
----------------

- Run `black` to reformat user directory code. ([\#4635](https://github.com/matrix-org/synapse/issues/4635))
- Reduce number of exceptions we log. ([\#4643](https://github.com/matrix-org/synapse/issues/4643), [\#4668](https://github.com/matrix-org/synapse/issues/4668))
- Introduce upsert batching functionality in the database layer. ([\#4644](https://github.com/matrix-org/synapse/issues/4644))
- Fix various spelling mistakes. ([\#4657](https://github.com/matrix-org/synapse/issues/4657))
- Cleanup request exception logging. ([\#4669](https://github.com/matrix-org/synapse/issues/4669), [\#4737](https://github.com/matrix-org/synapse/issues/4737), [\#4738](https://github.com/matrix-org/synapse/issues/4738))
- Improve replication performance by reducing cache invalidation traffic. ([\#4671](https://github.com/matrix-org/synapse/issues/4671), [\#4715](https://github.com/matrix-org/synapse/issues/4715), [\#4748](https://github.com/matrix-org/synapse/issues/4748))
- Test against Postgres 9.5 as well as 9.4. ([\#4676](https://github.com/matrix-org/synapse/issues/4676))
- Run unit tests against python 3.7. ([\#4677](https://github.com/matrix-org/synapse/issues/4677))
- Attempt to clarify installation instructions/config. ([\#4681](https://github.com/matrix-org/synapse/issues/4681))
- Clean up gitignores. ([\#4688](https://github.com/matrix-org/synapse/issues/4688))
- Minor tweaks to acme docs. ([\#4689](https://github.com/matrix-org/synapse/issues/4689))
- Improve the logging in the pusher process. ([\#4691](https://github.com/matrix-org/synapse/issues/4691))
- Better checks on newsfragments. ([\#4698](https://github.com/matrix-org/synapse/issues/4698), [\#4750](https://github.com/matrix-org/synapse/issues/4750))
- Avoid some redundant work when processing read receipts. ([\#4706](https://github.com/matrix-org/synapse/issues/4706))
- Run `push_receipts_to_remotes` as background job. ([\#4707](https://github.com/matrix-org/synapse/issues/4707))
- Add prometheus metrics for number of badge update pushes. ([\#4709](https://github.com/matrix-org/synapse/issues/4709))
- Reduce pusher logging on startup ([\#4716](https://github.com/matrix-org/synapse/issues/4716))
- Don't log exceptions when failing to fetch remote server keys. ([\#4722](https://github.com/matrix-org/synapse/issues/4722))
- Correctly proxy exception in frontend_proxy worker. ([\#4723](https://github.com/matrix-org/synapse/issues/4723))
- Add database version to phonehome stats. ([\#4753](https://github.com/matrix-org/synapse/issues/4753))

Synapse 0.99.1.1 (2019-02-14)
=============================

MANIFEST.in

@@ -39,6 +39,7 @@ prune .circleci
 prune .coveragerc
 prune debian
 prune .codecov.yml
+prune .buildkite
 exclude jenkins*
 recursive-exclude jenkins *.sh

README.rst

@@ -204,6 +204,8 @@ by installing the ``libjemalloc1`` package and adding this line to
     LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libjemalloc.so.1

+This can make a significant difference on Python 2.7 - it's unclear how
+much of an improvement it provides on Python 3.x.
+
 Upgrading an existing Synapse
 =============================

@@ -1 +0,0 @@
Prevent crash on pagination.

@@ -1,2 +0,0 @@
The dependency checker now correctly reports a version mismatch for optional
dependencies, instead of reporting the dependency missing.

@@ -1 +0,0 @@
Added an HAProxy example in the reverse proxy documentation. Contributed by Benoît S. (Benpro).

@@ -1 +0,0 @@
Add basic optional sentry integration

@@ -1 +0,0 @@
Run `black` to reformat user directory code.

@@ -1 +0,0 @@
Transfer bans on room upgrade.

@@ -1 +0,0 @@
Reduce number of exceptions we log

@@ -1 +0,0 @@
Introduce upsert batching functionality in the database layer.

@@ -1 +0,0 @@
Add configurable room list publishing rules

@@ -1 +0,0 @@
Set CORS headers on .well-known requests

@@ -1 +0,0 @@
Support .well-known delegation when issuing certificates through ACME.

@@ -1 +0,0 @@
Fix various spelling mistakes.

@@ -1 +0,0 @@
Allow registration and login to be handled by a worker instance.

@@ -1 +0,0 @@
Fix kicking guest users on guest access revocation in worker mode.

@@ -1 +0,0 @@
Reduce number of exceptions we log

@@ -1 +0,0 @@
Cleanup request exception logging

@@ -1 +0,0 @@
Allow registration and login to be handled by a worker instance.

@@ -1 +0,0 @@
Improve replication performance by reducing cache invalidation traffic.

@@ -1 +0,0 @@
Reduce the overhead of creating outbound federation connections over TLS by caching the TLS client options.

@@ -1 +0,0 @@
Test against Postgres 9.5 as well as 9.4

@@ -1 +0,0 @@
Run unit tests against python 3.7.

@@ -1,3 +0,0 @@
Fix an issue in the database migration script where the
`e2e_room_keys.is_verified` column wasn't considered as
a boolean

@@ -1 +0,0 @@
Attempt to clarify installation instructions/config

@@ -1 +0,0 @@
Allow registration and login to be handled by a worker instance.

@@ -1 +0,0 @@
Clean up gitignores

@@ -1 +0,0 @@
Minor tweaks to acme docs.

@@ -1 +0,0 @@
Fix TaskStopped exceptions in logs when outbound requests time out.

@@ -1 +0,0 @@
Improve the logging in the pusher process.

@@ -1 +0,0 @@
Add basic optional sentry integration

@@ -1 +0,0 @@
Add prometheus metrics for number of outgoing EDUs, by type.

@@ -1 +0,0 @@
Better checks on newsfragments

@@ -1 +0,0 @@
Avoid some redundant work when processing read receipts

@@ -1 +0,0 @@
Run push_receipts_to_remotes as background job.

@@ -1 +0,0 @@
Add prometheus metrics for number of badge update pushes.

@@ -1 +0,0 @@
Improve replication performance by reducing cache invalidation traffic.

@@ -1 +0,0 @@
Reduce pusher logging on startup

1
changelog.d/4740.bugfix Normal file

@@ -0,0 +1 @@
'event_id' is now a required parameter in federated state requests, as per the matrix spec.

1
changelog.d/4749.bugfix Normal file

@@ -0,0 +1 @@
Fix tightloop over connecting to replication server.

1
changelog.d/4752.misc Normal file

@@ -0,0 +1 @@
Change from TravisCI to Buildkite for CI.

1
changelog.d/4757.feature Normal file

@@ -0,0 +1 @@
Move server key queries to federation reader.

1
changelog.d/4757.misc Normal file

@@ -0,0 +1 @@
When presence is disabled don't send over replication.

1
changelog.d/4759.feature Normal file

@@ -0,0 +1 @@
Add support for /account/3pid REST endpoint to client_reader worker.

1
changelog.d/4763.bugfix Normal file

@@ -0,0 +1 @@
Fix parsing of Content-Disposition headers on remote media requests and URL previews.

5
debian/changelog vendored

@@ -1,8 +1,9 @@
-matrix-synapse-py3 (0.99.2) UNRELEASED; urgency=medium
+matrix-synapse-py3 (0.99.2rc1) stable; urgency=medium

   * Fix overwriting of config settings on upgrade.
+  * New synapse release 0.99.2rc1.

- -- Synapse Packaging team <packages@matrix.org>  Wed, 20 Feb 2019 17:11:25 +0000
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 27 Feb 2019 10:45:58 +0000

 matrix-synapse-py3 (0.99.1.1) stable; urgency=medium

docs/reverse_proxy.rst

@@ -79,7 +79,7 @@ Let's assume that we expect clients to connect to our server at
     SSLEngine on
     ServerName example.com;

-    <Location />
+    <Location /_matrix>
         ProxyPass http://127.0.0.1:8008/_matrix nocanon
         ProxyPassReverse http://127.0.0.1:8008/_matrix
     </Location>

docs/tcp_replication.rst

@@ -188,7 +188,9 @@ RDATA (S)
     A single update in a stream

 POSITION (S)
-    The position of the stream has been updated
+    The position of the stream has been updated. Sent to the client after all
+    missing updates for a stream have been sent to the client and they're now
+    up to date.

 ERROR (S, C)
     There was an error
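The replication protocol is line-based: each line is a command name followed by a space-separated payload. A minimal sketch of parsing the two commands documented above (the framing matches this document; the helper itself is illustrative, not Synapse's API):

def parse_replication_line(line):
    # Each line is "<NAME> <data>"; POSITION carries "<stream_name> <token>".
    name, _, data = line.partition(" ")
    if name == "POSITION":
        stream_name, _, token = data.partition(" ")
        return ("position", stream_name, int(token))
    if name == "ERROR":
        return ("error", data)
    return (name, data)

print(parse_replication_line("POSITION events 12345"))
# ('position', 'events', 12345)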

docs/workers.rst

@@ -182,6 +182,7 @@ endpoints matching the following regular expressions::
     ^/_matrix/federation/v1/event_auth/
     ^/_matrix/federation/v1/exchange_third_party_invite/
     ^/_matrix/federation/v1/send/
+    ^/_matrix/key/v2/query

 The above endpoints should all be routed to the federation_reader worker by the
 reverse-proxy configuration.

@@ -223,6 +224,7 @@ following regular expressions::

     ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/members$
     ^/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state$
     ^/_matrix/client/(api/v1|r0|unstable)/login$
+    ^/_matrix/client/(api/v1|r0|unstable)/account/3pid$

 Additionally, the following REST endpoints can be handled, but all requests must
 be routed to the same instance::
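As a quick sanity check, the new client_reader rule (taken verbatim from the list above) can be exercised against some candidate paths:

import re

PATTERN = re.compile(r"^/_matrix/client/(api/v1|r0|unstable)/account/3pid$")

for path in (
    "/_matrix/client/r0/account/3pid",         # matches
    "/_matrix/client/unstable/account/3pid",   # matches
    "/_matrix/client/r0/account/3pid/email",   # no match: the $ anchor
):
    print(path, bool(PATTERN.match(path)))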

scripts-dev/check-newsfragment

@@ -6,7 +6,8 @@
 set -e

 # make sure that origin/develop is up to date
-git fetch origin develop
+git remote set-branches --add origin develop
+git fetch --depth=1 origin develop

 UPSTREAM=origin/develop

@@ -25,11 +26,15 @@ if git diff --name-only $UPSTREAM... | grep -qv '^develop/'; then
     tox -e check-newsfragment
 fi

+echo
+echo "--------------------------"
+echo
+
 # check that any new newsfiles on this branch end with a full stop.
-for f in git diff --name-only $UPSTREAM... -- changelog.d; do
+for f in `git diff --name-only $UPSTREAM... -- changelog.d`; do
     lastchar=`tr -d '\n' < $f | tail -c 1`
     if [ $lastchar != '.' ]; then
-        echo "Newsfragment $f does not end with a '.'" >&2
+        echo -e "\e[31mERROR: newsfragment $f does not end with a '.'\e[39m" >&2
         exit 1
     fi
 done

synapse/__init__.py

@@ -27,4 +27,4 @@ try:
 except ImportError:
     pass

-__version__ = "0.99.1.1"
+__version__ = "0.99.2rc1"

synapse/app/client_reader.py

@@ -48,6 +48,7 @@ from synapse.rest.client.v1.room import (
     RoomMemberListRestServlet,
     RoomStateRestServlet,
 )
+from synapse.rest.client.v2_alpha.account import ThreepidRestServlet
 from synapse.rest.client.v2_alpha.register import RegisterRestServlet
 from synapse.server import HomeServer
 from synapse.storage.engines import create_engine

@@ -96,6 +97,7 @@ class ClientReaderServer(HomeServer):
                     RoomEventContextServlet(self).register(resource)
                     RegisterRestServlet(self).register(resource)
                     LoginRestServlet(self).register(resource)
+                    ThreepidRestServlet(self).register(resource)

                     resources.update({
                         "/_matrix/client/r0": resource,

synapse/app/federation_reader.py

@@ -21,7 +21,7 @@ from twisted.web.resource import NoResource
 import synapse
 from synapse import events
-from synapse.api.urls import FEDERATION_PREFIX
+from synapse.api.urls import FEDERATION_PREFIX, SERVER_KEY_V2_PREFIX
 from synapse.app import _base
 from synapse.config._base import ConfigError
 from synapse.config.homeserver import HomeServerConfig

@@ -44,6 +44,7 @@ from synapse.replication.slave.storage.registration import SlavedRegistrationSto
 from synapse.replication.slave.storage.room import RoomStore
 from synapse.replication.slave.storage.transactions import SlavedTransactionStore
 from synapse.replication.tcp.client import ReplicationClientHandler
+from synapse.rest.key.v2 import KeyApiV2Resource
 from synapse.server import HomeServer
 from synapse.storage.engines import create_engine
 from synapse.util.httpresourcetree import create_resource_tree

@@ -99,6 +100,9 @@ class FederationReaderServer(HomeServer):
                     ),
                 })

+                if name in ["keys", "federation"]:
+                    resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)
+
         root_resource = create_resource_tree(resources, NoResource())

         _base.listen_tcp(

synapse/app/frontend_proxy.py

@@ -21,7 +21,7 @@ from twisted.web.resource import NoResource
 import synapse
 from synapse import events
-from synapse.api.errors import SynapseError
+from synapse.api.errors import HttpResponseException, SynapseError
 from synapse.app import _base
 from synapse.config._base import ConfigError
 from synapse.config.homeserver import HomeServerConfig

@@ -66,10 +66,15 @@ class PresenceStatusStubServlet(ClientV1RestServlet):
         headers = {
             "Authorization": auth_headers,
         }
-        result = yield self.http_client.get_json(
-            self.main_uri + request.uri.decode('ascii'),
-            headers=headers,
-        )
+
+        try:
+            result = yield self.http_client.get_json(
+                self.main_uri + request.uri.decode('ascii'),
+                headers=headers,
+            )
+        except HttpResponseException as e:
+            raise e.to_synapse_error()
+
         defer.returnValue((200, result))

     @defer.inlineCallbacks

synapse/app/homeserver.py

@@ -555,6 +555,9 @@ def run(hs):
stats["memory_rss"] += process.memory_info().rss stats["memory_rss"] += process.memory_info().rss
stats["cpu_average"] += int(process.cpu_percent(interval=None)) stats["cpu_average"] += int(process.cpu_percent(interval=None))
stats["database_engine"] = hs.get_datastore().database_engine_name
stats["database_server_version"] = hs.get_datastore().get_server_version()
logger.info("Reporting stats to matrix.org: %s" % (stats,)) logger.info("Reporting stats to matrix.org: %s" % (stats,))
try: try:
yield hs.get_simple_http_client().put_json( yield hs.get_simple_http_client().put_json(
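With this change the periodic phone-home report gains two database fields alongside the existing resource metrics. A sketch of the resulting payload shape (all values here are invented for illustration):

# Invented example values; only the two database_* keys are new in this commit.
stats = {
    "memory_rss": 123456789,
    "cpu_average": 12,
    "database_engine": "psycopg2",
    "database_server_version": "9.5.14",
}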

synapse/config/captcha.py

@@ -47,5 +47,5 @@ class CaptchaConfig(Config):
         #captcha_bypass_secret: "YOUR_SECRET_HERE"

         # The API endpoint to use for verifying m.login.recaptcha responses.
-        recaptcha_siteverify_api: "https://www.google.com/recaptcha/api/siteverify"
+        recaptcha_siteverify_api: "https://www.recaptcha.net/recaptcha/api/siteverify"
         """

synapse/config/tls.py

@@ -19,6 +19,8 @@ import warnings
 from datetime import datetime
 from hashlib import sha256

+import six
+
 from unpaddedbase64 import encode_base64
 from OpenSSL import crypto

@@ -36,9 +38,11 @@ class TlsConfig(Config):
             acme_config = {}

         self.acme_enabled = acme_config.get("enabled", False)
-        self.acme_url = acme_config.get(
+
+        # hyperlink complains on py2 if this is not a Unicode
+        self.acme_url = six.text_type(acme_config.get(
             "url", u"https://acme-v01.api.letsencrypt.org/directory"
-        )
+        ))
         self.acme_port = acme_config.get("port", 80)
         self.acme_bind_addresses = acme_config.get("bind_addresses", ['::', '0.0.0.0'])
         self.acme_reprovision_threshold = acme_config.get("reprovision_threshold", 30)

@@ -55,7 +59,7 @@ class TlsConfig(Config):
             )
         if not self.tls_private_key_file:
             raise ConfigError(
-                "tls_certificate_path must be specified if TLS-enabled listeners are "
+                "tls_private_key_path must be specified if TLS-enabled listeners are "
                 "configured."
             )

synapse/crypto/keyring.py

@@ -17,6 +17,7 @@
 import logging
 from collections import namedtuple

+from six import raise_from
 from six.moves import urllib

 from signedjson.key import (

@@ -35,7 +36,12 @@ from unpaddedbase64 import decode_base64

 from twisted.internet import defer

-from synapse.api.errors import Codes, RequestSendFailed, SynapseError
+from synapse.api.errors import (
+    Codes,
+    HttpResponseException,
+    RequestSendFailed,
+    SynapseError,
+)
 from synapse.util import logcontext, unwrapFirstError
 from synapse.util.logcontext import (
     LoggingContext,

@@ -44,6 +50,7 @@ from synapse.util.logcontext import (
     run_in_background,
 )
 from synapse.util.metrics import Measure
+from synapse.util.retryutils import NotRetryingDestination

 logger = logging.getLogger(__name__)

@@ -367,13 +374,18 @@ class Keyring(object):
                     server_name_and_key_ids, perspective_name, perspective_keys
                 )
                 defer.returnValue(result)
+            except KeyLookupError as e:
+                logger.warning(
+                    "Key lookup failed from %r: %s", perspective_name, e,
+                )
             except Exception as e:
                 logger.exception(
                     "Unable to get key from %r: %s %s",
                     perspective_name,
                     type(e).__name__, str(e),
                 )
-                defer.returnValue({})
+
+            defer.returnValue({})

         results = yield logcontext.make_deferred_yieldable(defer.gatherResults(
             [

@@ -421,21 +433,30 @@ class Keyring(object):
         # TODO(mark): Set the minimum_valid_until_ts to that needed by
         # the events being validated or the current time if validating
         # an incoming request.
-        query_response = yield self.client.post_json(
-            destination=perspective_name,
-            path="/_matrix/key/v2/query",
-            data={
-                u"server_keys": {
-                    server_name: {
-                        key_id: {
-                            u"minimum_valid_until_ts": 0
-                        } for key_id in key_ids
-                    }
-                    for server_name, key_ids in server_names_and_key_ids
-                }
-            },
-            long_retries=True,
-        )
+        try:
+            query_response = yield self.client.post_json(
+                destination=perspective_name,
+                path="/_matrix/key/v2/query",
+                data={
+                    u"server_keys": {
+                        server_name: {
+                            key_id: {
+                                u"minimum_valid_until_ts": 0
+                            } for key_id in key_ids
+                        }
+                        for server_name, key_ids in server_names_and_key_ids
+                    }
+                },
+                long_retries=True,
+            )
+        except (NotRetryingDestination, RequestSendFailed) as e:
+            raise_from(
+                KeyLookupError("Failed to connect to remote server"), e,
+            )
+        except HttpResponseException as e:
+            raise_from(
+                KeyLookupError("Remote server returned an error"), e,
+            )

         keys = {}

@@ -502,11 +523,20 @@ class Keyring(object):
             if requested_key_id in keys:
                 continue

-            response = yield self.client.get_json(
-                destination=server_name,
-                path="/_matrix/key/v2/server/" + urllib.parse.quote(requested_key_id),
-                ignore_backoff=True,
-            )
+            try:
+                response = yield self.client.get_json(
+                    destination=server_name,
+                    path="/_matrix/key/v2/server/" + urllib.parse.quote(requested_key_id),
+                    ignore_backoff=True,
+                )
+            except (NotRetryingDestination, RequestSendFailed) as e:
+                raise_from(
+                    KeyLookupError("Failed to connect to remote server"), e,
+                )
+            except HttpResponseException as e:
+                raise_from(
+                    KeyLookupError("Remote server returned an error"), e,
+                )

             if (u"signatures" not in response
                     or server_name not in response[u"signatures"]):

synapse/federation/federation_client.py

@@ -33,6 +33,7 @@ from synapse.api.constants import (
 )
 from synapse.api.errors import (
     CodeMessageException,
+    Codes,
     FederationDeniedError,
     HttpResponseException,
     SynapseError,

@@ -792,10 +793,25 @@ class FederationClient(FederationBase):
             defer.returnValue(content)
         except HttpResponseException as e:
             if e.code in [400, 404]:
+                err = e.to_synapse_error()
+
+                # If we receive an error response that isn't a generic error, we
+                # assume that the remote understands the v2 invite API and this
+                # is a legitimate error.
+                if err.errcode != Codes.UNKNOWN:
+                    raise err
+
+                # Otherwise, we assume that the remote server doesn't understand
+                # the v2 invite API.
+
                 if room_version in (RoomVersions.V1, RoomVersions.V2):
                     pass  # We'll fall through
                 else:
-                    raise Exception("Remote server is too old")
+                    raise SynapseError(
+                        400,
+                        "User's homeserver does not support this room version",
+                        Codes.UNSUPPORTED_ROOM_VERSION,
+                    )
             elif e.code == 403:
                 raise e.to_synapse_error()
             else:
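For context, Matrix error responses carry a machine-readable errcode, and the fallback above only fires when the remote returns the generic M_UNKNOWN code. A sketch of the two shapes the client distinguishes (the JSON bodies are illustrative, not captured from a live server):

def remote_understands_v2_invites(err_body):
    # Anything other than M_UNKNOWN is treated as a legitimate v2 error.
    return err_body["errcode"] != "M_UNKNOWN"

old_server = {"errcode": "M_UNKNOWN", "error": "Unrecognized request"}
v2_server = {
    "errcode": "M_UNSUPPORTED_ROOM_VERSION",
    "error": "Homeserver does not support this room version",
}

print(remote_understands_v2_invites(old_server))  # False: fall back to v1
print(remote_understands_v2_invites(v2_server))   # True: surface the error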

synapse/federation/federation_server.py

@@ -25,9 +25,10 @@ from twisted.internet import defer
 from twisted.internet.abstract import isIPAddress
 from twisted.python import failure

-from synapse.api.constants import EventTypes, Membership
+from synapse.api.constants import KNOWN_ROOM_VERSIONS, EventTypes, Membership
 from synapse.api.errors import (
     AuthError,
+    Codes,
     FederationError,
     IncompatibleRoomVersionError,
     NotFoundError,

@@ -239,8 +240,9 @@ class FederationServer(FederationBase):
                 f = failure.Failure()
                 pdu_results[event_id] = {"error": str(e)}
                 logger.error(
-                    "Failed to handle PDU %s: %s",
-                    event_id, f.getTraceback().rstrip(),
+                    "Failed to handle PDU %s",
+                    event_id,
+                    exc_info=(f.type, f.value, f.getTracebackObject()),
                 )

         yield concurrently_execute(

@@ -386,6 +388,13 @@ class FederationServer(FederationBase):
     @defer.inlineCallbacks
     def on_invite_request(self, origin, content, room_version):
+        if room_version not in KNOWN_ROOM_VERSIONS:
+            raise SynapseError(
+                400,
+                "Homeserver does not support this room version",
+                Codes.UNSUPPORTED_ROOM_VERSION,
+            )
+
         format_ver = room_version_to_event_format(room_version)

         pdu = event_from_pdu_json(content, format_ver)

@@ -877,6 +886,9 @@ class ReplicationFederationHandlerRegistry(FederationHandlerRegistry):
     def on_edu(self, edu_type, origin, content):
         """Overrides FederationHandlerRegistry
         """
+        if not self.config.use_presence and edu_type == "m.presence":
+            return
+
         handler = self.edu_handlers.get(edu_type)
         if handler:
             return super(ReplicationFederationHandlerRegistry, self).on_edu(

synapse/federation/transport/server.py

@@ -393,7 +393,7 @@ class FederationStateServlet(BaseFederationServlet):
         return self.handler.on_context_state_request(
             origin,
             context,
-            parse_string_from_args(query, "event_id", None),
+            parse_string_from_args(query, "event_id", None, required=True),
         )

@@ -404,7 +404,7 @@ class FederationStateIdsServlet(BaseFederationServlet):
         return self.handler.on_state_ids_request(
             origin,
             room_id,
-            parse_string_from_args(query, "event_id", None),
+            parse_string_from_args(query, "event_id", None, required=True),
         )

@@ -736,7 +736,8 @@ class PublicRoomList(BaseFederationServlet):
         data = yield self.handler.get_local_public_room_list(
             limit, since_token,
-            network_tuple=network_tuple
+            network_tuple=network_tuple,
+            from_federation=True,
         )
         defer.returnValue((200, data))

synapse/groups/groups_server.py

@@ -113,8 +113,7 @@ class GroupsServerHandler(object):
             room_id = room_entry["room_id"]
             joined_users = yield self.store.get_users_in_room(room_id)
             entry = yield self.room_list_handler.generate_room_entry(
-                room_id, len(joined_users),
-                with_alias=False, allow_private=True,
+                room_id, len(joined_users), with_alias=False, allow_private=True,
             )
             entry = dict(entry)  # so we don't change whats cached
             entry.pop("room_id", None)

@@ -544,8 +543,7 @@ class GroupsServerHandler(object):
         joined_users = yield self.store.get_users_in_room(room_id)
         entry = yield self.room_list_handler.generate_room_entry(
-            room_id, len(joined_users),
-            with_alias=False, allow_private=True,
+            room_id, len(joined_users), with_alias=False, allow_private=True,
         )
         if not entry:

synapse/handlers/federation.py

@@ -770,10 +770,26 @@ class FederationHandler(BaseHandler):
             set(auth_events.keys()) | set(state_events.keys())
         )

+        # We now have a chunk of events plus associated state and auth chain to
+        # persist. We do the persistence in two steps:
+        #    1. Auth events and state get persisted as outliers, plus the
+        #       backward extremities get persisted (as non-outliers).
+        #    2. The rest of the events in the chunk get persisted one by one, as
+        #       each one depends on the previous event for its state.
+        #
+        # The important thing is that events in the chunk get persisted as
+        # non-outliers, including when those events are also in the state or
+        # auth chain. Caution must therefore be taken to ensure that they are
+        # not accidentally marked as outliers.
+
+        # Step 1a: persist auth events that *don't* appear in the chunk
         ev_infos = []
         for a in auth_events.values():
-            if a.event_id in seen_events:
+            # We only want to persist auth events as outliers that we haven't
+            # seen and aren't about to persist as part of the backfilled chunk.
+            if a.event_id in seen_events or a.event_id in event_map:
                 continue
+
             a.internal_metadata.outlier = True
             ev_infos.append({
                 "event": a,

@@ -785,14 +801,21 @@ class FederationHandler(BaseHandler):
                 }
             })

+        # Step 1b: persist the events in the chunk we fetched state for (i.e.
+        # the backwards extremities) as non-outliers.
         for e_id in events_to_state:
+            # For paranoia we ensure that these events are marked as
+            # non-outliers
+            ev = event_map[e_id]
+            assert(not ev.internal_metadata.is_outlier())
+
             ev_infos.append({
-                "event": event_map[e_id],
+                "event": ev,
                 "state": events_to_state[e_id],
                 "auth_events": {
                     (auth_events[a_id].type, auth_events[a_id].state_key):
                     auth_events[a_id]
-                    for a_id in event_map[e_id].auth_event_ids()
+                    for a_id in ev.auth_event_ids()
                     if a_id in auth_events
                 }
             })

@@ -802,12 +825,17 @@ class FederationHandler(BaseHandler):
             backfilled=True,
         )

+        # Step 2: Persist the rest of the events in the chunk one by one
         events.sort(key=lambda e: e.depth)

         for event in events:
             if event in events_to_state:
                 continue

+            # For paranoia we ensure that these events are marked as
+            # non-outliers
+            assert(not event.internal_metadata.is_outlier())
+
             # We store these one at a time since each event depends on the
             # previous to work out the state.
             # TODO: We can probably do something more clever here.

synapse/handlers/pagination.py

@@ -136,7 +136,11 @@ class PaginationHandler(object):
logger.info("[purge] complete") logger.info("[purge] complete")
self._purges_by_id[purge_id].status = PurgeStatus.STATUS_COMPLETE self._purges_by_id[purge_id].status = PurgeStatus.STATUS_COMPLETE
except Exception: except Exception:
logger.error("[purge] failed: %s", Failure().getTraceback().rstrip()) f = Failure()
logger.error(
"[purge] failed",
exc_info=(f.type, f.value, f.getTracebackObject()),
)
self._purges_by_id[purge_id].status = PurgeStatus.STATUS_FAILED self._purges_by_id[purge_id].status = PurgeStatus.STATUS_FAILED
finally: finally:
self._purges_in_progress_by_room.discard(room_id) self._purges_in_progress_by_room.discard(room_id)
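This swaps string-formatted tracebacks for the standard exc_info mechanism, so logging handlers (and the new optional Sentry integration) see a real exception rather than pre-rendered text. The pattern, in a standalone sketch using twisted.python.failure directly:

import logging

from twisted.python.failure import Failure

logging.basicConfig()
logger = logging.getLogger(__name__)

def do_purge():
    raise RuntimeError("boom")

try:
    do_purge()
except Exception:
    # Failure() with no arguments captures the active exception; handing
    # the (type, value, traceback) triple to exc_info lets the logging
    # framework format it, instead of baking it into the message string.
    f = Failure()
    logger.error(
        "[purge] failed",
        exc_info=(f.type, f.value, f.getTracebackObject()),
    )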

synapse/handlers/register.py

@@ -460,7 +460,7 @@ class RegistrationHandler(BaseHandler):
         lines = response.split('\n')
         json = {
             "valid": lines[0] == 'true',
-            "error_url": "http://www.google.com/recaptcha/api/challenge?" +
+            "error_url": "http://www.recaptcha.net/recaptcha/api/challenge?" +
                          "error=%s" % lines[1]
         }
         defer.returnValue(json)

@@ -471,7 +471,7 @@ class RegistrationHandler(BaseHandler):
         Used only by c/s api v1
         """
         data = yield self.captcha_client.post_urlencoded_get_raw(
-            "http://www.google.com:80/recaptcha/api/verify",
+            "http://www.recaptcha.net:80/recaptcha/api/verify",
             args={
                 'privatekey': private_key,
                 'remoteip': ip_addr,

synapse/handlers/room_list.py

@@ -50,16 +50,17 @@ class RoomListHandler(BaseHandler):
     def get_local_public_room_list(self, limit=None, since_token=None,
                                    search_filter=None,
-                                   network_tuple=EMPTY_THIRD_PARTY_ID,):
+                                   network_tuple=EMPTY_THIRD_PARTY_ID,
+                                   from_federation=False):
         """Generate a local public room list.

         There are multiple different lists: the main one plus one per third
         party network. A client can ask for a specific list or to return all.

         Args:
-            limit (int)
-            since_token (str)
-            search_filter (dict)
+            limit (int|None)
+            since_token (str|None)
+            search_filter (dict|None)
             network_tuple (ThirdPartyInstanceID): Which public list to use.
                 This can be (None, None) to indicate the main list, or a particular
                 appservice and network id to use an appservice specific one.

@@ -87,14 +88,30 @@ class RoomListHandler(BaseHandler):
         return self.response_cache.wrap(
             key,
             self._get_public_room_list,
-            limit, since_token, network_tuple=network_tuple,
+            limit, since_token,
+            network_tuple=network_tuple, from_federation=from_federation,
         )

     @defer.inlineCallbacks
     def _get_public_room_list(self, limit=None, since_token=None,
                               search_filter=None,
                               network_tuple=EMPTY_THIRD_PARTY_ID,
+                              from_federation=False,
                               timeout=None,):
+        """Generate a public room list.
+
+        Args:
+            limit (int|None): Maximum amount of rooms to return.
+            since_token (str|None)
+            search_filter (dict|None): Dictionary to filter rooms by.
+            network_tuple (ThirdPartyInstanceID): Which public list to use.
+                This can be (None, None) to indicate the main list, or a particular
+                appservice and network id to use an appservice specific one.
+                Setting to None returns all public rooms across all lists.
+            from_federation (bool): Whether this request originated from a
+                federating server or a client. Used for room filtering.
+            timeout (int|None): Amount of seconds to wait for a response before
+                timing out.
+        """
         if since_token and since_token != "END":
             since_token = RoomListNextBatch.from_token(since_token)
         else:

@@ -217,7 +234,8 @@ class RoomListHandler(BaseHandler):
         yield concurrently_execute(
             lambda r: self._append_room_entry_to_chunk(
                 r, rooms_to_num_joined[r],
-                chunk, limit, search_filter
+                chunk, limit, search_filter,
+                from_federation=from_federation,
             ),
             batch, 5,
         )

@@ -288,23 +306,51 @@ class RoomListHandler(BaseHandler):
     @defer.inlineCallbacks
     def _append_room_entry_to_chunk(self, room_id, num_joined_users, chunk, limit,
-                                    search_filter):
+                                    search_filter, from_federation=False):
         """Generate the entry for a room in the public room list and append it
         to the `chunk` if it matches the search filter

+        Args:
+            room_id (str): The ID of the room.
+            num_joined_users (int): The number of joined users in the room.
+            chunk (list)
+            limit (int|None): Maximum amount of rooms to display. Function will
+                return if length of chunk is greater than limit + 1.
+            search_filter (dict|None)
+            from_federation (bool): Whether this request originated from a
+                federating server or a client. Used for room filtering.
         """
         if limit and len(chunk) > limit + 1:
             # We've already got enough, so lets just drop it.
             return

         result = yield self.generate_room_entry(room_id, num_joined_users)
+        if not result:
+            return

-        if result and _matches_room_entry(result, search_filter):
+        if from_federation and not result.get("m.federate", True):
+            # This is a room that other servers cannot join. Do not show them
+            # this room.
+            return
+
+        if _matches_room_entry(result, search_filter):
             chunk.append(result)

     @cachedInlineCallbacks(num_args=1, cache_context=True)
     def generate_room_entry(self, room_id, num_joined_users, cache_context,
                             with_alias=True, allow_private=False):
         """Returns the entry for a room
+
+        Args:
+            room_id (str): The room's ID.
+            num_joined_users (int): Number of users in the room.
+            cache_context: Information for cached responses.
+            with_alias (bool): Whether to return the room's aliases in the result.
+            allow_private (bool): Whether invite-only rooms should be shown.
+
+        Returns:
+            Deferred[dict|None]: Returns a room entry as a dictionary, or None if this
+            room was determined not to be shown publicly.
         """
         result = {
             "room_id": room_id,

@@ -318,6 +364,7 @@ class RoomListHandler(BaseHandler):
         event_map = yield self.store.get_events([
             event_id for key, event_id in iteritems(current_state_ids)
             if key[0] in (
+                EventTypes.Create,
                 EventTypes.JoinRules,
                 EventTypes.Name,
                 EventTypes.Topic,

@@ -334,12 +381,17 @@ class RoomListHandler(BaseHandler):
         }

         # Double check that this is actually a public room.
         join_rules_event = current_state.get((EventTypes.JoinRules, ""))
         if join_rules_event:
             join_rule = join_rules_event.content.get("join_rule", None)
             if not allow_private and join_rule and join_rule != JoinRules.PUBLIC:
                 defer.returnValue(None)

+        # Return whether this room is open to federation users or not
+        create_event = current_state.get((EventTypes.Create, ""))
+        result["m.federate"] = create_event.content.get("m.federate", True)
+
         if with_alias:
             aliases = yield self.store.get_aliases_for_room(
                 room_id, on_invalidate=cache_context.invalidate
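The new federation filter keys off the m.federate flag in the room's m.room.create event content, where absence means the room is federable. The decision in isolation (hypothetical event contents):

def visible_over_federation(create_content, from_federation):
    # m.federate defaults to True when the create event doesn't set it.
    if from_federation and not create_content.get("m.federate", True):
        return False
    return True

federable = {"creator": "@alice:example.com"}
local_only = {"creator": "@bob:example.com", "m.federate": False}

print(visible_over_federation(federable, from_federation=True))   # True
print(visible_over_federation(local_only, from_federation=True))  # False
print(visible_over_federation(local_only, from_federation=False)) # True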

synapse/http/server.py

@@ -169,18 +169,18 @@ def _return_html_error(f, request):
             )
         else:
             logger.error(
-                "Failed handle request %r: %s",
+                "Failed handle request %r",
                 request,
-                f.getTraceback().rstrip(),
+                exc_info=(f.type, f.value, f.getTracebackObject()),
             )
     else:
         code = http_client.INTERNAL_SERVER_ERROR
         msg = "Internal server error"

         logger.error(
-            "Failed handle request %r: %s",
+            "Failed handle request %r",
             request,
-            f.getTraceback().rstrip(),
+            exc_info=(f.type, f.value, f.getTracebackObject()),
         )

     body = HTML_ERROR_TEMPLATE.format(

synapse/replication/slave/storage/presence.py

@@ -54,8 +54,11 @@ class SlavedPresenceStore(BaseSlavedStore):
     def stream_positions(self):
         result = super(SlavedPresenceStore, self).stream_positions()
-        position = self._presence_id_gen.get_current_token()
-        result["presence"] = position
+
+        if self.hs.config.use_presence:
+            position = self._presence_id_gen.get_current_token()
+            result["presence"] = position
+
         return result

     def process_replication_rows(self, stream_name, token, rows):

synapse/replication/tcp/client.py

@@ -39,7 +39,7 @@ class ReplicationClientFactory(ReconnectingClientFactory):
     Accepts a handler that will be called when new data is available or data
     is required.
     """
-    maxDelay = 5  # Try at least once every N seconds
+    maxDelay = 30  # Try at least once every N seconds

     def __init__(self, hs, client_name, handler):
         self.client_name = client_name

@@ -54,7 +54,6 @@ class ReplicationClientFactory(ReconnectingClientFactory):
     def buildProtocol(self, addr):
         logger.info("Connected to replication: %r", addr)
-        self.resetDelay()
         return ClientReplicationStreamProtocol(
             self.client_name, self.server_name, self._clock, self.handler
         )

@@ -90,15 +89,18 @@ class ReplicationClientHandler(object):
         # Used for tests.
         self.awaiting_syncs = {}

+        # The factory used to create connections.
+        self.factory = None
+
     def start_replication(self, hs):
         """Helper method to start a replication connection to the remote server
         using TCP.
         """
         client_name = hs.config.worker_name
-        factory = ReplicationClientFactory(hs, client_name, self)
+        self.factory = ReplicationClientFactory(hs, client_name, self)
         host = hs.config.worker_replication_host
         port = hs.config.worker_replication_port
-        hs.get_reactor().connectTCP(host, port, factory)
+        hs.get_reactor().connectTCP(host, port, self.factory)

     def on_rdata(self, stream_name, token, rows):
         """Called when we get new replication data. By default this just pokes

@@ -140,6 +142,7 @@ class ReplicationClientHandler(object):
             args["account_data"] = user_account_data
         elif room_account_data:
             args["account_data"] = room_account_data
+
         return args

     def get_currently_syncing_users(self):

@@ -204,3 +207,14 @@ class ReplicationClientHandler(object):
         for cmd in self.pending_commands:
             connection.send_command(cmd)
         self.pending_commands = []
+
+    def finished_connecting(self):
+        """Called when we have successfully subscribed and caught up to all
+        streams we're interested in.
+        """
+        logger.info("Finished connecting to server")
+
+        # We don't reset the delay any earlier as otherwise if there is a
+        # problem during start up we'll end up tight looping connecting to the
+        # server.
+        self.factory.resetDelay()
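The reasoning behind moving resetDelay(): ReconnectingClientFactory backs off exponentially up to maxDelay, but calling resetDelay() in buildProtocol() zeroed the delay as soon as a TCP connection was made, so a server that accepted connections and then immediately failed caused a tight reconnect loop. Deferring the reset until all streams have caught up avoids that. A toy model of the same backoff logic (plain Python, hypothetical names):

class Reconnector(object):
    def __init__(self, initial_delay=0.1, max_delay=30.0):
        self.initial_delay = initial_delay
        self.max_delay = max_delay
        self.delay = initial_delay

    def connection_failed(self):
        # Exponential backoff, capped at max_delay.
        self.delay = min(self.delay * 2, self.max_delay)
        return self.delay

    def caught_up(self):
        # Reset only once the connection is known-good, i.e. after the
        # replication streams are up to date - not merely on TCP connect.
        self.delay = self.initial_delay

r = Reconnector()
for _ in range(3):
    print("retry in", r.connection_failed(), "seconds")
r.caught_up()
print("delay reset to", r.delay)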

synapse/replication/tcp/commands.py

@@ -127,8 +127,11 @@ class RdataCommand(Command):
 class PositionCommand(Command):
-    """Sent by the client to tell the client the stream postition without
+    """Sent by the server to tell the client the stream postition without
     needing to send an RDATA.
+
+    Sent to the client after all missing updates for a stream have been sent
+    to the client and they're now up to date.
     """
     NAME = "POSITION"

synapse/replication/tcp/protocol.py

@@ -268,7 +268,17 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
if "\n" in string: if "\n" in string:
raise Exception("Unexpected newline in command: %r", string) raise Exception("Unexpected newline in command: %r", string)
self.sendLine(string.encode("utf-8")) encoded_string = string.encode("utf-8")
if len(encoded_string) > self.MAX_LENGTH:
raise Exception(
"Failed to send command %s as too long (%d > %d)" % (
cmd.NAME,
len(encoded_string), self.MAX_LENGTH,
)
)
self.sendLine(encoded_string)
self.last_sent_command = self.clock.time_msec() self.last_sent_command = self.clock.time_msec()
@ -361,6 +371,11 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
def id(self): def id(self):
return "%s-%s" % (self.name, self.conn_id) return "%s-%s" % (self.name, self.conn_id)
def lineLengthExceeded(self, line):
"""Called when we receive a line that is above the maximum line length
"""
self.send_error("Line length exceeded")
class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol): class ServerReplicationStreamProtocol(BaseReplicationStreamProtocol):
VALID_INBOUND_COMMANDS = VALID_CLIENT_COMMANDS VALID_INBOUND_COMMANDS = VALID_CLIENT_COMMANDS
@ -511,6 +526,11 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
self.server_name = server_name self.server_name = server_name
self.handler = handler self.handler = handler
# Set of stream names that have been subscribe to, but haven't yet
# caught up with. This is used to track when the client has been fully
# connected to the remote.
self.streams_connecting = set()
# Map of stream to batched updates. See RdataCommand for info on how # Map of stream to batched updates. See RdataCommand for info on how
# batching works. # batching works.
self.pending_batches = {} self.pending_batches = {}
@ -533,6 +553,10 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
# We've now finished connecting to so inform the client handler # We've now finished connecting to so inform the client handler
self.handler.update_connection(self) self.handler.update_connection(self)
# This will happen if we don't actually subscribe to any streams
if not self.streams_connecting:
self.handler.finished_connecting()
def on_SERVER(self, cmd): def on_SERVER(self, cmd):
if cmd.data != self.server_name: if cmd.data != self.server_name:
logger.error("[%s] Connected to wrong remote: %r", self.id(), cmd.data) logger.error("[%s] Connected to wrong remote: %r", self.id(), cmd.data)
@ -562,6 +586,12 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
        return self.handler.on_rdata(stream_name, cmd.token, rows)

    def on_POSITION(self, cmd):
+        # When we get a `POSITION` command it means we've finished getting
+        # missing updates for the given stream, and are now up to date.
+        self.streams_connecting.discard(cmd.stream_name)
+        if not self.streams_connecting:
+            self.handler.finished_connecting()
+
        return self.handler.on_position(cmd.stream_name, cmd.token)
    def on_SYNC(self, cmd):
@ -578,6 +608,8 @@ class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
            self.id(), stream_name, token
        )

+        self.streams_connecting.add(stream_name)
+
        self.send_command(ReplicateCommand(stream_name, token))

    def on_connection_closed(self):
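Taken together with the POSITION change above, this gives the client handler a single "caught up everywhere" signal. A rough message-flow sketch (my own, with hypothetical stream names; not part of the diff):

#   client                               server
#   ------                               ------
#   REPLICATE events <token>   ->           (streams_connecting = {events, typing})
#   REPLICATE typing <token>   ->
#                              <-  RDATA events ...    (missing updates)
#                              <-  POSITION events     (events caught up; discarded)
#                              <-  RDATA typing ...
#                              <-  POSITION typing     (set now empty, so
#                                                       handler.finished_connecting())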

View file

@ -33,7 +33,7 @@ RECAPTCHA_TEMPLATE = """
<title>Authentication</title>
<meta name='viewport' content='width=device-width, initial-scale=1,
    user-scalable=no, minimum-scale=1.0, maximum-scale=1.0'>
-<script src="https://www.google.com/recaptcha/api.js"
+<script src="https://www.recaptcha.net/recaptcha/api.js"
    async defer></script>
<script src="//code.jquery.com/jquery-1.11.2.min.js"></script>
<link rel="stylesheet" href="/_matrix/static/client/register/style.css">

View file

@ -1,5 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
+# Copyright 2019 New Vector Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -133,8 +134,15 @@ def respond_with_responder(request, responder, media_type, file_size, upload_nam
logger.debug("Responding to media request with responder %s") logger.debug("Responding to media request with responder %s")
add_file_headers(request, media_type, file_size, upload_name) add_file_headers(request, media_type, file_size, upload_name)
with responder: try:
yield responder.write_to_consumer(request) with responder:
yield responder.write_to_consumer(request)
except Exception as e:
# The majority of the time this will be due to the client having gone
# away. Unfortunately, Twisted simply throws a generic exception at us
# in that case.
logger.warning("Failed to write to consumer: %s %s", type(e), e)
finish_request(request) finish_request(request)
@ -206,8 +214,7 @@ def get_filename_from_headers(headers):
    Content-Disposition HTTP header.

    Args:
-        headers (twisted.web.http_headers.Headers): The HTTP
-            request headers.
+        headers (dict[bytes, list[bytes]]): The HTTP request headers.

    Returns:
        A Unicode string of the filename, or None.
@ -218,23 +225,12 @@ def get_filename_from_headers(headers):
    if not content_disposition[0]:
        return

-    # dict of unicode: bytes, corresponding to the key value sections of the
-    # Content-Disposition header.
-    params = {}
-    parts = content_disposition[0].split(b";")
-    for i in parts:
-        # Split into key-value pairs, if able
-        # We don't care about things like `inline`, so throw it out
-        if b"=" not in i:
-            continue
-        key, value = i.strip().split(b"=")
-        params[key.decode('ascii')] = value
+    _, params = _parse_header(content_disposition[0])

    upload_name = None

    # First check if there is a valid UTF-8 filename
-    upload_name_utf8 = params.get("filename*", None)
+    upload_name_utf8 = params.get(b"filename*", None)
    if upload_name_utf8:
        if upload_name_utf8.lower().startswith(b"utf-8''"):
            upload_name_utf8 = upload_name_utf8[7:]
@ -260,12 +256,68 @@ def get_filename_from_headers(headers):
    # If there isn't check for an ascii name.
    if not upload_name:
-        upload_name_ascii = params.get("filename", None)
+        upload_name_ascii = params.get(b"filename", None)
        if upload_name_ascii and is_ascii(upload_name_ascii):
-            # Make sure there's no %-quoted bytes. If there is, reject it as
-            # non-valid ASCII.
-            if b"%" not in upload_name_ascii:
-                upload_name = upload_name_ascii.decode('ascii')
+            upload_name = upload_name_ascii.decode('ascii')

    # This may be None here, indicating we did not find a matching name.
    return upload_name
+
+
+def _parse_header(line):
+    """Parse a Content-type like header.
+
+    Cargo-culted from `cgi`, but works on bytes rather than strings.
+
+    Args:
+        line (bytes): header to be parsed
+
+    Returns:
+        Tuple[bytes, dict[bytes, bytes]]:
+            the main content-type, followed by the parameter dictionary
+    """
+    parts = _parseparam(b';' + line)
+    key = next(parts)
+    pdict = {}
+    for p in parts:
+        i = p.find(b'=')
+        if i >= 0:
+            name = p[:i].strip().lower()
+            value = p[i + 1:].strip()
+
+            # strip double-quotes
+            if len(value) >= 2 and value[0:1] == value[-1:] == b'"':
+                value = value[1:-1]
+                value = value.replace(b'\\\\', b'\\').replace(b'\\"', b'"')
+
+            pdict[name] = value
+
+    return key, pdict
+
+
+def _parseparam(s):
+    """Generator which splits the input on ;, respecting double-quoted sequences
+
+    Cargo-culted from `cgi`, but works on bytes rather than strings.
+
+    Args:
+        s (bytes): header to be parsed
+
+    Returns:
+        Iterable[bytes]: the split input
+    """
+    while s[:1] == b';':
+        s = s[1:]
+
+        # look for the next ;
+        end = s.find(b';')
+
+        # if there is an odd number of " marks between here and the next ;, skip to the
+        # next ; instead
+        while end > 0 and (s.count(b'"', 0, end) - s.count(b'\\"', 0, end)) % 2:
+            end = s.find(b';', end + 1)
+
+        if end < 0:
+            end = len(s)
+
+        f = s[:end]
+        yield f.strip()
+        s = s[end:]
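For orientation, a hypothetical call (not from the diff) showing what the replacement parser returns for a typical Content-Disposition value; note keys and values stay as bytes throughout:

# Illustrative only:
_parse_header(b'inline; filename="foo.png"; size=123')
# -> (b'inline', {b'filename': b'foo.png', b'size': b'123'})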

View file

@ -7,9 +7,9 @@ import synapse.handlers.auth
import synapse.handlers.deactivate_account
import synapse.handlers.device
import synapse.handlers.e2e_keys
+import synapse.handlers.message
import synapse.handlers.room
import synapse.handlers.room_member
-import synapse.handlers.message
import synapse.handlers.set_password
import synapse.rest.media.v1.media_repository
import synapse.server_notices.server_notices_manager

View file

@ -4,7 +4,7 @@
<meta name='viewport' content='width=device-width, initial-scale=1, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0'>
<link rel="stylesheet" href="style.css">
<script src="js/jquery-2.1.3.min.js"></script>
-<script src="https://www.google.com/recaptcha/api/js/recaptcha_ajax.js"></script>
+<script src="https://www.recaptcha.net/recaptcha/api/js/recaptcha_ajax.js"></script>
<script src="register_config.js"></script>
<script src="js/register.js"></script>
</head>

View file

@ -30,6 +30,7 @@ from synapse.api.errors import StoreError
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.engines import PostgresEngine, Sqlite3Engine
from synapse.types import get_domain_from_id
+from synapse.util import batch_iter
from synapse.util.caches.descriptors import Cache
from synapse.util.logcontext import LoggingContext, PreserveLoggingContext
from synapse.util.stringutils import exception_to_unicode
@ -1327,10 +1328,16 @@ class SQLBaseStore(object):
""" """
txn.call_after(self._invalidate_state_caches, room_id, members_changed) txn.call_after(self._invalidate_state_caches, room_id, members_changed)
keys = itertools.chain([room_id], members_changed) # We need to be careful that the size of the `members_changed` list
self._send_invalidation_to_replication( # isn't so large that it causes problems sending over replication, so we
txn, _CURRENT_STATE_CACHE_NAME, keys, # send them in chunks.
) # Max line length is 16K, and max user ID length is 255, so 50 should
# be safe.
for chunk in batch_iter(members_changed, 50):
keys = itertools.chain([room_id], chunk)
self._send_invalidation_to_replication(
txn, _CURRENT_STATE_CACHE_NAME, keys,
)
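A quick sanity check on the chunk arithmetic (my own example; assumes batch_iter yields fixed-size chunks with a short final one, as its use above implies):

from synapse.util import batch_iter

members = ["@user%d:example.com" % i for i in range(120)]
chunks = list(batch_iter(members, 50))
# -> 3 chunks of 50, 50 and 20 members; each becomes one invalidation
#    command carrying at most ~51 * 255 bytes of keys, comfortably under
#    the 16K line limit enforced by the replication protocol change above.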
    def _invalidate_state_caches(self, room_id, members_changed):
        """Invalidates caches that are based on the current state, but does
@ -1596,6 +1603,14 @@ class SQLBaseStore(object):
        return cls.cursor_to_dict(txn)

+    @property
+    def database_engine_name(self):
+        return self.database_engine.module.__name__
+
+    def get_server_version(self):
+        """Returns a string describing the server version number"""
+        return self.database_engine.server_version


class _RollbackButIsFineException(Exception):
    """ This exception is used to rollback a transaction without implying

View file

@ -23,6 +23,7 @@ class PostgresEngine(object):
        self.module = database_module
        self.module.extensions.register_type(self.module.extensions.UNICODE)
        self.synchronous_commit = database_config.get("synchronous_commit", True)
+        self._version = None  # unknown as yet

    def check_database(self, txn):
        txn.execute("SHOW SERVER_ENCODING")
@ -87,3 +88,27 @@ class PostgresEngine(object):
""" """
txn.execute("SELECT nextval('state_group_id_seq')") txn.execute("SELECT nextval('state_group_id_seq')")
return txn.fetchone()[0] return txn.fetchone()[0]
@property
def server_version(self):
"""Returns a string giving the server version. For example: '8.1.5'
Returns:
string
"""
# note that this is a bit of a hack because it relies on on_new_connection
# having been called at least once. Still, that should be a safe bet here.
numver = self._version
assert numver is not None
# https://www.postgresql.org/docs/current/libpq-status.html#LIBPQ-PQSERVERVERSION
if numver >= 100000:
return "%i.%i" % (
numver / 10000, numver % 10000,
)
else:
return "%i.%i.%i" % (
numver / 10000,
(numver % 10000) / 100,
numver % 100,
)
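Two worked decodes of that integer format, following the libpq rule linked above (examples mine):

# 90605  -> "9.6.5" : 90605 / 10000 = 9, (90605 % 10000) / 100 = 6, 90605 % 100 = 5
# 110002 -> "11.2"  : >= 100000, so just 110002 / 10000 = 11 and 110002 % 10000 = 2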

View file

@ -70,6 +70,15 @@ class Sqlite3Engine(object):
        self._current_state_group_id += 1
        return self._current_state_group_id

+    @property
+    def server_version(self):
+        """Gets a string giving the server version. For example: '3.22.0'
+
+        Returns:
+            string
+        """
+        return "%i.%i.%i" % self.module.sqlite_version_info


# Following functions taken from: https://github.com/coleifer/peewee

View file

@ -295,6 +295,39 @@ class RegistrationWorkerStore(SQLBaseStore):
            return ret['user_id']
        return None

+    @defer.inlineCallbacks
+    def user_add_threepid(self, user_id, medium, address, validated_at, added_at):
+        yield self._simple_upsert("user_threepids", {
+            "medium": medium,
+            "address": address,
+        }, {
+            "user_id": user_id,
+            "validated_at": validated_at,
+            "added_at": added_at,
+        })
+
+    @defer.inlineCallbacks
+    def user_get_threepids(self, user_id):
+        ret = yield self._simple_select_list(
+            "user_threepids", {
+                "user_id": user_id
+            },
+            ['medium', 'address', 'validated_at', 'added_at'],
+            'user_get_threepids'
+        )
+        defer.returnValue(ret)
+
+    def user_delete_threepid(self, user_id, medium, address):
+        return self._simple_delete(
+            "user_threepids",
+            keyvalues={
+                "user_id": user_id,
+                "medium": medium,
+                "address": address,
+            },
+            desc="user_delete_threepids",
+        )
+

class RegistrationStore(RegistrationWorkerStore,
                        background_updates.BackgroundUpdateStore):
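These three methods move verbatim out of RegistrationStore (deleted below) into RegistrationWorkerStore, which lets worker processes use them too. A hypothetical call site (names illustrative; assumes the usual `from twisted.internet import defer`):

from twisted.internet import defer

@defer.inlineCallbacks
def get_threepids_for_user(store, user_id):
    # Works on a worker now that the method lives on RegistrationWorkerStore
    threepids = yield store.user_get_threepids(user_id)
    # -> e.g. [{"medium": "email", "address": "a@b.example",
    #           "validated_at": 1551000000000, "added_at": 1551000000000}]
    defer.returnValue(threepids)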
@ -632,39 +665,6 @@ class RegistrationStore(RegistrationWorkerStore,
        defer.returnValue(res if res else False)

-    @defer.inlineCallbacks
-    def user_add_threepid(self, user_id, medium, address, validated_at, added_at):
-        yield self._simple_upsert("user_threepids", {
-            "medium": medium,
-            "address": address,
-        }, {
-            "user_id": user_id,
-            "validated_at": validated_at,
-            "added_at": added_at,
-        })
-
-    @defer.inlineCallbacks
-    def user_get_threepids(self, user_id):
-        ret = yield self._simple_select_list(
-            "user_threepids", {
-                "user_id": user_id
-            },
-            ['medium', 'address', 'validated_at', 'added_at'],
-            'user_get_threepids'
-        )
-        defer.returnValue(ret)
-
-    def user_delete_threepid(self, user_id, medium, address):
-        return self._simple_delete(
-            "user_threepids",
-            keyvalues={
-                "user_id": user_id,
-                "medium": medium,
-                "address": address,
-            },
-            desc="user_delete_threepids",
-        )
-
    @defer.inlineCallbacks
    def save_or_get_3pid_guest_access_token(
        self, medium, address, access_token, inviter_user_id

View file

@ -0,0 +1,45 @@
# -*- coding: utf-8 -*-
# Copyright 2019 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.rest.media.v1._base import get_filename_from_headers
from tests import unittest
class GetFileNameFromHeadersTests(unittest.TestCase):
    # input -> expected result
    TEST_CASES = {
        b"inline; filename=abc.txt": u"abc.txt",
        b'inline; filename="azerty"': u"azerty",
        b'inline; filename="aze%20rty"': u"aze%20rty",
        b'inline; filename="aze\"rty"': u'aze"rty',
        b'inline; filename="azer;ty"': u"azer;ty",
        b"inline; filename*=utf-8''foo%C2%A3bar": u"foo£bar",
    }

    def tests(self):
        for hdr, expected in self.TEST_CASES.items():
            res = get_filename_from_headers(
                {
                    b'Content-Disposition': [hdr],
                },
            )
            self.assertEqual(
                res, expected,
                "expected output for %s to be %s but was %s" % (
                    hdr, expected, res,
                )
            )
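A note on the last case, since the actual decoding lives outside this diff: the filename* form follows RFC 5987. My reading of the expected steps:

# b"inline; filename*=utf-8''foo%C2%A3bar"
#   -> strip the "utf-8''" prefix   -> b"foo%C2%A3bar"
#   -> percent-decode               -> b"foo\xc2\xa3bar"
#   -> decode as UTF-8              -> u"foo\u00a3bar" == u"foo£bar"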

View file

@ -45,7 +45,9 @@ from synapse.util.ratelimitutils import FederationRateLimiter
# set this to True to run the tests against postgres instead of sqlite.
USE_POSTGRES_FOR_TESTS = os.environ.get("SYNAPSE_POSTGRES", False)
LEAVE_DB = os.environ.get("SYNAPSE_LEAVE_DB", False)
-POSTGRES_USER = os.environ.get("SYNAPSE_POSTGRES_USER", "postgres")
+POSTGRES_USER = os.environ.get("SYNAPSE_POSTGRES_USER", None)
+POSTGRES_HOST = os.environ.get("SYNAPSE_POSTGRES_HOST", None)
+POSTGRES_PASSWORD = os.environ.get("SYNAPSE_POSTGRES_PASSWORD", None)

POSTGRES_BASE_DB = "_synapse_unit_tests_base_%s" % (os.getpid(),)
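For context, my reconstruction of the intended invocation (values illustrative): export these before running the tests, so every connect() call below picks them up from the environment rather than assuming a local socket as the "postgres" user.

#   SYNAPSE_POSTGRES=1
#   SYNAPSE_POSTGRES_HOST=postgres
#   SYNAPSE_POSTGRES_USER=postgres
#   SYNAPSE_POSTGRES_PASSWORD=postgres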
@ -58,6 +60,8 @@ def setupdb():
"args": { "args": {
"database": POSTGRES_BASE_DB, "database": POSTGRES_BASE_DB,
"user": POSTGRES_USER, "user": POSTGRES_USER,
"host": POSTGRES_HOST,
"password": POSTGRES_PASSWORD,
"cp_min": 1, "cp_min": 1,
"cp_max": 5, "cp_max": 5,
}, },
@ -66,7 +70,9 @@ def setupdb():
    config.password_providers = []
    config.database_config = pgconfig
    db_engine = create_engine(pgconfig)
-    db_conn = db_engine.module.connect(user=POSTGRES_USER)
+    db_conn = db_engine.module.connect(
+        user=POSTGRES_USER, host=POSTGRES_HOST, password=POSTGRES_PASSWORD
+    )
    db_conn.autocommit = True
    cur = db_conn.cursor()
    cur.execute("DROP DATABASE IF EXISTS %s;" % (POSTGRES_BASE_DB,))
@ -76,7 +82,10 @@ def setupdb():
    # Set up in the db
    db_conn = db_engine.module.connect(
-        database=POSTGRES_BASE_DB, user=POSTGRES_USER
+        database=POSTGRES_BASE_DB,
+        user=POSTGRES_USER,
+        host=POSTGRES_HOST,
+        password=POSTGRES_PASSWORD,
    )
    cur = db_conn.cursor()
    _get_or_create_schema_state(cur, db_engine)
@ -86,7 +95,9 @@ def setupdb():
    db_conn.close()

    def _cleanup():
-        db_conn = db_engine.module.connect(user=POSTGRES_USER)
+        db_conn = db_engine.module.connect(
+            user=POSTGRES_USER, host=POSTGRES_HOST, password=POSTGRES_PASSWORD
+        )
        db_conn.autocommit = True
        cur = db_conn.cursor()
        cur.execute("DROP DATABASE IF EXISTS %s;" % (POSTGRES_BASE_DB,))
@ -142,6 +153,9 @@ def default_config(name):
    config.saml2_enabled = False
    config.public_baseurl = None
    config.default_identity_server = None
+    config.key_refresh_interval = 24 * 60 * 60 * 1000
+    config.old_signing_keys = {}
+    config.tls_fingerprints = []

    config.use_frozen_dicts = False
@ -203,7 +217,14 @@ def setup_test_homeserver(
        config.database_config = {
            "name": "psycopg2",
-            "args": {"database": test_db, "cp_min": 1, "cp_max": 5},
+            "args": {
+                "database": test_db,
+                "host": POSTGRES_HOST,
+                "password": POSTGRES_PASSWORD,
+                "user": POSTGRES_USER,
+                "cp_min": 1,
+                "cp_max": 5,
+            },
        }
    else:
        config.database_config = {
@ -217,7 +238,10 @@ def setup_test_homeserver(
    # the template database we generate in setupdb()
    if datastore is None and isinstance(db_engine, PostgresEngine):
        db_conn = db_engine.module.connect(
-            database=POSTGRES_BASE_DB, user=POSTGRES_USER
+            database=POSTGRES_BASE_DB,
+            user=POSTGRES_USER,
+            host=POSTGRES_HOST,
+            password=POSTGRES_PASSWORD,
        )
        db_conn.autocommit = True
        cur = db_conn.cursor()
@ -267,7 +291,10 @@ def setup_test_homeserver(
            # Drop the test database
            db_conn = db_engine.module.connect(
-                database=POSTGRES_BASE_DB, user=POSTGRES_USER
+                database=POSTGRES_BASE_DB,
+                user=POSTGRES_USER,
+                host=POSTGRES_HOST,
+                password=POSTGRES_PASSWORD,
            )
            db_conn.autocommit = True
            cur = db_conn.cursor()
@ -457,6 +484,9 @@ class MockKey(object):
    def verify(self, message, sig):
        assert sig == b"\x9a\x87$"

+    def encode(self):
+        return b"<fake_encoded_key>"


class MockClock(object):
    now = 1000
@ -486,7 +516,7 @@ class MockClock(object):
        return t

    def looping_call(self, function, interval):
-        self.loopers.append([function, interval / 1000., self.now])
+        self.loopers.append([function, interval / 1000.0, self.now])

    def cancel_call_later(self, timer, ignore_errs=False):
        if timer[2]:
@ -522,7 +552,7 @@ class MockClock(object):
            looped[2] = self.now

    def advance_time_msec(self, ms):
-        self.advance_time(ms / 1000.)
+        self.advance_time(ms / 1000.0)

    def time_bound_deferred(self, d, *args, **kwargs):
        # We don't bother timing things out for now.
@ -631,7 +661,7 @@ def create_room(hs, room_id, creator_id):
"sender": creator_id, "sender": creator_id,
"room_id": room_id, "room_id": room_id,
"content": {}, "content": {},
} },
) )
event, context = yield event_creation_handler.create_new_client_event(builder) event, context = yield event_creation_handler.create_new_client_event(builder)