Mirror of https://github.com/element-hq/synapse.git (synced 2024-12-21 12:14:29 +03:00)

Merge remote-tracking branch 'origin/release-v1.88' into matrix-org-hotfixes

Commit 426cf50ecc: 88 changed files with 5016 additions and 3474 deletions
@@ -29,11 +29,12 @@ IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/")
 # First calculate the various trial jobs.
 #
-# For each type of test we only run on Py3.7 on PRs
+# For PRs, we only run each type of test with the oldest Python version supported (which
+# is Python 3.8 right now)

 trial_sqlite_tests = [
     {
-        "python-version": "3.7",
+        "python-version": "3.8",
         "database": "sqlite",
         "extras": "all",
     }
@@ -46,13 +47,13 @@ if not IS_PR:
             "database": "sqlite",
             "extras": "all",
         }
-        for version in ("3.8", "3.9", "3.10", "3.11")
+        for version in ("3.9", "3.10", "3.11")
     )

     trial_postgres_tests = [
         {
-            "python-version": "3.7",
+            "python-version": "3.8",
             "database": "postgres",
             "postgres-version": "11",
             "extras": "all",
@@ -71,7 +72,7 @@ if not IS_PR:
     trial_no_extra_tests = [
         {
-            "python-version": "3.7",
+            "python-version": "3.8",
             "database": "sqlite",
             "extras": "",
         }
@@ -133,11 +134,6 @@ if not IS_PR:
             "sytest-tag": "testing",
             "postgres": "postgres",
         },
-        {
-            "sytest-tag": "buster",
-            "postgres": "multi-postgres",
-            "workers": "workers",
-        },
     ]
 )
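For readers skimming the CI changes above: the script builds a matrix of test jobs and, on pull requests, only exercises the oldest supported Python. Below is a minimal, self-contained sketch of that pattern; it reuses names visible in the diff (`IS_PR`, `trial_sqlite_tests`) but is an illustration, not the actual `.ci` script.

```python
# Sketch only: mirrors the PR-vs-full-build matrix logic suggested by the diff above.
import json
import os

IS_PR = os.environ.get("GITHUB_REF", "").startswith("refs/pull/")

# On PRs we only run the oldest supported Python version (3.8 as of this release).
trial_sqlite_tests = [
    {"python-version": "3.8", "database": "sqlite", "extras": "all"}
]

if not IS_PR:
    # Full (non-PR) builds additionally cover the newer interpreters.
    trial_sqlite_tests.extend(
        {"python-version": version, "database": "sqlite", "extras": "all"}
        for version in ("3.9", "3.10", "3.11")
    )

print(json.dumps(trial_sqlite_tests, indent=2))
```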
.github/workflows/release-artifacts.yml (vendored): 2 changes
@@ -144,7 +144,7 @@ jobs:
      - name: Only build a single wheel on PR
        if: startsWith(github.ref, 'refs/pull/')
-       run: echo "CIBW_BUILD="cp37-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
+       run: echo "CIBW_BUILD="cp38-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV

      - name: Build wheels
        run: python -m cibuildwheel --output-dir wheelhouse
.github/workflows/tests.yml (vendored): 6 changes
@@ -320,7 +320,7 @@ jobs:
       - uses: actions/setup-python@v4
         with:
-          python-version: '3.7'
+          python-version: '3.8'

       - name: Prepare old deps
         if: steps.cache-poetry-old-deps.outputs.cache-hit != 'true'
@@ -362,7 +362,7 @@
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ["pypy-3.7"]
+        python-version: ["pypy-3.8"]
         extras: ["all"]

     steps:
@@ -477,7 +477,7 @@
     strategy:
       matrix:
         include:
-          - python-version: "3.7"
+          - python-version: "3.8"
            postgres-version: "11"

          - python-version: "3.11"
.github/workflows/twisted_trunk.yml (vendored): 6 changes
@@ -96,7 +96,11 @@ jobs:
     if: needs.check_repo.outputs.should_run_workflow == 'true'
     runs-on: ubuntu-latest
     container:
-      image: matrixdotorg/sytest-synapse:buster
+      # We're using ubuntu:focal because it uses Python 3.8 which is our minimum supported Python version.
+      # This job is a canary to warn us about unreleased twisted changes that would cause problems for us if
+      # they were to be released immediately. For simplicity's sake (and to save CI runners) we use the oldest
+      # version, assuming that any incompatibilities on newer versions would also be present on the oldest.
+      image: matrixdotorg/sytest-synapse:focal
       volumes:
         - ${{ github.workspace }}:/src
.gitignore (vendored): 1 change
@@ -34,6 +34,7 @@ __pycache__/
 /logs
 /media_store/
 /uploads
+/homeserver-config-overrides.d

 # For direnv users
 /.envrc
CHANGES.md: 2887 changes (file diff suppressed because it is too large)
Cargo.lock (generated): 46 changes
@@ -182,9 +182,9 @@ dependencies = [
 [[package]]
 name = "proc-macro2"
-version = "1.0.52"
+version = "1.0.64"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224"
+checksum = "78803b62cbf1f46fde80d7c0e803111524b9877184cfe7c3033659490ac7a7da"
 dependencies = [
  "unicode-ident",
 ]
@@ -273,9 +273,9 @@ dependencies = [
 [[package]]
 name = "quote"
-version = "1.0.26"
+version = "1.0.29"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc"
+checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105"
 dependencies = [
  "proc-macro2",
 ]
@@ -291,9 +291,21 @@ dependencies = [
 [[package]]
 name = "regex"
-version = "1.8.4"
+version = "1.9.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d0ab3ca65655bb1e41f2a8c8cd662eb4fb035e67c3f78da1d61dffe89d07300f"
+checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575"
 dependencies = [
  "aho-corasick",
  "memchr",
+ "regex-automata",
  "regex-syntax",
 ]
+
+[[package]]
+name = "regex-automata"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "83d3daa6976cffb758ec878f108ba0e062a45b2d6ca3a2cca965338855476caf"
+dependencies = [
+ "aho-corasick",
+ "memchr",
@@ -302,9 +314,9 @@ dependencies = [
 [[package]]
 name = "regex-syntax"
-version = "0.7.2"
+version = "0.7.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78"
+checksum = "2ab07dc67230e4a4718e70fd5c20055a4334b121f1f9db8fe63ef39ce9b8c846"

 [[package]]
 name = "ryu"
@@ -320,29 +332,29 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
 [[package]]
 name = "serde"
-version = "1.0.164"
+version = "1.0.171"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9e8c8cf938e98f769bc164923b06dce91cea1751522f46f8466461af04c9027d"
+checksum = "30e27d1e4fd7659406c492fd6cfaf2066ba8773de45ca75e855590f856dc34a9"
 dependencies = [
  "serde_derive",
 ]

 [[package]]
 name = "serde_derive"
-version = "1.0.164"
+version = "1.0.171"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d9735b638ccc51c28bf6914d90a2e9725b377144fc612c49a611fddd1b631d68"
+checksum = "389894603bd18c46fa56231694f8d827779c0951a667087194cf9de94ed24682"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.10",
+ "syn 2.0.25",
 ]

 [[package]]
 name = "serde_json"
-version = "1.0.99"
+version = "1.0.100"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "46266871c240a00b8f503b877622fe33430b3c7d963bdc0f2adc511e54a1eae3"
+checksum = "0f1e14e89be7aa4c4b78bdbdc9eb5bf8517829a600ae8eaa39a6e1d960b5185c"
 dependencies = [
  "itoa",
  "ryu",
@@ -374,9 +386,9 @@ dependencies = [
 [[package]]
 name = "syn"
-version = "2.0.10"
+version = "2.0.25"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5aad1363ed6d37b84299588d62d3a7d95b5a5c2d9aad5c85609fda12afaa1f40"
+checksum = "15e3fc8c0c74267e2df136e5e5fb656a464158aa57624053375eb9c8c6e25ae2"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -3,3 +3,4 @@
 [workspace]
 members = ["rust"]
+resolver = "2"
@@ -1 +0,0 @@
-Replace `EventContext` fields `prev_group` and `delta_ids` with field `state_group_deltas`.

@@ -1 +0,0 @@
-Fix a long-standing bug where media files were served in an unsafe manner. Contributed by @joshqou.

@@ -1 +0,0 @@
-Improve `/messages` response time by avoiding backfill when we already have messages to return.

@@ -1 +0,0 @@
-Regularly try to send transactions to other servers after they failed instead of waiting for a new event to be available before trying.

@@ -1 +0,0 @@
-Remove experimental [MSC2716](https://github.com/matrix-org/matrix-spec-proposals/pull/2716) implementation to incrementally import history into existing rooms.

@@ -1 +0,0 @@
-Fix requesting multiple keys at once over federation, related to [MSC3983](https://github.com/matrix-org/matrix-spec-proposals/pull/3983).

@@ -1 +0,0 @@
-Avoid invalidating a cache that was just prefilled.

@@ -1 +0,0 @@
-Fix requesting multiple keys at once over federation, related to [MSC3983](https://github.com/matrix-org/matrix-spec-proposals/pull/3983).

@@ -1 +0,0 @@
-Document `looping_call()` functionality that will wait for the given function to finish before scheduling another.

@@ -1 +0,0 @@
-Fix joining rooms through aliases where the alias server isn't a real homeserver. Contributed by @tulir @ Beeper.

@@ -1 +0,0 @@
-Fix a bug in push rules handling leading to an invalid (per spec) `is_user_mention` rule sent to clients. Also fix wrong rule names for `is_user_mention` and `is_room_mention`.

@@ -1 +0,0 @@
-Allow for the configuration of max request retries and min/max retry delays in the matrix federation client.

@@ -1 +0,0 @@
-Fix a bug introduced in 1.57.0 where the wrong table would be locked on updating database rows when using SQLite as the database backend.

@@ -1 +0,0 @@
-Fix Sytest environmental variable evaluation in CI.

@@ -1 +0,0 @@
-Fix a typo in the [Admin API](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html).

@@ -1 +0,0 @@
-Switch from `matrix://` to `matrix-federation://` scheme for internal Synapse routing of outbound federation traffic.

@@ -1 +0,0 @@
-Fix typo in MSC number in faster remote room join architecture doc.

@@ -1 +0,0 @@
-Fix harmless exceptions being printed when running the port DB script.

@@ -1 +0,0 @@
-Fix forgotten rooms missing from initial sync after rejoining them. Contributed by Nico from Famedly.

@@ -1 +0,0 @@
-Fix sqlite `user_filters` upgrade introduced in v1.86.0.

@@ -1 +0,0 @@
-Add spam checker module API for logins.
debian/changelog (vendored): 18 changes
@@ -1,3 +1,21 @@
+matrix-synapse-py3 (1.88.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.88.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 11 Jul 2023 10:20:19 +0100
+
+matrix-synapse-py3 (1.87.0) stable; urgency=medium
+
+  * New Synapse release 1.87.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 04 Jul 2023 16:24:00 +0100
+
+matrix-synapse-py3 (1.87.0~rc1) stable; urgency=medium
+
+  * New synapse release 1.87.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 27 Jun 2023 15:27:04 +0000
+
 matrix-synapse-py3 (1.86.0) stable; urgency=medium

   * New Synapse release 1.86.0.
@@ -28,12 +28,12 @@ FROM docker.io/library/${distro} as builder
 RUN apt-get update -qq -o Acquire::Languages=none
 RUN env DEBIAN_FRONTEND=noninteractive apt-get install \
-    -yqq --no-install-recommends \
-    build-essential \
-    ca-certificates \
-    devscripts \
-    equivs \
-    wget
+    -yqq --no-install-recommends \
+    build-essential \
+    ca-certificates \
+    devscripts \
+    equivs \
+    wget

 # fetch and unpack the package
 # We are temporarily using a fork of dh-virtualenv due to an incompatibility with Python 3.11, which ships with
@@ -62,33 +62,29 @@ FROM docker.io/library/${distro}
 ARG distro=""
 ENV distro ${distro}

-# Python < 3.7 assumes LANG="C" means ASCII-only and throws on printing unicode
-# http://bugs.python.org/issue19846
-ENV LANG C.UTF-8
-
 # Install the build dependencies
 #
 # NB: keep this list in sync with the list of build-deps in debian/control
 # TODO: it would be nice to do that automatically.
 RUN apt-get update -qq -o Acquire::Languages=none \
     && env DEBIAN_FRONTEND=noninteractive apt-get install \
-        -yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
-        build-essential \
-        curl \
-        debhelper \
-        devscripts \
-        libsystemd-dev \
-        lsb-release \
-        pkg-config \
-        python3-dev \
-        python3-pip \
-        python3-setuptools \
-        python3-venv \
-        sqlite3 \
-        libpq-dev \
-        libicu-dev \
-        pkg-config \
-        xmlsec1
+        -yqq --no-install-recommends -o Dpkg::Options::=--force-unsafe-io \
+        build-essential \
+        curl \
+        debhelper \
+        devscripts \
+        libsystemd-dev \
+        lsb-release \
+        pkg-config \
+        python3-dev \
+        python3-pip \
+        python3-setuptools \
+        python3-venv \
+        sqlite3 \
+        libpq-dev \
+        libicu-dev \
+        pkg-config \
+        xmlsec1

 # Install rust and ensure it's in the PATH
 ENV RUSTUP_HOME=/rust
@@ -242,6 +242,9 @@ The following parameters should be set in the URL:
 - `dir` - Direction of media order. Either `f` for forwards or `b` for backwards.
   Setting this value to `b` will reverse the above sort order. Defaults to `f`.
+- `not_user_type` - Exclude certain user types, such as bot users, from the request.
+  Can be provided multiple times. Possible values are `bot`, `support` or "empty string".
+  "empty string" here means to exclude users without a type.

 Caution. The database only has indexes on the columns `name` and `creation_ts`.
 This means that if a different sort order is used (`is_guest`, `admin`,
@@ -1180,7 +1183,7 @@ The following parameters should be set in the URL:
 - `user_id` - The fully qualified MXID: for example, `@user:server.com`. The user must
   be local.

-### Check username availability
+## Check username availability

 Checks to see if a username is available, and valid, for the server. See [the client-server
 API](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available)
@@ -1198,7 +1201,7 @@ GET /_synapse/admin/v1/username_available?username=$localpart
 The request and response format is the same as the
 [/_matrix/client/r0/register/available](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available) API.

-### Find a user based on their ID in an auth provider
+## Find a user based on their ID in an auth provider

 The API is:
@@ -1237,7 +1240,7 @@ Returns a `404` HTTP status code if no user was found, with a response body like
 _Added in Synapse 1.68.0._


-### Find a user based on their Third Party ID (ThreePID or 3PID)
+## Find a user based on their Third Party ID (ThreePID or 3PID)

 The API is:
docs/changelogs/CHANGES-2022.md (normal file): 2766 changes (file diff suppressed because it is too large)
@@ -23,7 +23,7 @@ people building from source should ensure they can fetch recent versions of Rust
 (e.g. by using [rustup](https://rustup.rs/)).

 The oldest supported version of SQLite is the version
-[provided](https://packages.debian.org/buster/libsqlite3-0) by
+[provided](https://packages.debian.org/bullseye/libsqlite3-0) by
 [Debian oldstable](https://wiki.debian.org/DebianOldStable).

 Context
@@ -322,7 +322,7 @@ The following command will let you run the integration test with the most common
 configuration:

 ```sh
-$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:buster
+$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:focal
 ```
 (Note that the paths must be full paths! You could also write `$(realpath relative/path)` if needed.)
@@ -200,7 +200,7 @@ When following this route please make sure that the [Platform-specific prerequis
 System requirements:

 - POSIX-compliant system (tested on Linux & OS X)
-- Python 3.7 or later, up to Python 3.11.
+- Python 3.8 or later, up to Python 3.11.
 - At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org

 If building on an uncommon architecture for which pre-built wheels are
@@ -1,8 +1,4 @@
 worker_app: synapse.app.generic_worker
 worker_name: background_worker
-
-# The replication listener on the main synapse process.
-worker_replication_host: 127.0.0.1
-worker_replication_http_port: 9093

 worker_log_config: /etc/matrix-synapse/background-worker-log.yaml
@@ -1,9 +1,5 @@
 worker_app: synapse.app.generic_worker
-worker_name: event_persister1
-
-# The replication listener on the main synapse process.
-worker_replication_host: 127.0.0.1
-worker_replication_http_port: 9093
+worker_name: event_persister1

 worker_listeners:
   - type: http
@@ -1,8 +1,4 @@
 worker_app: synapse.app.federation_sender
 worker_name: federation_sender1
-
-# The replication listener on the main synapse process.
-worker_replication_host: 127.0.0.1
-worker_replication_http_port: 9093

 worker_log_config: /etc/matrix-synapse/federation-sender-log.yaml
@@ -1,10 +1,6 @@
 worker_app: synapse.app.media_repository
 worker_name: media_worker
-
-# The replication listener on the main synapse process.
-worker_replication_host: 127.0.0.1
-worker_replication_http_port: 9093

 worker_listeners:
   - type: http
     port: 8085
@@ -1,8 +1,4 @@
 worker_app: synapse.app.pusher
 worker_name: pusher_worker1
-
-# The replication listener on the main synapse process.
-worker_replication_host: 127.0.0.1
-worker_replication_http_port: 9093

 worker_log_config: /etc/matrix-synapse/pusher-worker-log.yaml
@@ -87,6 +87,33 @@ process, for example:
 wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
 dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
 ```

+# Upgrading to v1.88.0
+
+## Minimum supported Python version
+
+The minimum supported Python version has been increased from v3.7 to v3.8.
+You will need Python 3.8 to run Synapse v1.88.0 (due out July 18th, 2023).
+
+If you use current versions of the Matrix.org-distributed Debian
+packages or Docker images, no action is required.
+
+## Removal of `worker_replication_*` settings
+
+As mentioned previously in [Upgrading to v1.84.0](#upgrading-to-v1840), the following deprecated settings
+are being removed in this release of Synapse:
+
+* [`worker_replication_host`](https://matrix-org.github.io/synapse/v1.86/usage/configuration/config_documentation.html#worker_replication_host)
+* [`worker_replication_http_port`](https://matrix-org.github.io/synapse/v1.86/usage/configuration/config_documentation.html#worker_replication_http_port)
+* [`worker_replication_http_tls`](https://matrix-org.github.io/synapse/v1.86/usage/configuration/config_documentation.html#worker_replication_http_tls)
+
+Please ensure that you have migrated to using `main` on your shared configuration's `instance_map`
+(or create one if necessary). This is required if you have ***any*** workers at all;
+administrators of single-process (monolith) installations don't need to do anything.
+
+For an illustrative example, please see [Upgrading to v1.84.0](#upgrading-to-v1840) below.
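For orientation, the `main` entry on the shared configuration's `instance_map` that this upgrade note refers to has roughly the shape sketched below. The host and port are illustrative values (they echo the defaults shown in the removed `worker_replication_*` examples) and should point at the main process's `replication` listener.

```yaml
# Illustrative shared-configuration snippet (values are examples, not taken from this diff):
instance_map:
  main:
    host: 127.0.0.1
    port: 9093
```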

 # Upgrading to v1.86.0

 ## Minimum supported Rust version
@@ -4090,51 +4090,6 @@ Example configuration:
 worker_name: generic_worker1
 ```
 ---
-### `worker_replication_host`
-*Deprecated as of version 1.84.0. Place `host` under `main` entry on the [`instance_map`](#instance_map) in your shared yaml configuration instead.*
-
-The HTTP replication endpoint that it should talk to on the main Synapse process.
-The main Synapse process defines this with a `replication` resource in
-[`listeners` option](#listeners).
-
-Example configuration:
-```yaml
-worker_replication_host: 127.0.0.1
-```
----
-### `worker_replication_http_port`
-*Deprecated as of version 1.84.0. Place `port` under `main` entry on the [`instance_map`](#instance_map) in your shared yaml configuration instead.*
-
-The HTTP replication port that it should talk to on the main Synapse process.
-The main Synapse process defines this with a `replication` resource in
-[`listeners` option](#listeners).
-
-Example configuration:
-```yaml
-worker_replication_http_port: 9093
-```
----
-### `worker_replication_http_tls`
-*Deprecated as of version 1.84.0. Place `tls` under `main` entry on the [`instance_map`](#instance_map) in your shared yaml configuration instead.*
-
-Whether TLS should be used for talking to the HTTP replication port on the main
-Synapse process.
-The main Synapse process defines this with the `tls` option on its [listener](#listeners) that
-has the `replication` resource enabled.
-
-**Please note:** by default, it is not safe to expose replication ports to the
-public Internet, even with TLS enabled.
-See [`worker_replication_secret`](#worker_replication_secret).
-
-Defaults to `false`.
-
-*Added in Synapse 1.72.0.*
-
-Example configuration:
-```yaml
-worker_replication_http_tls: true
-```
----
 ### `worker_listeners`

 A worker can handle HTTP requests. To do so, a `worker_listeners` option
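The hunk above ends mid-sentence at the `worker_listeners` section, which is not changed by this commit. For context, a typical `worker_listeners` block has the shape sketched below; the port number and resource names are illustrative (compare the `media_worker` example earlier in this diff), not taken from this change.

```yaml
# Illustrative worker configuration fragment (values are examples):
worker_listeners:
  - type: http
    port: 8083
    resources:
      - names: [client, federation]
```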
@@ -145,9 +145,6 @@ In the config file for each worker, you must specify:
   with an `http` listener.
 * **Synapse 1.72 and older:** if handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for
   the main process (`worker_main_http_uri`). This config option is no longer required and is ignored when running Synapse 1.73 and newer.
-* **Synapse 1.83 and older:** The HTTP replication endpoint that the worker should talk to on the main synapse process
-  ([`worker_replication_host`](usage/configuration/config_documentation.md#worker_replication_host) and
-  [`worker_replication_http_port`](usage/configuration/config_documentation.md#worker_replication_http_port)). If using Synapse 1.84 and newer, these are not needed if `main` is defined on the [shared configuration](#shared-configuration) `instance_map`

 For example:
|
@ -178,7 +178,7 @@
|
|||
EOF
|
||||
'';
|
||||
# Start synapse when `devenv up` is run.
|
||||
processes.synapse.exec = "poetry run python -m synapse.app.homeserver -c homeserver.yaml --config-directory homeserver-config-overrides.d";
|
||||
processes.synapse.exec = "poetry run python -m synapse.app.homeserver -c homeserver.yaml -c homeserver-config-overrides.d";
|
||||
|
||||
# Define the perl modules we require to run SyTest.
|
||||
#
|
||||
|
|
poetry.lock (generated): 362 changes
@@ -41,9 +41,6 @@ files = [
     {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"},
 ]

-[package.dependencies]
-importlib-metadata = {version = "*", markers = "python_version < \"3.8\""}
-
 [package.extras]
 cov = ["attrs[tests]", "coverage[toml] (>=5.3)"]
 dev = ["attrs[docs,tests]", "pre-commit"]
@@ -53,13 +50,13 @@ tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pyte
 [[package]]
 name = "authlib"
-version = "1.2.0"
+version = "1.2.1"
 description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients."
 optional = true
 python-versions = "*"
 files = [
-    {file = "Authlib-1.2.0-py2.py3-none-any.whl", hash = "sha256:4ddf4fd6cfa75c9a460b361d4bd9dac71ffda0be879dbe4292a02e92349ad55a"},
-    {file = "Authlib-1.2.0.tar.gz", hash = "sha256:4fa3e80883a5915ef9f5bc28630564bc4ed5b5af39812a3ff130ec76bd631e9d"},
+    {file = "Authlib-1.2.1-py2.py3-none-any.whl", hash = "sha256:c88984ea00149a90e3537c964327da930779afa4564e354edfd98410bea01911"},
+    {file = "Authlib-1.2.1.tar.gz", hash = "sha256:421f7c6b468d907ca2d9afede256f068f87e34d23dd221c07d13d4c234726afb"},
 ]

 [package.dependencies]
@@ -190,7 +187,6 @@ packaging = ">=22.0"
 pathspec = ">=0.9.0"
 platformdirs = ">=2"
 tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
-typed-ast = {version = ">=1.4.2", markers = "python_version < \"3.8\" and implementation_name == \"cpython\""}
 typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""}

 [package.extras]
@@ -412,7 +408,6 @@ files = [
 [package.dependencies]
 colorama = {version = "*", markers = "platform_system == \"Windows\""}
-importlib-metadata = {version = "*", markers = "python_version < \"3.8\""}

 [[package]]
 name = "click-default-group"
@@ -601,7 +596,6 @@ files = [
 [package.dependencies]
 gitdb = ">=4.0.1,<5"
-typing-extensions = {version = ">=3.7.4.3", markers = "python_version < \"3.8\""}

 [[package]]
 name = "hiredis"
@@ -837,23 +831,22 @@ files = [
 [[package]]
 name = "importlib-metadata"
-version = "6.6.0"
+version = "6.7.0"
 description = "Read metadata from Python packages"
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "importlib_metadata-6.6.0-py3-none-any.whl", hash = "sha256:43dd286a2cd8995d5eaef7fee2066340423b818ed3fd70adf0bad5f1fac53fed"},
-    {file = "importlib_metadata-6.6.0.tar.gz", hash = "sha256:92501cdf9cc66ebd3e612f1b4f0c0765dfa42f0fa38ffb319b6bd84dd675d705"},
+    {file = "importlib_metadata-6.7.0-py3-none-any.whl", hash = "sha256:cb52082e659e97afc5dac71e79de97d8681de3aa07ff18578330904a9d18e5b5"},
+    {file = "importlib_metadata-6.7.0.tar.gz", hash = "sha256:1aaf550d4f73e5d6783e7acb77aec43d49da8017410afae93822cc9cca98c4d4"},
 ]

 [package.dependencies]
-typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""}
 zipp = ">=0.5"

 [package.extras]
 docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
 perf = ["ipython"]
-testing = ["flake8 (<5)", "flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)"]
+testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"]

 [[package]]
 name = "importlib-resources"
@@ -987,11 +980,9 @@ files = [
 [package.dependencies]
 attrs = ">=17.4.0"
-importlib-metadata = {version = "*", markers = "python_version < \"3.8\""}
 importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""}
 pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""}
 pyrsistent = ">=0.14.0,<0.17.0 || >0.17.0,<0.17.1 || >0.17.1,<0.17.2 || >0.17.2"
-typing-extensions = {version = "*", markers = "python_version < \"3.8\""}

 [package.extras]
 format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"]
@@ -1082,95 +1073,108 @@ pyasn1 = ">=0.4.6"
 [[package]]
 name = "lxml"
-version = "4.9.2"
+version = "4.9.3"
 description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API."
 optional = true
 python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, != 3.4.*"
 files = [
{file = "lxml-4.9.2-cp27-cp27m-macosx_10_15_x86_64.whl", hash = "sha256:76cf573e5a365e790396a5cc2b909812633409306c6531a6877c59061e42c4f2"},
|
||||
{file = "lxml-4.9.2-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b1f42b6921d0e81b1bcb5e395bc091a70f41c4d4e55ba99c6da2b31626c44892"},
|
||||
{file = "lxml-4.9.2-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:9f102706d0ca011de571de32c3247c6476b55bb6bc65a20f682f000b07a4852a"},
|
||||
{file = "lxml-4.9.2-cp27-cp27m-win32.whl", hash = "sha256:8d0b4612b66ff5d62d03bcaa043bb018f74dfea51184e53f067e6fdcba4bd8de"},
|
||||
{file = "lxml-4.9.2-cp27-cp27m-win_amd64.whl", hash = "sha256:4c8f293f14abc8fd3e8e01c5bd86e6ed0b6ef71936ded5bf10fe7a5efefbaca3"},
|
||||
{file = "lxml-4.9.2-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2899456259589aa38bfb018c364d6ae7b53c5c22d8e27d0ec7609c2a1ff78b50"},
|
||||
{file = "lxml-4.9.2-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6749649eecd6a9871cae297bffa4ee76f90b4504a2a2ab528d9ebe912b101975"},
|
||||
{file = "lxml-4.9.2-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:a08cff61517ee26cb56f1e949cca38caabe9ea9fbb4b1e10a805dc39844b7d5c"},
|
||||
{file = "lxml-4.9.2-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:85cabf64adec449132e55616e7ca3e1000ab449d1d0f9d7f83146ed5bdcb6d8a"},
|
||||
{file = "lxml-4.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:8340225bd5e7a701c0fa98284c849c9b9fc9238abf53a0ebd90900f25d39a4e4"},
|
||||
{file = "lxml-4.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:1ab8f1f932e8f82355e75dda5413a57612c6ea448069d4fb2e217e9a4bed13d4"},
|
||||
{file = "lxml-4.9.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:699a9af7dffaf67deeae27b2112aa06b41c370d5e7633e0ee0aea2e0b6c211f7"},
|
||||
{file = "lxml-4.9.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b9cc34af337a97d470040f99ba4282f6e6bac88407d021688a5d585e44a23184"},
|
||||
{file = "lxml-4.9.2-cp310-cp310-win32.whl", hash = "sha256:d02a5399126a53492415d4906ab0ad0375a5456cc05c3fc0fc4ca11771745cda"},
|
||||
{file = "lxml-4.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:a38486985ca49cfa574a507e7a2215c0c780fd1778bb6290c21193b7211702ab"},
|
||||
{file = "lxml-4.9.2-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:c83203addf554215463b59f6399835201999b5e48019dc17f182ed5ad87205c9"},
|
||||
{file = "lxml-4.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:2a87fa548561d2f4643c99cd13131acb607ddabb70682dcf1dff5f71f781a4bf"},
|
||||
{file = "lxml-4.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:d6b430a9938a5a5d85fc107d852262ddcd48602c120e3dbb02137c83d212b380"},
|
||||
{file = "lxml-4.9.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3efea981d956a6f7173b4659849f55081867cf897e719f57383698af6f618a92"},
|
||||
{file = "lxml-4.9.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:df0623dcf9668ad0445e0558a21211d4e9a149ea8f5666917c8eeec515f0a6d1"},
|
||||
{file = "lxml-4.9.2-cp311-cp311-win32.whl", hash = "sha256:da248f93f0418a9e9d94b0080d7ebc407a9a5e6d0b57bb30db9b5cc28de1ad33"},
|
||||
{file = "lxml-4.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:3818b8e2c4b5148567e1b09ce739006acfaa44ce3156f8cbbc11062994b8e8dd"},
|
||||
{file = "lxml-4.9.2-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ca989b91cf3a3ba28930a9fc1e9aeafc2a395448641df1f387a2d394638943b0"},
|
||||
{file = "lxml-4.9.2-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:822068f85e12a6e292803e112ab876bc03ed1f03dddb80154c395f891ca6b31e"},
|
||||
{file = "lxml-4.9.2-cp35-cp35m-win32.whl", hash = "sha256:be7292c55101e22f2a3d4d8913944cbea71eea90792bf914add27454a13905df"},
|
||||
{file = "lxml-4.9.2-cp35-cp35m-win_amd64.whl", hash = "sha256:998c7c41910666d2976928c38ea96a70d1aa43be6fe502f21a651e17483a43c5"},
|
||||
{file = "lxml-4.9.2-cp36-cp36m-macosx_10_15_x86_64.whl", hash = "sha256:b26a29f0b7fc6f0897f043ca366142d2b609dc60756ee6e4e90b5f762c6adc53"},
|
||||
{file = "lxml-4.9.2-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:ab323679b8b3030000f2be63e22cdeea5b47ee0abd2d6a1dc0c8103ddaa56cd7"},
|
||||
{file = "lxml-4.9.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:689bb688a1db722485e4610a503e3e9210dcc20c520b45ac8f7533c837be76fe"},
|
||||
{file = "lxml-4.9.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:f49e52d174375a7def9915c9f06ec4e569d235ad428f70751765f48d5926678c"},
|
||||
{file = "lxml-4.9.2-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:36c3c175d34652a35475a73762b545f4527aec044910a651d2bf50de9c3352b1"},
|
||||
{file = "lxml-4.9.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a35f8b7fa99f90dd2f5dc5a9fa12332642f087a7641289ca6c40d6e1a2637d8e"},
|
||||
{file = "lxml-4.9.2-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:58bfa3aa19ca4c0f28c5dde0ff56c520fbac6f0daf4fac66ed4c8d2fb7f22e74"},
|
||||
{file = "lxml-4.9.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bc718cd47b765e790eecb74d044cc8d37d58562f6c314ee9484df26276d36a38"},
|
||||
{file = "lxml-4.9.2-cp36-cp36m-win32.whl", hash = "sha256:d5bf6545cd27aaa8a13033ce56354ed9e25ab0e4ac3b5392b763d8d04b08e0c5"},
|
||||
{file = "lxml-4.9.2-cp36-cp36m-win_amd64.whl", hash = "sha256:3ab9fa9d6dc2a7f29d7affdf3edebf6ece6fb28a6d80b14c3b2fb9d39b9322c3"},
|
||||
{file = "lxml-4.9.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:05ca3f6abf5cf78fe053da9b1166e062ade3fa5d4f92b4ed688127ea7d7b1d03"},
|
||||
{file = "lxml-4.9.2-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:a5da296eb617d18e497bcf0a5c528f5d3b18dadb3619fbdadf4ed2356ef8d941"},
|
||||
{file = "lxml-4.9.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:04876580c050a8c5341d706dd464ff04fd597095cc8c023252566a8826505726"},
|
||||
{file = "lxml-4.9.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:c9ec3eaf616d67db0764b3bb983962b4f385a1f08304fd30c7283954e6a7869b"},
|
||||
{file = "lxml-4.9.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2a29ba94d065945944016b6b74e538bdb1751a1db6ffb80c9d3c2e40d6fa9894"},
|
||||
{file = "lxml-4.9.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a82d05da00a58b8e4c0008edbc8a4b6ec5a4bc1e2ee0fb6ed157cf634ed7fa45"},
|
||||
{file = "lxml-4.9.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:223f4232855ade399bd409331e6ca70fb5578efef22cf4069a6090acc0f53c0e"},
|
||||
{file = "lxml-4.9.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d17bc7c2ccf49c478c5bdd447594e82692c74222698cfc9b5daae7ae7e90743b"},
|
||||
{file = "lxml-4.9.2-cp37-cp37m-win32.whl", hash = "sha256:b64d891da92e232c36976c80ed7ebb383e3f148489796d8d31a5b6a677825efe"},
|
||||
{file = "lxml-4.9.2-cp37-cp37m-win_amd64.whl", hash = "sha256:a0a336d6d3e8b234a3aae3c674873d8f0e720b76bc1d9416866c41cd9500ffb9"},
|
||||
{file = "lxml-4.9.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:da4dd7c9c50c059aba52b3524f84d7de956f7fef88f0bafcf4ad7dde94a064e8"},
|
||||
{file = "lxml-4.9.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:821b7f59b99551c69c85a6039c65b75f5683bdc63270fec660f75da67469ca24"},
|
||||
{file = "lxml-4.9.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:e5168986b90a8d1f2f9dc1b841467c74221bd752537b99761a93d2d981e04889"},
|
||||
{file = "lxml-4.9.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:8e20cb5a47247e383cf4ff523205060991021233ebd6f924bca927fcf25cf86f"},
|
||||
{file = "lxml-4.9.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:13598ecfbd2e86ea7ae45ec28a2a54fb87ee9b9fdb0f6d343297d8e548392c03"},
|
||||
{file = "lxml-4.9.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:880bbbcbe2fca64e2f4d8e04db47bcdf504936fa2b33933efd945e1b429bea8c"},
|
||||
{file = "lxml-4.9.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:7d2278d59425777cfcb19735018d897ca8303abe67cc735f9f97177ceff8027f"},
|
||||
{file = "lxml-4.9.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5344a43228767f53a9df6e5b253f8cdca7dfc7b7aeae52551958192f56d98457"},
|
||||
{file = "lxml-4.9.2-cp38-cp38-win32.whl", hash = "sha256:925073b2fe14ab9b87e73f9a5fde6ce6392da430f3004d8b72cc86f746f5163b"},
|
||||
{file = "lxml-4.9.2-cp38-cp38-win_amd64.whl", hash = "sha256:9b22c5c66f67ae00c0199f6055705bc3eb3fcb08d03d2ec4059a2b1b25ed48d7"},
|
||||
{file = "lxml-4.9.2-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:5f50a1c177e2fa3ee0667a5ab79fdc6b23086bc8b589d90b93b4bd17eb0e64d1"},
|
||||
{file = "lxml-4.9.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:090c6543d3696cbe15b4ac6e175e576bcc3f1ccfbba970061b7300b0c15a2140"},
|
||||
{file = "lxml-4.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:63da2ccc0857c311d764e7d3d90f429c252e83b52d1f8f1d1fe55be26827d1f4"},
|
||||
{file = "lxml-4.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:5b4545b8a40478183ac06c073e81a5ce4cf01bf1734962577cf2bb569a5b3bbf"},
|
||||
{file = "lxml-4.9.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2e430cd2824f05f2d4f687701144556646bae8f249fd60aa1e4c768ba7018947"},
|
||||
{file = "lxml-4.9.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6804daeb7ef69e7b36f76caddb85cccd63d0c56dedb47555d2fc969e2af6a1a5"},
|
||||
{file = "lxml-4.9.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a6e441a86553c310258aca15d1c05903aaf4965b23f3bc2d55f200804e005ee5"},
|
||||
{file = "lxml-4.9.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ca34efc80a29351897e18888c71c6aca4a359247c87e0b1c7ada14f0ab0c0fb2"},
|
||||
{file = "lxml-4.9.2-cp39-cp39-win32.whl", hash = "sha256:6b418afe5df18233fc6b6093deb82a32895b6bb0b1155c2cdb05203f583053f1"},
|
||||
{file = "lxml-4.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:f1496ea22ca2c830cbcbd473de8f114a320da308438ae65abad6bab7867fe38f"},
|
||||
{file = "lxml-4.9.2-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:b264171e3143d842ded311b7dccd46ff9ef34247129ff5bf5066123c55c2431c"},
|
||||
{file = "lxml-4.9.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0dc313ef231edf866912e9d8f5a042ddab56c752619e92dfd3a2c277e6a7299a"},
|
||||
{file = "lxml-4.9.2-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:16efd54337136e8cd72fb9485c368d91d77a47ee2d42b057564aae201257d419"},
|
||||
{file = "lxml-4.9.2-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:0f2b1e0d79180f344ff9f321327b005ca043a50ece8713de61d1cb383fb8ac05"},
|
||||
{file = "lxml-4.9.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:7b770ed79542ed52c519119473898198761d78beb24b107acf3ad65deae61f1f"},
|
||||
{file = "lxml-4.9.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:efa29c2fe6b4fdd32e8ef81c1528506895eca86e1d8c4657fda04c9b3786ddf9"},
|
||||
{file = "lxml-4.9.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7e91ee82f4199af8c43d8158024cbdff3d931df350252288f0d4ce656df7f3b5"},
|
||||
{file = "lxml-4.9.2-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:b23e19989c355ca854276178a0463951a653309fb8e57ce674497f2d9f208746"},
|
||||
{file = "lxml-4.9.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:01d36c05f4afb8f7c20fd9ed5badca32a2029b93b1750f571ccc0b142531caf7"},
|
||||
{file = "lxml-4.9.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7b515674acfdcadb0eb5d00d8a709868173acece5cb0be3dd165950cbfdf5409"},
|
||||
{file = "lxml-4.9.2.tar.gz", hash = "sha256:2455cfaeb7ac70338b3257f41e21f0724f4b5b0c0e7702da67ee6c3640835b67"},
|
||||
{file = "lxml-4.9.3-cp27-cp27m-macosx_11_0_x86_64.whl", hash = "sha256:b0a545b46b526d418eb91754565ba5b63b1c0b12f9bd2f808c852d9b4b2f9b5c"},
|
||||
{file = "lxml-4.9.3-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:075b731ddd9e7f68ad24c635374211376aa05a281673ede86cbe1d1b3455279d"},
|
||||
{file = "lxml-4.9.3-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1e224d5755dba2f4a9498e150c43792392ac9b5380aa1b845f98a1618c94eeef"},
|
||||
{file = "lxml-4.9.3-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c0781a98ff5e6586926293e59480b64ddd46282953203c76ae15dbbbf302e8bb"},
|
||||
{file = "lxml-4.9.3-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:cef2502e7e8a96fe5ad686d60b49e1ab03e438bd9123987994528febd569868e"},
|
||||
{file = "lxml-4.9.3-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:b86164d2cff4d3aaa1f04a14685cbc072efd0b4f99ca5708b2ad1b9b5988a991"},
|
||||
{file = "lxml-4.9.3-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:42871176e7896d5d45138f6d28751053c711ed4d48d8e30b498da155af39aebd"},
|
||||
{file = "lxml-4.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:ae8b9c6deb1e634ba4f1930eb67ef6e6bf6a44b6eb5ad605642b2d6d5ed9ce3c"},
|
||||
{file = "lxml-4.9.3-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:411007c0d88188d9f621b11d252cce90c4a2d1a49db6c068e3c16422f306eab8"},
|
||||
{file = "lxml-4.9.3-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:cd47b4a0d41d2afa3e58e5bf1f62069255aa2fd6ff5ee41604418ca925911d76"},
|
||||
{file = "lxml-4.9.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0e2cb47860da1f7e9a5256254b74ae331687b9672dfa780eed355c4c9c3dbd23"},
|
||||
{file = "lxml-4.9.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1247694b26342a7bf47c02e513d32225ededd18045264d40758abeb3c838a51f"},
|
||||
{file = "lxml-4.9.3-cp310-cp310-win32.whl", hash = "sha256:cdb650fc86227eba20de1a29d4b2c1bfe139dc75a0669270033cb2ea3d391b85"},
|
||||
{file = "lxml-4.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:97047f0d25cd4bcae81f9ec9dc290ca3e15927c192df17331b53bebe0e3ff96d"},
|
||||
{file = "lxml-4.9.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:1f447ea5429b54f9582d4b955f5f1985f278ce5cf169f72eea8afd9502973dd5"},
|
||||
{file = "lxml-4.9.3-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:57d6ba0ca2b0c462f339640d22882acc711de224d769edf29962b09f77129cbf"},
|
||||
{file = "lxml-4.9.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:9767e79108424fb6c3edf8f81e6730666a50feb01a328f4a016464a5893f835a"},
|
||||
{file = "lxml-4.9.3-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:71c52db65e4b56b8ddc5bb89fb2e66c558ed9d1a74a45ceb7dcb20c191c3df2f"},
|
||||
{file = "lxml-4.9.3-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:d73d8ecf8ecf10a3bd007f2192725a34bd62898e8da27eb9d32a58084f93962b"},
|
||||
{file = "lxml-4.9.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0a3d3487f07c1d7f150894c238299934a2a074ef590b583103a45002035be120"},
|
||||
{file = "lxml-4.9.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9e28c51fa0ce5674be9f560c6761c1b441631901993f76700b1b30ca6c8378d6"},
|
||||
{file = "lxml-4.9.3-cp311-cp311-win32.whl", hash = "sha256:0bfd0767c5c1de2551a120673b72e5d4b628737cb05414f03c3277bf9bed3305"},
|
||||
{file = "lxml-4.9.3-cp311-cp311-win_amd64.whl", hash = "sha256:25f32acefac14ef7bd53e4218fe93b804ef6f6b92ffdb4322bb6d49d94cad2bc"},
|
||||
{file = "lxml-4.9.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:d3ff32724f98fbbbfa9f49d82852b159e9784d6094983d9a8b7f2ddaebb063d4"},
|
||||
{file = "lxml-4.9.3-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:48d6ed886b343d11493129e019da91d4039826794a3e3027321c56d9e71505be"},
|
||||
{file = "lxml-4.9.3-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9a92d3faef50658dd2c5470af249985782bf754c4e18e15afb67d3ab06233f13"},
|
||||
{file = "lxml-4.9.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b4e4bc18382088514ebde9328da057775055940a1f2e18f6ad2d78aa0f3ec5b9"},
|
||||
{file = "lxml-4.9.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:fc9b106a1bf918db68619fdcd6d5ad4f972fdd19c01d19bdb6bf63f3589a9ec5"},
|
||||
{file = "lxml-4.9.3-cp312-cp312-win_amd64.whl", hash = "sha256:d37017287a7adb6ab77e1c5bee9bcf9660f90ff445042b790402a654d2ad81d8"},
|
||||
{file = "lxml-4.9.3-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:56dc1f1ebccc656d1b3ed288f11e27172a01503fc016bcabdcbc0978b19352b7"},
|
||||
{file = "lxml-4.9.3-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:578695735c5a3f51569810dfebd05dd6f888147a34f0f98d4bb27e92b76e05c2"},
|
||||
{file = "lxml-4.9.3-cp35-cp35m-win32.whl", hash = "sha256:704f61ba8c1283c71b16135caf697557f5ecf3e74d9e453233e4771d68a1f42d"},
|
||||
{file = "lxml-4.9.3-cp35-cp35m-win_amd64.whl", hash = "sha256:c41bfca0bd3532d53d16fd34d20806d5c2b1ace22a2f2e4c0008570bf2c58833"},
|
||||
{file = "lxml-4.9.3-cp36-cp36m-macosx_11_0_x86_64.whl", hash = "sha256:64f479d719dc9f4c813ad9bb6b28f8390360660b73b2e4beb4cb0ae7104f1c12"},
|
||||
{file = "lxml-4.9.3-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:dd708cf4ee4408cf46a48b108fb9427bfa00b9b85812a9262b5c668af2533ea5"},
|
||||
{file = "lxml-4.9.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c31c7462abdf8f2ac0577d9f05279727e698f97ecbb02f17939ea99ae8daa98"},
|
||||
{file = "lxml-4.9.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:e3cd95e10c2610c360154afdc2f1480aea394f4a4f1ea0a5eacce49640c9b190"},
|
||||
{file = "lxml-4.9.3-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:4930be26af26ac545c3dffb662521d4e6268352866956672231887d18f0eaab2"},
|
||||
{file = "lxml-4.9.3-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4aec80cde9197340bc353d2768e2a75f5f60bacda2bab72ab1dc499589b3878c"},
|
||||
{file = "lxml-4.9.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:14e019fd83b831b2e61baed40cab76222139926b1fb5ed0e79225bc0cae14584"},
|
||||
{file = "lxml-4.9.3-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:0c0850c8b02c298d3c7006b23e98249515ac57430e16a166873fc47a5d549287"},
|
||||
{file = "lxml-4.9.3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:aca086dc5f9ef98c512bac8efea4483eb84abbf926eaeedf7b91479feb092458"},
|
||||
{file = "lxml-4.9.3-cp36-cp36m-win32.whl", hash = "sha256:50baa9c1c47efcaef189f31e3d00d697c6d4afda5c3cde0302d063492ff9b477"},
|
||||
{file = "lxml-4.9.3-cp36-cp36m-win_amd64.whl", hash = "sha256:bef4e656f7d98aaa3486d2627e7d2df1157d7e88e7efd43a65aa5dd4714916cf"},
|
||||
{file = "lxml-4.9.3-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:46f409a2d60f634fe550f7133ed30ad5321ae2e6630f13657fb9479506b00601"},
|
||||
{file = "lxml-4.9.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:4c28a9144688aef80d6ea666c809b4b0e50010a2aca784c97f5e6bf143d9f129"},
|
||||
{file = "lxml-4.9.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:141f1d1a9b663c679dc524af3ea1773e618907e96075262726c7612c02b149a4"},
|
||||
{file = "lxml-4.9.3-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:53ace1c1fd5a74ef662f844a0413446c0629d151055340e9893da958a374f70d"},
|
||||
{file = "lxml-4.9.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:17a753023436a18e27dd7769e798ce302963c236bc4114ceee5b25c18c52c693"},
|
||||
{file = "lxml-4.9.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7d298a1bd60c067ea75d9f684f5f3992c9d6766fadbc0bcedd39750bf344c2f4"},
|
||||
{file = "lxml-4.9.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:081d32421db5df44c41b7f08a334a090a545c54ba977e47fd7cc2deece78809a"},
|
||||
{file = "lxml-4.9.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:23eed6d7b1a3336ad92d8e39d4bfe09073c31bfe502f20ca5116b2a334f8ec02"},
|
||||
{file = "lxml-4.9.3-cp37-cp37m-win32.whl", hash = "sha256:1509dd12b773c02acd154582088820893109f6ca27ef7291b003d0e81666109f"},
|
||||
{file = "lxml-4.9.3-cp37-cp37m-win_amd64.whl", hash = "sha256:120fa9349a24c7043854c53cae8cec227e1f79195a7493e09e0c12e29f918e52"},
|
||||
{file = "lxml-4.9.3-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:4d2d1edbca80b510443f51afd8496be95529db04a509bc8faee49c7b0fb6d2cc"},
|
||||
{file = "lxml-4.9.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:8d7e43bd40f65f7d97ad8ef5c9b1778943d02f04febef12def25f7583d19baac"},
|
||||
{file = "lxml-4.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:71d66ee82e7417828af6ecd7db817913cb0cf9d4e61aa0ac1fde0583d84358db"},
|
||||
{file = "lxml-4.9.3-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:6fc3c450eaa0b56f815c7b62f2b7fba7266c4779adcf1cece9e6deb1de7305ce"},
|
||||
{file = "lxml-4.9.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:65299ea57d82fb91c7f019300d24050c4ddeb7c5a190e076b5f48a2b43d19c42"},
|
||||
{file = "lxml-4.9.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:eadfbbbfb41b44034a4c757fd5d70baccd43296fb894dba0295606a7cf3124aa"},
|
||||
{file = "lxml-4.9.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:3e9bdd30efde2b9ccfa9cb5768ba04fe71b018a25ea093379c857c9dad262c40"},
|
||||
{file = "lxml-4.9.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fcdd00edfd0a3001e0181eab3e63bd5c74ad3e67152c84f93f13769a40e073a7"},
|
||||
{file = "lxml-4.9.3-cp38-cp38-win32.whl", hash = "sha256:57aba1bbdf450b726d58b2aea5fe47c7875f5afb2c4a23784ed78f19a0462574"},
|
||||
{file = "lxml-4.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:92af161ecbdb2883c4593d5ed4815ea71b31fafd7fd05789b23100d081ecac96"},
|
||||
{file = "lxml-4.9.3-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:9bb6ad405121241e99a86efff22d3ef469024ce22875a7ae045896ad23ba2340"},
|
||||
{file = "lxml-4.9.3-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:8ed74706b26ad100433da4b9d807eae371efaa266ffc3e9191ea436087a9d6a7"},
|
||||
{file = "lxml-4.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:fbf521479bcac1e25a663df882c46a641a9bff6b56dc8b0fafaebd2f66fb231b"},
|
||||
{file = "lxml-4.9.3-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:303bf1edce6ced16bf67a18a1cf8339d0db79577eec5d9a6d4a80f0fb10aa2da"},
|
||||
{file = "lxml-4.9.3-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:5515edd2a6d1a5a70bfcdee23b42ec33425e405c5b351478ab7dc9347228f96e"},
|
||||
{file = "lxml-4.9.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:690dafd0b187ed38583a648076865d8c229661ed20e48f2335d68e2cf7dc829d"},
|
||||
{file = "lxml-4.9.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:b6420a005548ad52154c8ceab4a1290ff78d757f9e5cbc68f8c77089acd3c432"},
|
||||
{file = "lxml-4.9.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bb3bb49c7a6ad9d981d734ef7c7193bc349ac338776a0360cc671eaee89bcf69"},
|
||||
{file = "lxml-4.9.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d27be7405547d1f958b60837dc4c1007da90b8b23f54ba1f8b728c78fdb19d50"},
|
||||
{file = "lxml-4.9.3-cp39-cp39-win32.whl", hash = "sha256:8df133a2ea5e74eef5e8fc6f19b9e085f758768a16e9877a60aec455ed2609b2"},
|
||||
{file = "lxml-4.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:4dd9a263e845a72eacb60d12401e37c616438ea2e5442885f65082c276dfb2b2"},
|
||||
{file = "lxml-4.9.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6689a3d7fd13dc687e9102a27e98ef33730ac4fe37795d5036d18b4d527abd35"},
|
||||
{file = "lxml-4.9.3-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:f6bdac493b949141b733c5345b6ba8f87a226029cbabc7e9e121a413e49441e0"},
|
||||
{file = "lxml-4.9.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:05186a0f1346ae12553d66df1cfce6f251589fea3ad3da4f3ef4e34b2d58c6a3"},
|
||||
{file = "lxml-4.9.3-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c2006f5c8d28dee289f7020f721354362fa304acbaaf9745751ac4006650254b"},
|
||||
{file = "lxml-4.9.3-pp38-pypy38_pp73-macosx_11_0_x86_64.whl", hash = "sha256:5c245b783db29c4e4fbbbfc9c5a78be496c9fea25517f90606aa1f6b2b3d5f7b"},
|
||||
{file = "lxml-4.9.3-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:4fb960a632a49f2f089d522f70496640fdf1218f1243889da3822e0a9f5f3ba7"},
|
||||
{file = "lxml-4.9.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:50670615eaf97227d5dc60de2dc99fb134a7130d310d783314e7724bf163f75d"},
|
||||
{file = "lxml-4.9.3-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9719fe17307a9e814580af1f5c6e05ca593b12fb7e44fe62450a5384dbf61b4b"},
|
||||
{file = "lxml-4.9.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:3331bece23c9ee066e0fb3f96c61322b9e0f54d775fccefff4c38ca488de283a"},
|
||||
{file = "lxml-4.9.3-pp39-pypy39_pp73-macosx_11_0_x86_64.whl", hash = "sha256:ed667f49b11360951e201453fc3967344d0d0263aa415e1619e85ae7fd17b4e0"},
|
||||
{file = "lxml-4.9.3-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:8b77946fd508cbf0fccd8e400a7f71d4ac0e1595812e66025bac475a8e811694"},
|
||||
{file = "lxml-4.9.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:e4da8ca0c0c0aea88fd46be8e44bd49716772358d648cce45fe387f7b92374a7"},
|
||||
{file = "lxml-4.9.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fe4bda6bd4340caa6e5cf95e73f8fea5c4bfc55763dd42f1b50a94c1b4a2fbd4"},
|
||||
{file = "lxml-4.9.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:f3df3db1d336b9356dd3112eae5f5c2b8b377f3bc826848567f10bfddfee77e9"},
|
||||
{file = "lxml-4.9.3.tar.gz", hash = "sha256:48628bd53a426c9eb9bc066a923acaa0878d1e86129fd5359aee99285f4eed9c"},
|
||||
 ]

 [package.extras]
 cssselect = ["cssselect (>=0.7)"]
 html5 = ["html5lib"]
 htmlsoup = ["BeautifulSoup4"]
-source = ["Cython (>=0.29.7)"]
+source = ["Cython (>=0.29.35)"]

 [[package]]
 name = "lxml-stubs"
@@ -1199,7 +1203,6 @@ files = [
 [package.dependencies]
 mdurl = ">=0.1,<1.0"
-typing_extensions = {version = ">=3.7.4", markers = "python_version < \"3.8\""}

 [package.extras]
 benchmarking = ["psutil", "pytest", "pytest-benchmark"]
@ -1283,7 +1286,6 @@ files = [

[package.dependencies]
attrs = "*"
importlib-metadata = {version = ">=1.4", markers = "python_version < \"3.8\""}

[package.extras]
dev = ["aiounittest", "black (==22.3.0)", "build (==0.8.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "mypy (==0.910)", "tox", "twine (==4.0.1)", "twisted"]
@ -1459,7 +1461,6 @@ files = [
[package.dependencies]
mypy-extensions = ">=0.4.3"
tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
typed-ast = {version = ">=1.4.0,<2", markers = "python_version < \"3.8\""}
typing-extensions = ">=3.10"

[package.extras]
@ -1721,9 +1722,6 @@ files = [
    {file = "platformdirs-3.1.1.tar.gz", hash = "sha256:024996549ee88ec1a9aa99ff7f8fc819bb59e2c3477b410d90a16d32d6e707aa"},
]

[package.dependencies]
typing-extensions = {version = ">=4.4", markers = "python_version < \"3.8\""}

[package.extras]
docs = ["furo (>=2022.12.7)", "proselint (>=0.13)", "sphinx (>=6.1.3)", "sphinx-autodoc-typehints (>=1.22,!=1.23.4)"]
test = ["appdirs (==1.4.4)", "covdefaults (>=2.2.2)", "pytest (>=7.2.1)", "pytest-cov (>=4)", "pytest-mock (>=3.10)"]
@ -1829,47 +1827,47 @@ files = [
|
|||
|
||||
[[package]]
|
||||
name = "pydantic"
|
||||
version = "1.10.9"
|
||||
version = "1.10.10"
|
||||
description = "Data validation and settings management using python type hints"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
files = [
|
||||
{file = "pydantic-1.10.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e692dec4a40bfb40ca530e07805b1208c1de071a18d26af4a2a0d79015b352ca"},
|
||||
{file = "pydantic-1.10.9-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3c52eb595db83e189419bf337b59154bdcca642ee4b2a09e5d7797e41ace783f"},
|
||||
{file = "pydantic-1.10.9-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:939328fd539b8d0edf244327398a667b6b140afd3bf7e347cf9813c736211896"},
|
||||
{file = "pydantic-1.10.9-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b48d3d634bca23b172f47f2335c617d3fcb4b3ba18481c96b7943a4c634f5c8d"},
|
||||
{file = "pydantic-1.10.9-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:f0b7628fb8efe60fe66fd4adadd7ad2304014770cdc1f4934db41fe46cc8825f"},
|
||||
{file = "pydantic-1.10.9-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e1aa5c2410769ca28aa9a7841b80d9d9a1c5f223928ca8bec7e7c9a34d26b1d4"},
|
||||
{file = "pydantic-1.10.9-cp310-cp310-win_amd64.whl", hash = "sha256:eec39224b2b2e861259d6f3c8b6290d4e0fbdce147adb797484a42278a1a486f"},
|
||||
{file = "pydantic-1.10.9-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d111a21bbbfd85c17248130deac02bbd9b5e20b303338e0dbe0faa78330e37e0"},
|
||||
{file = "pydantic-1.10.9-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2e9aec8627a1a6823fc62fb96480abe3eb10168fd0d859ee3d3b395105ae19a7"},
|
||||
{file = "pydantic-1.10.9-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07293ab08e7b4d3c9d7de4949a0ea571f11e4557d19ea24dd3ae0c524c0c334d"},
|
||||
{file = "pydantic-1.10.9-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ee829b86ce984261d99ff2fd6e88f2230068d96c2a582f29583ed602ef3fc2c"},
|
||||
{file = "pydantic-1.10.9-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4b466a23009ff5cdd7076eb56aca537c745ca491293cc38e72bf1e0e00de5b91"},
|
||||
{file = "pydantic-1.10.9-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7847ca62e581e6088d9000f3c497267868ca2fa89432714e21a4fb33a04d52e8"},
|
||||
{file = "pydantic-1.10.9-cp311-cp311-win_amd64.whl", hash = "sha256:7845b31959468bc5b78d7b95ec52fe5be32b55d0d09983a877cca6aedc51068f"},
|
||||
{file = "pydantic-1.10.9-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:517a681919bf880ce1dac7e5bc0c3af1e58ba118fd774da2ffcd93c5f96eaece"},
|
||||
{file = "pydantic-1.10.9-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67195274fd27780f15c4c372f4ba9a5c02dad6d50647b917b6a92bf00b3d301a"},
|
||||
{file = "pydantic-1.10.9-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2196c06484da2b3fded1ab6dbe182bdabeb09f6318b7fdc412609ee2b564c49a"},
|
||||
{file = "pydantic-1.10.9-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:6257bb45ad78abacda13f15bde5886efd6bf549dd71085e64b8dcf9919c38b60"},
|
||||
{file = "pydantic-1.10.9-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:3283b574b01e8dbc982080d8287c968489d25329a463b29a90d4157de4f2baaf"},
|
||||
{file = "pydantic-1.10.9-cp37-cp37m-win_amd64.whl", hash = "sha256:5f8bbaf4013b9a50e8100333cc4e3fa2f81214033e05ac5aa44fa24a98670a29"},
|
||||
{file = "pydantic-1.10.9-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b9cd67fb763248cbe38f0593cd8611bfe4b8ad82acb3bdf2b0898c23415a1f82"},
|
||||
{file = "pydantic-1.10.9-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f50e1764ce9353be67267e7fd0da08349397c7db17a562ad036aa7c8f4adfdb6"},
|
||||
{file = "pydantic-1.10.9-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73ef93e5e1d3c8e83f1ff2e7fdd026d9e063c7e089394869a6e2985696693766"},
|
||||
{file = "pydantic-1.10.9-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:128d9453d92e6e81e881dd7e2484e08d8b164da5507f62d06ceecf84bf2e21d3"},
|
||||
{file = "pydantic-1.10.9-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ad428e92ab68798d9326bb3e5515bc927444a3d71a93b4a2ca02a8a5d795c572"},
|
||||
{file = "pydantic-1.10.9-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fab81a92f42d6d525dd47ced310b0c3e10c416bbfae5d59523e63ea22f82b31e"},
|
||||
{file = "pydantic-1.10.9-cp38-cp38-win_amd64.whl", hash = "sha256:963671eda0b6ba6926d8fc759e3e10335e1dc1b71ff2a43ed2efd6996634dafb"},
|
||||
{file = "pydantic-1.10.9-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:970b1bdc6243ef663ba5c7e36ac9ab1f2bfecb8ad297c9824b542d41a750b298"},
|
||||
{file = "pydantic-1.10.9-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7e1d5290044f620f80cf1c969c542a5468f3656de47b41aa78100c5baa2b8276"},
|
||||
{file = "pydantic-1.10.9-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83fcff3c7df7adff880622a98022626f4f6dbce6639a88a15a3ce0f96466cb60"},
|
||||
{file = "pydantic-1.10.9-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0da48717dc9495d3a8f215e0d012599db6b8092db02acac5e0d58a65248ec5bc"},
|
||||
{file = "pydantic-1.10.9-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:0a2aabdc73c2a5960e87c3ffebca6ccde88665616d1fd6d3db3178ef427b267a"},
|
||||
{file = "pydantic-1.10.9-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9863b9420d99dfa9c064042304868e8ba08e89081428a1c471858aa2af6f57c4"},
|
||||
{file = "pydantic-1.10.9-cp39-cp39-win_amd64.whl", hash = "sha256:e7c9900b43ac14110efa977be3da28931ffc74c27e96ee89fbcaaf0b0fe338e1"},
|
||||
{file = "pydantic-1.10.9-py3-none-any.whl", hash = "sha256:6cafde02f6699ce4ff643417d1a9223716ec25e228ddc3b436fe7e2d25a1f305"},
|
||||
{file = "pydantic-1.10.9.tar.gz", hash = "sha256:95c70da2cd3b6ddf3b9645ecaa8d98f3d80c606624b6d245558d202cd23ea3be"},
|
||||
{file = "pydantic-1.10.10-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:adad1ee4ab9888f12dac2529276704e719efcf472e38df7813f5284db699b4ec"},
|
||||
{file = "pydantic-1.10.10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7a7db03339893feef2092ff7b1afc9497beed15ebd4af84c3042a74abce02d48"},
|
||||
{file = "pydantic-1.10.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67b3714b97ff84b2689654851c2426389bcabfac9080617bcf4306c69db606f6"},
|
||||
{file = "pydantic-1.10.10-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edfdf0a5abc5c9bf2052ebaec20e67abd52e92d257e4f2d30e02c354ed3e6030"},
|
||||
{file = "pydantic-1.10.10-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:20a3b30fd255eeeb63caa9483502ba96b7795ce5bf895c6a179b3d909d9f53a6"},
|
||||
{file = "pydantic-1.10.10-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:db4c7f7e60ca6f7d6c1785070f3e5771fcb9b2d88546e334d2f2c3934d949028"},
|
||||
{file = "pydantic-1.10.10-cp310-cp310-win_amd64.whl", hash = "sha256:a2d5be50ac4a0976817144c7d653e34df2f9436d15555189f5b6f61161d64183"},
|
||||
{file = "pydantic-1.10.10-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:566a04ba755e8f701b074ffb134ddb4d429f75d5dced3fbd829a527aafe74c71"},
|
||||
{file = "pydantic-1.10.10-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f79db3652ed743309f116ba863dae0c974a41b688242482638b892246b7db21d"},
|
||||
{file = "pydantic-1.10.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c62376890b819bebe3c717a9ac841a532988372b7e600e76f75c9f7c128219d5"},
|
||||
{file = "pydantic-1.10.10-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4870f13a4fafd5bc3e93cff3169222534fad867918b188e83ee0496452978437"},
|
||||
{file = "pydantic-1.10.10-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:990027e77cda6072a566e433b6962ca3b96b4f3ae8bd54748e9d62a58284d9d7"},
|
||||
{file = "pydantic-1.10.10-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8c40964596809eb616d94f9c7944511f620a1103d63d5510440ed2908fc410af"},
|
||||
{file = "pydantic-1.10.10-cp311-cp311-win_amd64.whl", hash = "sha256:ea9eebc2ebcba3717e77cdeee3f6203ffc0e78db5f7482c68b1293e8cc156e5e"},
|
||||
{file = "pydantic-1.10.10-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:762aa598f79b4cac2f275d13336b2dd8662febee2a9c450a49a2ab3bec4b385f"},
|
||||
{file = "pydantic-1.10.10-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6dab5219659f95e357d98d70577b361383057fb4414cfdb587014a5f5c595f7b"},
|
||||
{file = "pydantic-1.10.10-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3d4ee957a727ccb5a36f1b0a6dbd9fad5dedd2a41eada99a8df55c12896e18d"},
|
||||
{file = "pydantic-1.10.10-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:b69f9138dec566962ec65623c9d57bee44412d2fc71065a5f3ebb3820bdeee96"},
|
||||
{file = "pydantic-1.10.10-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:7aa75d1bd9cc275cf9782f50f60cddaf74cbaae19b6ada2a28e737edac420312"},
|
||||
{file = "pydantic-1.10.10-cp37-cp37m-win_amd64.whl", hash = "sha256:9f62a727f5c590c78c2d12fda302d1895141b767c6488fe623098f8792255fe5"},
|
||||
{file = "pydantic-1.10.10-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:aac218feb4af73db8417ca7518fb3bade4534fcca6e3fb00f84966811dd94450"},
|
||||
{file = "pydantic-1.10.10-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:88546dc10a40b5b52cae87d64666787aeb2878f9a9b37825aedc2f362e7ae1da"},
|
||||
{file = "pydantic-1.10.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c41bbaae89e32fc582448e71974de738c055aef5ab474fb25692981a08df808a"},
|
||||
{file = "pydantic-1.10.10-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b71bd504d1573b0b722ae536e8ffb796bedeef978979d076bf206e77dcc55a5"},
|
||||
{file = "pydantic-1.10.10-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e088e3865a2270ecbc369924cd7d9fbc565667d9158e7f304e4097ebb9cf98dd"},
|
||||
{file = "pydantic-1.10.10-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3403a090db45d4027d2344859d86eb797484dfda0706cf87af79ace6a35274ef"},
|
||||
{file = "pydantic-1.10.10-cp38-cp38-win_amd64.whl", hash = "sha256:e0014e29637125f4997c174dd6167407162d7af0da73414a9340461ea8573252"},
|
||||
{file = "pydantic-1.10.10-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9965e49c6905840e526e5429b09e4c154355b6ecc0a2f05492eda2928190311d"},
|
||||
{file = "pydantic-1.10.10-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:748d10ab6089c5d196e1c8be9de48274f71457b01e59736f7a09c9dc34f51887"},
|
||||
{file = "pydantic-1.10.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86936c383f7c38fd26d35107eb669c85d8f46dfceae873264d9bab46fe1c7dde"},
|
||||
{file = "pydantic-1.10.10-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7a26841be620309a9697f5b1ffc47dce74909e350c5315ccdac7a853484d468a"},
|
||||
{file = "pydantic-1.10.10-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:409b810f387610cc7405ab2fa6f62bdf7ea485311845a242ebc0bd0496e7e5ac"},
|
||||
{file = "pydantic-1.10.10-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ce937a2a2c020bcad1c9fde02892392a1123de6dda906ddba62bfe8f3e5989a2"},
|
||||
{file = "pydantic-1.10.10-cp39-cp39-win_amd64.whl", hash = "sha256:37ebddef68370e6f26243acc94de56d291e01227a67b2ace26ea3543cf53dd5f"},
|
||||
{file = "pydantic-1.10.10-py3-none-any.whl", hash = "sha256:a5939ec826f7faec434e2d406ff5e4eaf1716eb1f247d68cd3d0b3612f7b4c8a"},
|
||||
{file = "pydantic-1.10.10.tar.gz", hash = "sha256:3b8d5bd97886f9eb59260594207c9f57dce14a6f869c6ceea90188715d29921a"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
|
@ -2060,7 +2058,6 @@ files = [
[package.dependencies]
cryptography = ">=3.1"
defusedxml = "*"
importlib-metadata = {version = ">=1.7.0", markers = "python_version < \"3.8\""}
importlib-resources = {version = "*", markers = "python_version < \"3.9\""}
pyopenssl = "*"
python-dateutil = "*"
@ -2245,28 +2242,28 @@ jupyter = ["ipywidgets (>=7.5.1,<9)"]
|
|||
|
||||
[[package]]
|
||||
name = "ruff"
|
||||
version = "0.0.275"
|
||||
version = "0.0.277"
|
||||
description = "An extremely fast Python linter, written in Rust."
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
files = [
|
||||
{file = "ruff-0.0.275-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:5e6554a072e7ce81eb6f0bec1cebd3dcb0e358652c0f4900d7d630d61691e914"},
|
||||
{file = "ruff-0.0.275-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:1cc599022fe5ffb143a965b8d659eb64161ab8ab4433d208777eab018a1aab67"},
|
||||
{file = "ruff-0.0.275-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5206fc1cd8c1c1deadd2e6360c0dbcd690f1c845da588ca9d32e4a764a402c60"},
|
||||
{file = "ruff-0.0.275-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0c4e6468da26f77b90cae35319d310999f471a8c352998e9b39937a23750149e"},
|
||||
{file = "ruff-0.0.275-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0dbdea02942131dbc15dd45f431d152224f15e1dd1859fcd0c0487b658f60f1a"},
|
||||
{file = "ruff-0.0.275-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:22efd9f41af27ef8fb9779462c46c35c89134d33e326c889971e10b2eaf50c63"},
|
||||
{file = "ruff-0.0.275-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2c09662112cfa22d7467a19252a546291fd0eae4f423e52b75a7a2000a1894db"},
|
||||
{file = "ruff-0.0.275-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80043726662144876a381efaab88841c88e8df8baa69559f96b22d4fa216bef1"},
|
||||
{file = "ruff-0.0.275-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5859ee543b01b7eb67835dfd505faa8bb7cc1550f0295c92c1401b45b42be399"},
|
||||
{file = "ruff-0.0.275-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:c8ace4d40a57b5ea3c16555f25a6b16bc5d8b2779ae1912ce2633543d4e9b1da"},
|
||||
{file = "ruff-0.0.275-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:8347fc16aa185aae275906c4ac5b770e00c896b6a0acd5ba521f158801911998"},
|
||||
{file = "ruff-0.0.275-py3-none-musllinux_1_2_i686.whl", hash = "sha256:ec43658c64bfda44fd84bbea9da8c7a3b34f65448192d1c4dd63e9f4e7abfdd4"},
|
||||
{file = "ruff-0.0.275-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:508b13f7ca37274cceaba4fb3ea5da6ca192356323d92acf39462337c33ad14e"},
|
||||
{file = "ruff-0.0.275-py3-none-win32.whl", hash = "sha256:6afb1c4422f24f361e877937e2a44b3f8176774a476f5e33845ebfe887dd5ec2"},
|
||||
{file = "ruff-0.0.275-py3-none-win_amd64.whl", hash = "sha256:d9b264d78621bf7b698b6755d4913ab52c19bd28bee1a16001f954d64c1a1220"},
|
||||
{file = "ruff-0.0.275-py3-none-win_arm64.whl", hash = "sha256:a19ce3bea71023eee5f0f089dde4a4272d088d5ac0b675867e074983238ccc65"},
|
||||
{file = "ruff-0.0.275.tar.gz", hash = "sha256:a63a0b645da699ae5c758fce19188e901b3033ec54d862d93fcd042addf7f38d"},
|
||||
{file = "ruff-0.0.277-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:3250b24333ef419b7a232080d9724ccc4d2da1dbbe4ce85c4caa2290d83200f8"},
|
||||
{file = "ruff-0.0.277-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:3e60605e07482183ba1c1b7237eca827bd6cbd3535fe8a4ede28cbe2a323cb97"},
|
||||
{file = "ruff-0.0.277-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7baa97c3d7186e5ed4d5d4f6834d759a27e56cf7d5874b98c507335f0ad5aadb"},
|
||||
{file = "ruff-0.0.277-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:74e4b206cb24f2e98a615f87dbe0bde18105217cbcc8eb785bb05a644855ba50"},
|
||||
{file = "ruff-0.0.277-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:479864a3ccd8a6a20a37a6e7577bdc2406868ee80b1e65605478ad3b8eb2ba0b"},
|
||||
{file = "ruff-0.0.277-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:468bfb0a7567443cec3d03cf408d6f562b52f30c3c29df19927f1e0e13a40cd7"},
|
||||
{file = "ruff-0.0.277-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f32ec416c24542ca2f9cc8c8b65b84560530d338aaf247a4a78e74b99cd476b4"},
|
||||
{file = "ruff-0.0.277-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:14a7b2f00f149c5a295f188a643ac25226ff8a4d08f7a62b1d4b0a1dc9f9b85c"},
|
||||
{file = "ruff-0.0.277-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9879f59f763cc5628aa01c31ad256a0f4dc61a29355c7315b83c2a5aac932b5"},
|
||||
{file = "ruff-0.0.277-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:f612e0a14b3d145d90eb6ead990064e22f6f27281d847237560b4e10bf2251f3"},
|
||||
{file = "ruff-0.0.277-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:323b674c98078be9aaded5b8b51c0d9c424486566fb6ec18439b496ce79e5998"},
|
||||
{file = "ruff-0.0.277-py3-none-musllinux_1_2_i686.whl", hash = "sha256:3a43fbe026ca1a2a8c45aa0d600a0116bec4dfa6f8bf0c3b871ecda51ef2b5dd"},
|
||||
{file = "ruff-0.0.277-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:734165ea8feb81b0d53e3bf523adc2413fdb76f1264cde99555161dd5a725522"},
|
||||
{file = "ruff-0.0.277-py3-none-win32.whl", hash = "sha256:88d0f2afb2e0c26ac1120e7061ddda2a566196ec4007bd66d558f13b374b9efc"},
|
||||
{file = "ruff-0.0.277-py3-none-win_amd64.whl", hash = "sha256:6fe81732f788894a00f6ade1fe69e996cc9e485b7c35b0f53fb00284397284b2"},
|
||||
{file = "ruff-0.0.277-py3-none-win_arm64.whl", hash = "sha256:2d4444c60f2e705c14cd802b55cd2b561d25bf4311702c463a002392d3116b22"},
|
||||
{file = "ruff-0.0.277.tar.gz", hash = "sha256:2dab13cdedbf3af6d4427c07f47143746b6b95d9e4a254ac369a0edb9280a0d2"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
@ -2301,13 +2298,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"]
|
|||
|
||||
[[package]]
|
||||
name = "sentry-sdk"
|
||||
version = "1.25.1"
|
||||
version = "1.26.0"
|
||||
description = "Python client for Sentry (https://sentry.io)"
|
||||
optional = true
|
||||
python-versions = "*"
|
||||
files = [
|
||||
{file = "sentry-sdk-1.25.1.tar.gz", hash = "sha256:aa796423eb6a2f4a8cd7a5b02ba6558cb10aab4ccdc0537f63a47b038c520c38"},
|
||||
{file = "sentry_sdk-1.25.1-py2.py3-none-any.whl", hash = "sha256:79afb7c896014038e358401ad1d36889f97a129dfa8031c49b3f238cd1aa3935"},
|
||||
{file = "sentry-sdk-1.26.0.tar.gz", hash = "sha256:760e4fb6d01c994110507133e08ecd4bdf4d75ee4be77f296a3579796cf73134"},
|
||||
{file = "sentry_sdk-1.26.0-py2.py3-none-any.whl", hash = "sha256:0c9f858337ec3781cf4851972ef42bba8c9828aea116b0dbed8f38c5f9a1896c"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
|
@ -2410,9 +2407,7 @@ files = [

[package.dependencies]
canonicaljson = ">=1.0.0"
importlib-metadata = {version = "*", markers = "python_version < \"3.8\""}
pynacl = ">=0.3.0"
typing-extensions = {version = ">=3.5", markers = "python_version < \"3.8\""}
unpaddedbase64 = ">=1.0.1"

[package.extras]
@ -2852,39 +2847,6 @@ files = [
|
|||
six = "*"
|
||||
twisted = "*"
|
||||
|
||||
[[package]]
|
||||
name = "typed-ast"
|
||||
version = "1.5.4"
|
||||
description = "a fork of Python 2 and 3 ast modules with type comment support"
|
||||
optional = false
|
||||
python-versions = ">=3.6"
|
||||
files = [
|
||||
{file = "typed_ast-1.5.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:669dd0c4167f6f2cd9f57041e03c3c2ebf9063d0757dc89f79ba1daa2bfca9d4"},
|
||||
{file = "typed_ast-1.5.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:211260621ab1cd7324e0798d6be953d00b74e0428382991adfddb352252f1d62"},
|
||||
{file = "typed_ast-1.5.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:267e3f78697a6c00c689c03db4876dd1efdfea2f251a5ad6555e82a26847b4ac"},
|
||||
{file = "typed_ast-1.5.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c542eeda69212fa10a7ada75e668876fdec5f856cd3d06829e6aa64ad17c8dfe"},
|
||||
{file = "typed_ast-1.5.4-cp310-cp310-win_amd64.whl", hash = "sha256:a9916d2bb8865f973824fb47436fa45e1ebf2efd920f2b9f99342cb7fab93f72"},
|
||||
{file = "typed_ast-1.5.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:79b1e0869db7c830ba6a981d58711c88b6677506e648496b1f64ac7d15633aec"},
|
||||
{file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a94d55d142c9265f4ea46fab70977a1944ecae359ae867397757d836ea5a3f47"},
|
||||
{file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:183afdf0ec5b1b211724dfef3d2cad2d767cbefac291f24d69b00546c1837fb6"},
|
||||
{file = "typed_ast-1.5.4-cp36-cp36m-win_amd64.whl", hash = "sha256:639c5f0b21776605dd6c9dbe592d5228f021404dafd377e2b7ac046b0349b1a1"},
|
||||
{file = "typed_ast-1.5.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cf4afcfac006ece570e32d6fa90ab74a17245b83dfd6655a6f68568098345ff6"},
|
||||
{file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed855bbe3eb3715fca349c80174cfcfd699c2f9de574d40527b8429acae23a66"},
|
||||
{file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6778e1b2f81dfc7bc58e4b259363b83d2e509a65198e85d5700dfae4c6c8ff1c"},
|
||||
{file = "typed_ast-1.5.4-cp37-cp37m-win_amd64.whl", hash = "sha256:0261195c2062caf107831e92a76764c81227dae162c4f75192c0d489faf751a2"},
|
||||
{file = "typed_ast-1.5.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2efae9db7a8c05ad5547d522e7dbe62c83d838d3906a3716d1478b6c1d61388d"},
|
||||
{file = "typed_ast-1.5.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7d5d014b7daa8b0bf2eaef684295acae12b036d79f54178b92a2b6a56f92278f"},
|
||||
{file = "typed_ast-1.5.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:370788a63915e82fd6f212865a596a0fefcbb7d408bbbb13dea723d971ed8bdc"},
|
||||
{file = "typed_ast-1.5.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4e964b4ff86550a7a7d56345c7864b18f403f5bd7380edf44a3c1fb4ee7ac6c6"},
|
||||
{file = "typed_ast-1.5.4-cp38-cp38-win_amd64.whl", hash = "sha256:683407d92dc953c8a7347119596f0b0e6c55eb98ebebd9b23437501b28dcbb8e"},
|
||||
{file = "typed_ast-1.5.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4879da6c9b73443f97e731b617184a596ac1235fe91f98d279a7af36c796da35"},
|
||||
{file = "typed_ast-1.5.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3e123d878ba170397916557d31c8f589951e353cc95fb7f24f6bb69adc1a8a97"},
|
||||
{file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebd9d7f80ccf7a82ac5f88c521115cc55d84e35bf8b446fcd7836eb6b98929a3"},
|
||||
{file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98f80dee3c03455e92796b58b98ff6ca0b2a6f652120c263efdba4d6c5e58f72"},
|
||||
{file = "typed_ast-1.5.4-cp39-cp39-win_amd64.whl", hash = "sha256:0fdbcf2fef0ca421a3f5912555804296f0b0960f0418c440f5d6d3abb549f3e1"},
|
||||
{file = "typed_ast-1.5.4.tar.gz", hash = "sha256:39e21ceb7388e4bb37f4c679d72707ed46c2fbf2a5609b8b8ebc4b067d977df2"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "types-bleach"
|
||||
version = "6.0.0.3"
|
||||
|
@ -2964,13 +2926,13 @@ files = [
|
|||
|
||||
[[package]]
|
||||
name = "types-pyopenssl"
|
||||
version = "23.2.0.0"
|
||||
version = "23.2.0.1"
|
||||
description = "Typing stubs for pyOpenSSL"
|
||||
optional = false
|
||||
python-versions = "*"
|
||||
files = [
|
||||
{file = "types-pyOpenSSL-23.2.0.0.tar.gz", hash = "sha256:43e307e8dfb3a7a8208a19874ca060305f460c529d4eaca8a2669ea89499f244"},
|
||||
{file = "types_pyOpenSSL-23.2.0.0-py3-none-any.whl", hash = "sha256:ba803a99440b0c2e9ab4e197084aeefc55bdfe8a580d367b2aa4210810a21240"},
|
||||
{file = "types-pyOpenSSL-23.2.0.1.tar.gz", hash = "sha256:beeb5d22704c625a1e4b6dc756355c5b4af0b980138b702a9d9f932acf020903"},
|
||||
{file = "types_pyOpenSSL-23.2.0.1-py3-none-any.whl", hash = "sha256:0568553f104466f1b8e0db3360fbe6770137d02e21a1a45c209bf2b1b03d90d4"},
|
||||
]
|
||||
|
||||
[package.dependencies]
|
||||
|
@ -3293,5 +3255,5 @@ user-search = ["pyicu"]

[metadata]
lock-version = "2.0"
python-versions = "^3.7.1"
content-hash = "7f31754a1009d7b6c9a1bd7221a0b243ffd510f362c28f0da417aaac16757a87"
python-versions = "^3.8.0"
content-hash = "0a8c6605e7e1d0ac7188a5d02b47a029bfb0f917458b87cb40755911442383d8"
@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml"

[tool.poetry]
name = "matrix-synapse"
version = "1.86.0"
version = "1.88.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"
@ -147,7 +147,7 @@ synapse_review_recent_signups = "synapse._scripts.review_recent_signups:main"
update_synapse_database = "synapse._scripts.update_synapse_database:main"

[tool.poetry.dependencies]
python = "^3.7.1"
python = "^3.8.0"

# Mandatory Dependencies
# ----------------------
@ -203,11 +203,9 @@ ijson = ">=3.1.4"
matrix-common = "^1.3.0"
# We need packaging.requirements.Requirement, added in 16.1.
packaging = ">=16.1"
# At the time of writing, we only use functions from the version `importlib.metadata`
# which shipped in Python 3.8. This corresponds to version 1.4 of the backport.
importlib_metadata = { version = ">=1.4", python = "<3.8" }
# This is the most recent version of Pydantic with available on common distros.
pydantic = ">=1.7.4"
# We are currently incompatible with >=2.0.0: (https://github.com/matrix-org/synapse/issues/15858)
pydantic = "^1.7.4"

# This is for building the rust components during "poetry install", which
# currently ignores the `build-system.requires` directive (c.f.
@ -311,7 +309,7 @@ all = [
# We pin black so that our tests don't start failing on new releases.
isort = ">=5.10.1"
black = ">=22.3.0"
ruff = "0.0.275"
ruff = "0.0.277"

# Typechecking
lxml-stubs = ">=0.4.0"
@ -23,7 +23,6 @@ from typing import Collection, Optional, Sequence, Set
# These are expanded inside the dockerfile to be a fully qualified image name.
# e.g. docker.io/library/debian:bullseye
DISTS = (
    "debian:buster",  # oldstable: EOL 2022-08
    "debian:bullseye",
    "debian:bookworm",
    "debian:sid",
@ -25,8 +25,8 @@ from synapse.util.rust import check_rust_lib_up_to_date
from synapse.util.stringutils import strtobool

# Check that we're not running on an unsupported Python version.
if sys.version_info < (3, 7):
    print("Synapse requires Python 3.7 or above.")
if sys.version_info < (3, 8):
    print("Synapse requires Python 3.8 or above.")
    sys.exit(1)

# Allow using the asyncio reactor via env var.
@ -61,6 +61,7 @@ from synapse.storage.databases.main.deviceinbox import DeviceInboxBackgroundUpda
from synapse.storage.databases.main.devices import DeviceBackgroundUpdateStore
from synapse.storage.databases.main.e2e_room_keys import EndToEndRoomKeyBackgroundStore
from synapse.storage.databases.main.end_to_end_keys import EndToEndKeyBackgroundStore
from synapse.storage.databases.main.event_federation import EventFederationWorkerStore
from synapse.storage.databases.main.event_push_actions import EventPushActionsStore
from synapse.storage.databases.main.events_bg_updates import (
    EventsBackgroundUpdatesStore,
@ -196,6 +197,11 @@ IGNORED_TABLES = {
    "ui_auth_sessions",
    "ui_auth_sessions_credentials",
    "ui_auth_sessions_ips",
    # Ignore the worker locks table, as a) there shouldn't be any acquired locks
    # after porting, and b) the circular foreign key constraints make it hard to
    # port.
    "worker_read_write_locks_mode",
    "worker_read_write_locks",
}


@ -239,6 +245,7 @@ class Store(
    PresenceBackgroundUpdateStore,
    ReceiptsBackgroundUpdateStore,
    RelationsWorkerStore,
    EventFederationWorkerStore,
):
    def execute(self, f: Callable[..., R], *args: Any, **kwargs: Any) -> Awaitable[R]:
        return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs)
@ -803,7 +810,9 @@ class Porter:
        )
        # Map from table name to args passed to `handle_table`, i.e. a tuple
        # of: `postgres_size`, `table_size`, `forward_chunk`, `backward_chunk`.
        tables_to_port_info_map = {r[0]: r[1:] for r in setup_res}
        tables_to_port_info_map = {
            r[0]: r[1:] for r in setup_res if r[0] not in IGNORED_TABLES
        }

        # Step 5. Do the copying.
        #
@ -41,11 +41,17 @@ Synapse version. Please use ``%s: name_of_worker`` instead.

_MISSING_MAIN_PROCESS_INSTANCE_MAP_DATA = """
Missing data for a worker to connect to main process. Please include '%s' in the
`instance_map` declared in your shared yaml configuration, or optionally(as a deprecated
solution) in every worker's yaml as various `worker_replication_*` settings as defined
in workers documentation here:
`instance_map` declared in your shared yaml configuration as defined in configuration
documentation here:
`https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#instance_map`
"""

WORKER_REPLICATION_SETTING_DEPRECATED_MESSAGE = """
'%s' is no longer a supported worker setting, please place '%s' onto your shared
configuration under `main` inside the `instance_map`. See workers documentation here:
`https://matrix-org.github.io/synapse/latest/workers.html#worker-configuration`
"""

# This allows for a handy knob when it's time to change from 'master' to
# something with less 'history'
MAIN_PROCESS_INSTANCE_NAME = "master"
@ -216,22 +222,37 @@ class WorkerConfig(Config):
        )

        # A map from instance name to host/port of their HTTP replication endpoint.
        # Check if the main process is declared. Inject it into the map if it's not,
        # based first on if a 'main' block is declared then on 'worker_replication_*'
        # data. If both are available, default to instance_map. The main process
        # itself doesn't need this data as it would never have to talk to itself.
        # Check if the main process is declared. The main process itself doesn't need
        # this data as it would never have to talk to itself.
        instance_map: Dict[str, Any] = config.get("instance_map", {})

        if self.instance_name is not MAIN_PROCESS_INSTANCE_NAME:
            # TODO: The next 3 condition blocks can be deleted after some time has
            # passed and we're ready to stop checking for these settings.
            # The host used to connect to the main synapse
            main_host = config.get("worker_replication_host", None)
            if main_host:
                raise ConfigError(
                    WORKER_REPLICATION_SETTING_DEPRECATED_MESSAGE
                    % ("worker_replication_host", main_host)
                )

            # The port on the main synapse for HTTP replication endpoint
            main_port = config.get("worker_replication_http_port")
            if main_port:
                raise ConfigError(
                    WORKER_REPLICATION_SETTING_DEPRECATED_MESSAGE
                    % ("worker_replication_http_port", main_port)
                )

            # The tls mode on the main synapse for HTTP replication endpoint.
            # For backward compatibility this defaults to False.
            main_tls = config.get("worker_replication_http_tls", False)
            if main_tls:
                raise ConfigError(
                    WORKER_REPLICATION_SETTING_DEPRECATED_MESSAGE
                    % ("worker_replication_http_tls", main_tls)
                )

            # For now, accept 'main' in the instance_map, but the replication system
            # expects 'master', force that into being until it's changed later.
@ -241,22 +262,9 @@ class WorkerConfig(Config):
                ]
                del instance_map[MAIN_PROCESS_INSTANCE_MAP_NAME]

            # This is the backwards compatibility bit that handles the
            # worker_replication_* bits using setdefault() to not overwrite anything.
            elif main_host is not None and main_port is not None:
                instance_map.setdefault(
                    MAIN_PROCESS_INSTANCE_NAME,
                    {
                        "host": main_host,
                        "port": main_port,
                        "tls": main_tls,
                    },
                )

            else:
                # If we've gotten here, it means that the main process is not on the
                # instance_map and that not enough worker_replication_* variables
                # were declared in the worker's yaml.
                # instance_map.
                raise ConfigError(
                    _MISSING_MAIN_PROCESS_INSTANCE_MAP_DATA
                    % MAIN_PROCESS_INSTANCE_MAP_NAME
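For reference, a minimal sketch of the shared configuration this code now expects, shown as the parsed Python mapping that read_config receives rather than the YAML itself. The hostnames, ports and the extra worker entry are illustrative; only the `main` key and the `host`/`port`/`tls` fields come from the code above.

# Hypothetical parsed shared config: workers now declare the main process
# under `instance_map` instead of the removed worker_replication_* settings.
shared_config = {
    "instance_map": {
        # "main" is mapped onto the internal MAIN_PROCESS_INSTANCE_NAME ("master").
        "main": {"host": "localhost", "port": 9093, "tls": False},
        "generic_worker1": {"host": "localhost", "port": 9094},
    },
}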
@ -910,7 +910,7 @@ def set_cors_headers(request: SynapseRequest) -> None:
    )
    request.setHeader(
        b"Access-Control-Expose-Headers",
        b"Synapse-Trace-Id",
        b"Synapse-Trace-Id, Server",
    )

@ -35,6 +35,7 @@ from synapse.api.errors import (
|
|||
from synapse.config.repository import ThumbnailRequirement
|
||||
from synapse.http.site import SynapseRequest
|
||||
from synapse.logging.context import defer_to_thread
|
||||
from synapse.logging.opentracing import trace
|
||||
from synapse.media._base import (
|
||||
FileInfo,
|
||||
Responder,
|
||||
|
@ -174,6 +175,7 @@ class MediaRepository:
|
|||
else:
|
||||
self.recently_accessed_locals.add(media_id)
|
||||
|
||||
@trace
|
||||
async def create_content(
|
||||
self,
|
||||
media_type: str,
|
||||
|
@ -710,6 +712,7 @@ class MediaRepository:
|
|||
# Could not generate thumbnail.
|
||||
return None
|
||||
|
||||
@trace
|
||||
async def _generate_thumbnails(
|
||||
self,
|
||||
server_name: Optional[str],
|
||||
|
|
|
@ -38,6 +38,7 @@ from twisted.protocols.basic import FileSender
|
|||
|
||||
from synapse.api.errors import NotFoundError
|
||||
from synapse.logging.context import defer_to_thread, make_deferred_yieldable
|
||||
from synapse.logging.opentracing import start_active_span, trace, trace_with_opname
|
||||
from synapse.util import Clock
|
||||
from synapse.util.file_consumer import BackgroundFileConsumer
|
||||
|
||||
|
@ -76,6 +77,7 @@ class MediaStorage:
|
|||
self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker
|
||||
self.clock = hs.get_clock()
|
||||
|
||||
@trace_with_opname("MediaStorage.store_file")
|
||||
async def store_file(self, source: IO, file_info: FileInfo) -> str:
|
||||
"""Write `source` to the on disk media store, and also any other
|
||||
configured storage providers
|
||||
|
@ -89,16 +91,19 @@ class MediaStorage:
|
|||
"""
|
||||
|
||||
with self.store_into_file(file_info) as (f, fname, finish_cb):
|
||||
# Write to the main repository
|
||||
# Write to the main media repository
|
||||
await self.write_to_file(source, f)
|
||||
# Write to the other storage providers
|
||||
await finish_cb()
|
||||
|
||||
return fname
|
||||
|
||||
@trace_with_opname("MediaStorage.write_to_file")
|
||||
async def write_to_file(self, source: IO, output: IO) -> None:
|
||||
"""Asynchronously write the `source` to `output`."""
|
||||
await defer_to_thread(self.reactor, _write_file_synchronously, source, output)
|
||||
|
||||
@trace_with_opname("MediaStorage.store_into_file")
|
||||
@contextlib.contextmanager
|
||||
def store_into_file(
|
||||
self, file_info: FileInfo
|
||||
|
@ -113,9 +118,9 @@ class MediaStorage:
|
|||
fname can be used to read the contents from after upload, e.g. to
|
||||
generate thumbnails.
|
||||
|
||||
finish_cb must be called and waited on after the file has been
|
||||
successfully been written to. Should not be called if there was an
|
||||
error.
|
||||
finish_cb must be called and waited on after the file has been successfully been
|
||||
written to. Should not be called if there was an error. Checks for spam and
|
||||
stores the file into the configured storage providers.
|
||||
|
||||
Args:
|
||||
file_info: Info about the file to store
|
||||
|
@ -135,35 +140,48 @@ class MediaStorage:
|
|||
|
||||
finished_called = [False]
|
||||
|
||||
main_media_repo_write_trace_scope = start_active_span(
|
||||
"writing to main media repo"
|
||||
)
|
||||
main_media_repo_write_trace_scope.__enter__()
|
||||
|
||||
try:
|
||||
with open(fname, "wb") as f:
|
||||
|
||||
async def finish() -> None:
|
||||
# Ensure that all writes have been flushed and close the
|
||||
# file.
|
||||
f.flush()
|
||||
f.close()
|
||||
# When someone calls finish, we assume they are done writing to the main media repo
|
||||
main_media_repo_write_trace_scope.__exit__(None, None, None)
|
||||
|
||||
spam_check = await self._spam_checker_module_callbacks.check_media_file_for_spam(
|
||||
ReadableFileWrapper(self.clock, fname), file_info
|
||||
)
|
||||
if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
|
||||
logger.info("Blocking media due to spam checker")
|
||||
# Note that we'll delete the stored media, due to the
|
||||
# try/except below. The media also won't be stored in
|
||||
# the DB.
|
||||
# We currently ignore any additional field returned by
|
||||
# the spam-check API.
|
||||
raise SpamMediaException(errcode=spam_check[0])
|
||||
with start_active_span("writing to other storage providers"):
|
||||
# Ensure that all writes have been flushed and close the
|
||||
# file.
|
||||
f.flush()
|
||||
f.close()
|
||||
|
||||
for provider in self.storage_providers:
|
||||
await provider.store_file(path, file_info)
|
||||
spam_check = await self._spam_checker_module_callbacks.check_media_file_for_spam(
|
||||
ReadableFileWrapper(self.clock, fname), file_info
|
||||
)
|
||||
if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
|
||||
logger.info("Blocking media due to spam checker")
|
||||
# Note that we'll delete the stored media, due to the
|
||||
# try/except below. The media also won't be stored in
|
||||
# the DB.
|
||||
# We currently ignore any additional field returned by
|
||||
# the spam-check API.
|
||||
raise SpamMediaException(errcode=spam_check[0])
|
||||
|
||||
finished_called[0] = True
|
||||
for provider in self.storage_providers:
|
||||
with start_active_span(str(provider)):
|
||||
await provider.store_file(path, file_info)
|
||||
|
||||
finished_called[0] = True
|
||||
|
||||
yield f, fname, finish
|
||||
except Exception as e:
|
||||
try:
|
||||
main_media_repo_write_trace_scope.__exit__(
|
||||
type(e), None, e.__traceback__
|
||||
)
|
||||
os.remove(fname)
|
||||
except Exception:
|
||||
pass
|
||||
|
@ -171,7 +189,11 @@ class MediaStorage:
|
|||
raise e from None
|
||||
|
||||
if not finished_called:
|
||||
raise Exception("Finished callback not called")
|
||||
exc = Exception("Finished callback not called")
|
||||
main_media_repo_write_trace_scope.__exit__(
|
||||
type(exc), None, exc.__traceback__
|
||||
)
|
||||
raise exc
|
||||
|
||||
async def fetch_media(self, file_info: FileInfo) -> Optional[Responder]:
|
||||
"""Attempts to fetch media described by file_info from the local cache
|
||||
|
@ -214,6 +236,7 @@ class MediaStorage:
|
|||
|
||||
return None
|
||||
|
||||
@trace
|
||||
async def ensure_media_is_in_local_cache(self, file_info: FileInfo) -> str:
|
||||
"""Ensures that the given file is in the local cache. Attempts to
|
||||
download it from storage providers if it isn't.
|
||||
|
@ -259,6 +282,7 @@ class MediaStorage:
|
|||
|
||||
raise NotFoundError()
|
||||
|
||||
@trace
|
||||
def _file_info_to_path(self, file_info: FileInfo) -> str:
|
||||
"""Converts file_info into a relative path.
|
||||
|
||||
|
@ -301,6 +325,7 @@ class MediaStorage:
|
|||
return self.filepaths.local_media_filepath_rel(file_info.file_id)
|
||||
|
||||
|
||||
@trace
|
||||
def _write_file_synchronously(source: IO, dest: IO) -> None:
|
||||
"""Write `source` to the file like `dest` synchronously. Should be called
|
||||
from a thread.
|
||||
|
|
|
@ -20,6 +20,7 @@ from typing import TYPE_CHECKING, Callable, Optional
|
|||
|
||||
from synapse.config._base import Config
|
||||
from synapse.logging.context import defer_to_thread, run_in_background
|
||||
from synapse.logging.opentracing import start_active_span, trace_with_opname
|
||||
from synapse.util.async_helpers import maybe_awaitable
|
||||
|
||||
from ._base import FileInfo, Responder
|
||||
|
@ -86,6 +87,7 @@ class StorageProviderWrapper(StorageProvider):
|
|||
def __str__(self) -> str:
|
||||
return "StorageProviderWrapper[%s]" % (self.backend,)
|
||||
|
||||
@trace_with_opname("StorageProviderWrapper.store_file")
|
||||
async def store_file(self, path: str, file_info: FileInfo) -> None:
|
||||
if not file_info.server_name and not self.store_local:
|
||||
return None
|
||||
|
@ -114,6 +116,7 @@ class StorageProviderWrapper(StorageProvider):
|
|||
|
||||
run_in_background(store)
|
||||
|
||||
@trace_with_opname("StorageProviderWrapper.fetch")
|
||||
async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
|
||||
if file_info.url_cache:
|
||||
# Files in the URL preview cache definitely aren't stored here,
|
||||
|
@ -141,6 +144,7 @@ class FileStorageProviderBackend(StorageProvider):
|
|||
def __str__(self) -> str:
|
||||
return "FileStorageProviderBackend[%s]" % (self.base_directory,)
|
||||
|
||||
@trace_with_opname("FileStorageProviderBackend.store_file")
|
||||
async def store_file(self, path: str, file_info: FileInfo) -> None:
|
||||
"""See StorageProvider.store_file"""
|
||||
|
||||
|
@ -152,13 +156,15 @@ class FileStorageProviderBackend(StorageProvider):
|
|||
|
||||
# mypy needs help inferring the type of the second parameter, which is generic
|
||||
shutil_copyfile: Callable[[str, str], str] = shutil.copyfile
|
||||
await defer_to_thread(
|
||||
self.hs.get_reactor(),
|
||||
shutil_copyfile,
|
||||
primary_fname,
|
||||
backup_fname,
|
||||
)
|
||||
with start_active_span("shutil_copyfile"):
|
||||
await defer_to_thread(
|
||||
self.hs.get_reactor(),
|
||||
shutil_copyfile,
|
||||
primary_fname,
|
||||
backup_fname,
|
||||
)
|
||||
|
||||
@trace_with_opname("FileStorageProviderBackend.fetch")
|
||||
async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]:
|
||||
"""See StorageProvider.fetch"""
|
||||
|
||||
|
|
|
@ -19,6 +19,8 @@ from typing import Optional, Tuple, Type
|
|||
|
||||
from PIL import Image
|
||||
|
||||
from synapse.logging.opentracing import trace
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
EXIF_ORIENTATION_TAG = 0x0112
|
||||
|
@ -82,6 +84,7 @@ class Thumbnailer:
|
|||
# A lot of parsing errors can happen when parsing EXIF
|
||||
logger.info("Error parsing image EXIF information: %s", e)
|
||||
|
||||
@trace
|
||||
def transpose(self) -> Tuple[int, int]:
|
||||
"""Transpose the image using its EXIF Orientation tag
|
||||
|
||||
|
@ -131,8 +134,9 @@ class Thumbnailer:
        else:
            with self.image:
                self.image = self.image.convert("RGB")
        return self.image.resize((width, height), Image.ANTIALIAS)
        return self.image.resize((width, height), Image.LANCZOS)

    @trace
    def scale(self, width: int, height: int, output_type: str) -> BytesIO:
        """Rescales the image to the given dimensions.
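Context for the Image.ANTIALIAS to Image.LANCZOS change above: in Pillow, ANTIALIAS has long been an alias for the Lanczos filter and is removed in newer Pillow releases, so the new call resizes identically while staying compatible. A minimal sketch, with an illustrative file name:

from PIL import Image

# Image.ANTIALIAS was an alias of the Lanczos filter; Image.LANCZOS is the
# forward-compatible spelling and produces the same result.
with Image.open("example.png") as img:  # illustrative path
    thumbnail = img.resize((640, 480), Image.LANCZOS)
    thumbnail.save("example-thumb.png")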
|
||||
|
@ -142,6 +146,7 @@ class Thumbnailer:
|
|||
with self._resize(width, height) as scaled:
|
||||
return self._encode_image(scaled, output_type)
|
||||
|
||||
@trace
|
||||
def crop(self, width: int, height: int, output_type: str) -> BytesIO:
|
||||
"""Rescales and crops the image to the given dimensions preserving
|
||||
aspect::
|
||||
|
|
|
@ -788,6 +788,7 @@ class SpamCheckerModuleApiCallbacks:

        return RegistrationBehaviour.ALLOW

    @trace
    async def check_media_file_for_spam(
        self, file_wrapper: ReadableFileWrapper, file_info: FileInfo
    ) -> Union[Tuple[Codes, dict], Literal["NOT_SPAM"]]:
|
|
|
@ -28,6 +28,7 @@ from synapse.http.servlet import (
|
|||
parse_integer,
|
||||
parse_json_object_from_request,
|
||||
parse_string,
|
||||
parse_strings_from_args,
|
||||
)
|
||||
from synapse.http.site import SynapseRequest
|
||||
from synapse.rest.admin._base import (
|
||||
|
@ -64,6 +65,9 @@ class UsersRestServletV2(RestServlet):
|
|||
The parameter `guests` can be used to exclude guest users.
|
||||
The parameter `deactivated` can be used to include deactivated users.
|
||||
The parameter `order_by` can be used to order the result.
|
||||
The parameter `not_user_type` can be used to exclude certain user types.
|
||||
Possible values are `bot`, `support` or "empty string".
|
||||
"empty string" here means to exclude users without a type.
|
||||
"""
|
||||
|
||||
def __init__(self, hs: "HomeServer"):
|
||||
|
@ -131,6 +135,10 @@ class UsersRestServletV2(RestServlet):
|
|||
|
||||
direction = parse_enum(request, "dir", Direction, default=Direction.FORWARDS)
|
||||
|
||||
# twisted.web.server.Request.args is incorrectly defined as Optional[Any]
|
||||
args: Dict[bytes, List[bytes]] = request.args # type: ignore
|
||||
not_user_types = parse_strings_from_args(args, "not_user_type")
|
||||
|
||||
users, total = await self.store.get_users_paginate(
|
||||
start,
|
||||
limit,
|
||||
|
@ -141,6 +149,7 @@ class UsersRestServletV2(RestServlet):
|
|||
order_by,
|
||||
direction,
|
||||
approved,
|
||||
not_user_types,
|
||||
)
|
||||
|
||||
# If support for MSC3866 is not enabled, don't show the approval flag.
|
||||
|
|
|
@ -48,7 +48,7 @@
</div>
<h1>It works! Synapse is running</h1>
<p>Your Synapse server is listening on this port and is ready for messages.</p>
<p>To use this server you'll need <a href="https://matrix.org/docs/projects/try-matrix-now.html#clients" target="_blank" rel="noopener noreferrer">a Matrix client</a>.
<p>To use this server you'll need <a href="https://matrix.org/ecosystem/clients/" target="_blank" rel="noopener noreferrer">a Matrix client</a>.
</p>
<p>Welcome to the Matrix universe :)</p>
<hr>
|
|
|
@ -11,8 +11,9 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import logging
from enum import IntEnum
from enum import Enum, IntEnum
from types import TracebackType
from typing import (
    TYPE_CHECKING,
|
@ -24,12 +25,16 @@ from typing import (
    Iterable,
    List,
    Optional,
    Sequence,
    Tuple,
    Type,
)

import attr
from pydantic import BaseModel

from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.storage.engines import PostgresEngine
from synapse.storage.types import Connection, Cursor
from synapse.types import JsonDict
from synapse.util import Clock, json_encoder
|
@ -48,6 +53,83 @@ DEFAULT_BATCH_SIZE_CALLBACK = Callable[[str, str], Awaitable[int]]
MIN_BATCH_SIZE_CALLBACK = Callable[[str, str], Awaitable[int]]


class Constraint(metaclass=abc.ABCMeta):
    """Base class representing different constraints.

    Used by `register_background_validate_constraint_and_delete_rows`.
    """

    @abc.abstractmethod
    def make_check_clause(self, table: str) -> str:
        """Returns an SQL expression that checks the row passes the constraint."""
        pass

    @abc.abstractmethod
    def make_constraint_clause_postgres(self) -> str:
        """Returns an SQL clause for creating the constraint.

        Only used on Postgres DBs
        """
        pass


@attr.s(auto_attribs=True)
class ForeignKeyConstraint(Constraint):
    """A foreign key constraint.

    Attributes:
        referenced_table: The "parent" table name.
        columns: The list of mappings of columns from table to referenced table
        deferred: Whether to defer checking of the constraint to the end of the
            transaction. This is useful for e.g. backwards compatibility where
            an older version inserted data in the wrong order.
    """

    referenced_table: str
    columns: Sequence[Tuple[str, str]]
    deferred: bool

    def make_check_clause(self, table: str) -> str:
        join_clause = " AND ".join(
            f"{col1} = {table}.{col2}" for col1, col2 in self.columns
        )
        return f"EXISTS (SELECT 1 FROM {self.referenced_table} WHERE {join_clause})"

    def make_constraint_clause_postgres(self) -> str:
        column1_list = ", ".join(col1 for col1, col2 in self.columns)
        column2_list = ", ".join(col2 for col1, col2 in self.columns)
        defer_clause = " DEFERRABLE INITIALLY DEFERRED" if self.deferred else ""
        return f"FOREIGN KEY ({column1_list}) REFERENCES {self.referenced_table} ({column2_list}) {defer_clause}"


@attr.s(auto_attribs=True)
class NotNullConstraint(Constraint):
    """A NOT NULL column constraint"""

    column: str

    def make_check_clause(self, table: str) -> str:
        return f"{self.column} IS NOT NULL"

    def make_constraint_clause_postgres(self) -> str:
        return f"CHECK ({self.column} IS NOT NULL)"
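To make the two helpers above concrete, a small sketch of the SQL fragments they generate; the table and column names are invented for illustration.

fk = ForeignKeyConstraint("events", columns=[("event_id", "event_id")], deferred=False)
fk.make_check_clause("event_edges")
# -> "EXISTS (SELECT 1 FROM events WHERE event_id = event_edges.event_id)"
fk.make_constraint_clause_postgres()
# -> "FOREIGN KEY (event_id) REFERENCES events (event_id)"

nn = NotNullConstraint("room_id")
nn.make_check_clause("event_edges")   # -> "room_id IS NOT NULL"
nn.make_constraint_clause_postgres()  # -> "CHECK (room_id IS NOT NULL)"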


class ValidateConstraintProgress(BaseModel):
    """The format of the progress JSON for validate constraint background
    updates.

    Used by `register_background_validate_constraint_and_delete_rows`.
    """

    class State(str, Enum):
        check = "check"
        validate = "validate"

    state: State = State.validate
    lower_bound: Sequence[Any] = ()
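A quick sketch of how this progress model round-trips through the background update's progress JSON; the lower bound value is illustrative.

progress = ValidateConstraintProgress.parse_obj(
    {"state": "check", "lower_bound": ["$some_event_id"]}  # illustrative value
)
assert progress.state is ValidateConstraintProgress.State.check
# progress.dict() is what gets persisted back as the update's progress JSON.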


@attr.s(slots=True, frozen=True, auto_attribs=True)
class _BackgroundUpdateHandler:
    """A handler for a given background update.
|
@ -740,6 +822,179 @@ class BackgroundUpdater:
        logger.info("Adding index %s to %s", index_name, table)
        await self.db_pool.runWithConnection(runner)

    def register_background_validate_constraint_and_delete_rows(
        self,
        update_name: str,
        table: str,
        constraint_name: str,
        constraint: Constraint,
        unique_columns: Sequence[str],
    ) -> None:
        """Helper for store classes to do a background validate constraint, and
        delete rows that do not pass the constraint check.

        Note: This deletes rows that don't match the constraint. This may not be
        appropriate in all situations, and so the suitability of using this
        method should be considered on a case-by-case basis.

        This only applies on PostgreSQL.

        For SQLite the table gets recreated as part of the schema delta and the
        data is copied over synchronously (or whatever the correct way to
        describe it as).

        Args:
            update_name: The name of the background update.
            table: The table with the invalid constraint.
            constraint_name: The name of the constraint
            constraint: A `Constraint` object matching the type of constraint.
            unique_columns: A sequence of columns that form a unique constraint
                on the table. Used to iterate over the table.
        """

        assert isinstance(
            self.db_pool.engine, engines.PostgresEngine
        ), "validate constraint background update registered for non-Postres database"

        async def updater(progress: JsonDict, batch_size: int) -> int:
            return await self.validate_constraint_and_delete_in_background(
                update_name=update_name,
                table=table,
                constraint_name=constraint_name,
                constraint=constraint,
                unique_columns=unique_columns,
                progress=progress,
                batch_size=batch_size,
            )

        self._background_update_handlers[update_name] = _BackgroundUpdateHandler(
            updater, oneshot=True
        )
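A sketch of how a store class might register such an update; the update name, table, constraint and columns are hypothetical, and it assumes the updater is reachable as `db_pool.updates`, as for other background updates.

class ExampleStore(SQLBaseStore):  # hypothetical store class
    def __init__(self, database, db_conn, hs):
        super().__init__(database, db_conn, hs)

        # Postgres only: re-check a constraint added as NOT VALID by a schema
        # delta, deleting rows that fail it, then VALIDATE CONSTRAINT it.
        self.db_pool.updates.register_background_validate_constraint_and_delete_rows(
            update_name="example_fk_validation",        # hypothetical
            table="example_table",                      # hypothetical
            constraint_name="example_table_parent_fk",  # hypothetical
            constraint=ForeignKeyConstraint(
                "parent_table", columns=[("parent_id", "id")], deferred=False
            ),
            unique_columns=["id"],
        )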
|
||||
    async def validate_constraint_and_delete_in_background(
        self,
        update_name: str,
        table: str,
        constraint_name: str,
        constraint: Constraint,
        unique_columns: Sequence[str],
        progress: JsonDict,
        batch_size: int,
    ) -> int:
        """Validates a table constraint that has been marked as `NOT VALID`,
        deleting rows that don't pass the constraint check.

        This will delete rows that do not meet the validation check.

        update_name: str,
        table: str,
        constraint_name: str,
        constraint: Constraint,
        unique_columns: Sequence[str],
        """

        # We validate the constraint by:
        #   1. Trying to validate the constraint as is. If this succeeds then
        #      we're done.
        #   2. Otherwise, we manually scan the table to remove rows that don't
        #      match the constraint.
        #   3. We try re-validating the constraint.

        parsed_progress = ValidateConstraintProgress.parse_obj(progress)

        if parsed_progress.state == ValidateConstraintProgress.State.check:
            return_columns = ", ".join(unique_columns)
            order_columns = ", ".join(unique_columns)

            where_clause = ""
            args: List[Any] = []
            if parsed_progress.lower_bound:
                where_clause = f"""WHERE ({order_columns}) > ({", ".join("?" for _ in unique_columns)})"""
                args.extend(parsed_progress.lower_bound)

            args.append(batch_size)

            sql = f"""
                SELECT
                    {return_columns},
                    {constraint.make_check_clause(table)} AS check
                FROM {table}
                {where_clause}
                ORDER BY {order_columns}
                LIMIT ?
            """

            def validate_constraint_in_background_check(
                txn: "LoggingTransaction",
            ) -> None:
                txn.execute(sql, args)
                rows = txn.fetchall()

                new_progress = parsed_progress.copy()

                if not rows:
                    new_progress.state = ValidateConstraintProgress.State.validate
                    self._background_update_progress_txn(
                        txn, update_name, new_progress.dict()
                    )
                    return

                new_progress.lower_bound = rows[-1][:-1]

                to_delete = [row[:-1] for row in rows if not row[-1]]

                if to_delete:
                    logger.warning(
                        "Deleting %d rows that do not pass new constraint",
                        len(to_delete),
                    )

                    self.db_pool.simple_delete_many_batch_txn(
                        txn, table=table, keys=unique_columns, values=to_delete
                    )

                self._background_update_progress_txn(
                    txn, update_name, new_progress.dict()
                )

            await self.db_pool.runInteraction(
                "validate_constraint_in_background_check",
                validate_constraint_in_background_check,
            )

            return batch_size

        elif parsed_progress.state == ValidateConstraintProgress.State.validate:
            sql = f"ALTER TABLE {table} VALIDATE CONSTRAINT {constraint_name}"

            def validate_constraint_in_background_validate(
                txn: "LoggingTransaction",
            ) -> None:
                txn.execute(sql)

            try:
                await self.db_pool.runInteraction(
                    "validate_constraint_in_background_validate",
                    validate_constraint_in_background_validate,
                )

                await self._end_background_update(update_name)
            except self.db_pool.engine.module.IntegrityError as e:
                # If we get an integrity error here, then we go back and recheck the table.
                logger.warning("Integrity error when validating constraint: %s", e)
                await self._background_update_progress(
                    update_name,
                    ValidateConstraintProgress(
                        state=ValidateConstraintProgress.State.check
                    ).dict(),
                )

            return batch_size
        else:
            raise Exception(
                f"Unrecognized state '{parsed_progress.state}' when trying to validate_constraint_and_delete_in_background"
            )

    async def _end_background_update(self, update_name: str) -> None:
        """Removes a completed background update task from the queue.
|
||||
|
@ -795,3 +1050,86 @@ class BackgroundUpdater:
|
|||
keyvalues={"update_name": update_name},
|
||||
updatevalues={"progress_json": progress_json},
|
||||
)
|
||||
|
||||
|
||||
def run_validate_constraint_and_delete_rows_schema_delta(
|
||||
txn: "LoggingTransaction",
|
||||
ordering: int,
|
||||
update_name: str,
|
||||
table: str,
|
||||
constraint_name: str,
|
||||
constraint: Constraint,
|
||||
sqlite_table_name: str,
|
||||
sqlite_table_schema: str,
|
||||
) -> None:
|
||||
"""Runs a schema delta to add a constraint to the table. This should be run
|
||||
in a schema delta file.
|
||||
|
||||
For PostgreSQL the constraint is added and validated in the background.
|
||||
|
||||
For SQLite the table is recreated and data copied across immediately. This
|
||||
is done by the caller passing in a script to create the new table. Note that
|
||||
table indexes and triggers are copied over automatically.
|
||||
|
||||
There must be a corresponding call to
|
||||
`register_background_validate_constraint_and_delete_rows` to register the
|
||||
background update in one of the data store classes.
|
||||
|
||||
Args:
    txn, ordering, update_name: For adding a row to the background_updates table.
    table: The table to add the constraint to.
    constraint_name: The name of the new constraint.
    constraint: A `Constraint` object describing the constraint.
    sqlite_table_name: For SQLite, the name of the empty copy of the table.
    sqlite_table_schema: A SQL script for creating the above table.
|
||||
"""
|
||||
|
||||
if isinstance(txn.database_engine, PostgresEngine):
|
||||
# For postgres we can just add the constraint and mark it as NOT VALID,
|
||||
# and then insert a background update to go and check the validity in
|
||||
# the background.
|
||||
txn.execute(
|
||||
f"""
|
||||
ALTER TABLE {table}
|
||||
ADD CONSTRAINT {constraint_name} {constraint.make_constraint_clause_postgres()}
|
||||
NOT VALID
|
||||
"""
|
||||
)
|
||||
|
||||
txn.execute(
|
||||
"INSERT INTO background_updates (ordering, update_name, progress_json) VALUES (?, ?, '{}')",
|
||||
(ordering, update_name),
|
||||
)
|
||||
else:
|
||||
# For SQLite, we:
|
||||
# 1. fetch all indexes/triggers/etc related to the table
|
||||
# 2. create an empty copy of the table
|
||||
# 3. copy across the rows (that satisfy the check)
|
||||
# 4. replace the old table with the new table.
|
||||
# 5. add back all the indexes/triggers/etc
|
||||
|
||||
# Fetch the indexes/triggers/etc. Note that `sql` column being null is
|
||||
# due to indexes being auto-created from the table definition (e.g.
|
||||
# PRIMARY KEY), and so don't need to be recreated.
|
||||
txn.execute(
|
||||
"""
|
||||
SELECT sql FROM sqlite_master
|
||||
WHERE tbl_name = ? AND type != 'table' AND sql IS NOT NULL
|
||||
""",
|
||||
(table,),
|
||||
)
|
||||
extras = [row[0] for row in txn]
|
||||
|
||||
txn.execute(sqlite_table_schema)
|
||||
|
||||
sql = f"""
|
||||
INSERT INTO {sqlite_table_name} SELECT * FROM {table}
|
||||
WHERE {constraint.make_check_clause(table)}
|
||||
"""
|
||||
|
||||
txn.execute(sql)
|
||||
|
||||
txn.execute(f"DROP TABLE {table}")
|
||||
txn.execute(f"ALTER TABLE {sqlite_table_name} RENAME TO {table}")
|
||||
|
||||
for extra in extras:
|
||||
txn.execute(extra)
|
||||
|
|
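[Editor's note: to make the relationship between the two new helpers concrete, here is a minimal, hypothetical sketch of how a schema delta and a data store would use them together. The `foo`/`bar` table, constraint and update names are invented for illustration; the real usage added by this commit is the `event_forward_extremities` delta and registration further down in this diff.]

    from synapse.storage.background_updates import (
        ForeignKeyConstraint,
        run_validate_constraint_and_delete_rows_schema_delta,
    )
    from synapse.storage.database import LoggingTransaction
    from synapse.storage.engines import BaseDatabaseEngine

    # Hypothetical schema for the rebuilt SQLite table (names are illustrative).
    FOO_TABLE_SCHEMA = """
        CREATE TABLE foo2(
            bar_id TEXT NOT NULL,
            room_id TEXT NOT NULL,
            UNIQUE (bar_id, room_id),
            CONSTRAINT foo_bar_id FOREIGN KEY (bar_id)
                REFERENCES bar (bar_id) DEFERRABLE INITIALLY DEFERRED
        )
    """

    def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
        # In the schema delta: add the constraint (NOT VALID on Postgres, table
        # rewrite on SQLite) and queue the background update.
        run_validate_constraint_and_delete_rows_schema_delta(
            cur,
            ordering=9901,  # made-up ordering for the example
            update_name="foo_bar_id_foreign_key_constraint_update",
            table="foo",
            constraint_name="foo_bar_id",
            constraint=ForeignKeyConstraint("bar", [("bar_id", "bar_id")], deferred=True),
            sqlite_table_name="foo2",
            sqlite_table_schema=FOO_TABLE_SCHEMA,
        )

    # ...and in the corresponding data store's __init__ (Postgres only), register
    # the background update that validates the constraint and deletes bad rows:
    #
    #     self.db_pool.updates.register_background_validate_constraint_and_delete_rows(
    #         update_name="foo_bar_id_foreign_key_constraint_update",
    #         table="foo",
    #         constraint_name="foo_bar_id",
    #         constraint=ForeignKeyConstraint("bar", [("bar_id", "bar_id")], deferred=True),
    #         unique_columns=("bar_id", "room_id"),
    #     )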
|
@ -98,6 +98,8 @@ UNIQUE_INDEX_BACKGROUND_UPDATES = {
|
|||
"event_push_summary": "event_push_summary_unique_index2",
|
||||
"receipts_linearized": "receipts_linearized_unique_index",
|
||||
"receipts_graph": "receipts_graph_unique_index",
|
||||
"profiles": "profiles_full_user_id_key_idx",
|
||||
"user_filters": "full_users_filters_unique_idx",
|
||||
}
|
||||
|
||||
|
||||
|
@ -2313,6 +2315,43 @@ class DatabasePool:
|
|||
|
||||
return txn.rowcount
|
||||
|
||||
@staticmethod
|
||||
def simple_delete_many_batch_txn(
|
||||
txn: LoggingTransaction,
|
||||
table: str,
|
||||
keys: Collection[str],
|
||||
values: Iterable[Iterable[Any]],
|
||||
) -> None:
|
||||
"""Executes a DELETE query on the named table.
|
||||
|
||||
The input is given as a list of rows, where each row is a list of values.
|
||||
(Actually any iterable is fine.)
|
||||
|
||||
Args:
|
||||
txn: The transaction to use.
|
||||
table: string giving the table name
|
||||
keys: list of column names
|
||||
values: for each row, a list of values in the same order as `keys`
|
||||
"""
|
||||
|
||||
if isinstance(txn.database_engine, PostgresEngine):
|
||||
# We use `execute_values` as it can be a lot faster than `execute_batch`,
|
||||
# but it's only available on postgres.
|
||||
sql = "DELETE FROM %s WHERE (%s) IN (VALUES ?)" % (
|
||||
table,
|
||||
", ".join(k for k in keys),
|
||||
)
|
||||
|
||||
txn.execute_values(sql, values, fetch=False)
|
||||
else:
|
||||
sql = "DELETE FROM %s WHERE (%s) = (%s)" % (
|
||||
table,
|
||||
", ".join(k for k in keys),
|
||||
", ".join("?" for _ in keys),
|
||||
)
|
||||
|
||||
txn.execute_batch(sql, values)
|
||||
|
||||
def get_cache_dict(
|
||||
self,
|
||||
db_conn: LoggingDatabaseConnection,
|
||||
|
|
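[Editor's note: a minimal sketch of how the new `simple_delete_many_batch_txn` helper might be called from inside a transaction. The row values here are hypothetical; the real caller added by this commit is `validate_constraint_and_delete_in_background` above.]

    from synapse.storage.database import DatabasePool, LoggingTransaction

    def _delete_rows_txn(txn: LoggingTransaction) -> None:
        # Delete the listed (event_id, room_id) pairs in one batched statement.
        DatabasePool.simple_delete_many_batch_txn(
            txn,
            table="event_forward_extremities",
            keys=("event_id", "room_id"),
            values=[
                ("$bad_event_1", "!room1:example.com"),
                ("$bad_event_2", "!room2:example.com"),
            ],
        )

    # Typically invoked as:
    #     await self.db_pool.runInteraction("delete_rows", _delete_rows_txn)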
|
@ -15,10 +15,11 @@
|
|||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
from typing import TYPE_CHECKING, List, Optional, Tuple, cast
|
||||
from typing import TYPE_CHECKING, List, Optional, Tuple, Union, cast
|
||||
|
||||
from synapse.api.constants import Direction
|
||||
from synapse.config.homeserver import HomeServerConfig
|
||||
from synapse.storage._base import make_in_list_sql_clause
|
||||
from synapse.storage.database import (
|
||||
DatabasePool,
|
||||
LoggingDatabaseConnection,
|
||||
|
@ -170,6 +171,7 @@ class DataStore(
|
|||
order_by: str = UserSortOrder.NAME.value,
|
||||
direction: Direction = Direction.FORWARDS,
|
||||
approved: bool = True,
|
||||
not_user_types: Optional[List[str]] = None,
|
||||
) -> Tuple[List[JsonDict], int]:
|
||||
"""Function to retrieve a paginated list of users from
|
||||
users list. This will return a json list of users and the
|
||||
|
@ -185,6 +187,7 @@ class DataStore(
|
|||
order_by: the sort order of the returned list
|
||||
direction: sort ascending or descending
|
||||
approved: whether to include approved users
|
||||
not_user_types: list of user types to exclude
|
||||
Returns:
|
||||
A tuple of a list of mappings from user to information and a count of total users.
|
||||
"""
|
||||
|
@ -193,7 +196,7 @@ class DataStore(
|
|||
txn: LoggingTransaction,
|
||||
) -> Tuple[List[JsonDict], int]:
|
||||
filters = []
|
||||
args = [self.hs.config.server.server_name]
|
||||
args: List[Union[str, int]] = []
|
||||
|
||||
# Set ordering
|
||||
order_by_column = UserSortOrder(order_by).value
|
||||
|
@ -222,11 +225,45 @@ class DataStore(
|
|||
# be already existing users that we consider as already approved.
|
||||
filters.append("approved IS FALSE")
|
||||
|
||||
if not_user_types:
|
||||
if len(not_user_types) == 1 and not_user_types[0] == "":
|
||||
# Only exclude NULL type users
|
||||
filters.append("user_type IS NOT NULL")
|
||||
else:
|
||||
not_user_types_has_empty = False
|
||||
not_user_types_without_empty = []
|
||||
|
||||
for not_user_type in not_user_types:
|
||||
if not_user_type == "":
|
||||
not_user_types_has_empty = True
|
||||
else:
|
||||
not_user_types_without_empty.append(not_user_type)
|
||||
|
||||
not_user_type_clause, not_user_type_args = make_in_list_sql_clause(
|
||||
self.database_engine,
|
||||
"u.user_type",
|
||||
not_user_types_without_empty,
|
||||
)
|
||||
|
||||
if not_user_types_has_empty:
|
||||
# NULL values should be excluded.
|
||||
# They evaluate to false, so there is nothing to do here.
|
||||
filters.append("NOT %s" % (not_user_type_clause))
|
||||
else:
|
||||
# NULL values should *not* be excluded.
|
||||
# Add a special predicate to the query.
|
||||
filters.append(
|
||||
"(NOT %s OR %s IS NULL)"
|
||||
% (not_user_type_clause, "u.user_type")
|
||||
)
|
||||
|
||||
args.extend(not_user_type_args)
|
||||
|
||||
where_clause = "WHERE " + " AND ".join(filters) if len(filters) > 0 else ""
|
||||
|
||||
sql_base = f"""
|
||||
FROM users as u
|
||||
LEFT JOIN profiles AS p ON u.name = '@' || p.user_id || ':' || ?
|
||||
LEFT JOIN profiles AS p ON u.name = p.full_user_id
|
||||
LEFT JOIN erased_users AS eu ON u.name = eu.user_id
|
||||
{where_clause}
|
||||
"""
|
||||
|
|
|
@ -1950,12 +1950,16 @@ class DeviceStore(DeviceWorkerStore, DeviceBackgroundUpdateStore):
|
|||
|
||||
# Delete older entries in the table, as we really only care about
|
||||
# when the latest change happened.
|
||||
txn.execute_batch(
|
||||
"""
|
||||
cleanup_obsolete_stmt = """
|
||||
DELETE FROM device_lists_stream
|
||||
WHERE user_id = ? AND device_id = ? AND stream_id < ?
|
||||
""",
|
||||
[(user_id, device_id, min_stream_id) for device_id in device_ids],
|
||||
WHERE user_id = ? AND stream_id < ? AND %s
|
||||
"""
|
||||
device_ids_clause, device_ids_args = make_in_list_sql_clause(
|
||||
txn.database_engine, "device_id", device_ids
|
||||
)
|
||||
txn.execute(
|
||||
cleanup_obsolete_stmt % (device_ids_clause,),
|
||||
[user_id, min_stream_id] + device_ids_args,
|
||||
)
|
||||
|
||||
self.db_pool.simple_insert_many_txn(
|
||||
|
|
|
@ -38,6 +38,7 @@ from synapse.events import EventBase, make_event_from_dict
|
|||
from synapse.logging.opentracing import tag_args, trace
|
||||
from synapse.metrics.background_process_metrics import wrap_as_background_process
|
||||
from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
|
||||
from synapse.storage.background_updates import ForeignKeyConstraint
|
||||
from synapse.storage.database import (
|
||||
DatabasePool,
|
||||
LoggingDatabaseConnection,
|
||||
|
@ -140,6 +141,17 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
|
|||
|
||||
self._clock.looping_call(self._get_stats_for_federation_staging, 30 * 1000)
|
||||
|
||||
if isinstance(self.database_engine, PostgresEngine):
|
||||
self.db_pool.updates.register_background_validate_constraint_and_delete_rows(
|
||||
update_name="event_forward_extremities_event_id_foreign_key_constraint_update",
|
||||
table="event_forward_extremities",
|
||||
constraint_name="event_forward_extremities_event_id",
|
||||
constraint=ForeignKeyConstraint(
|
||||
"events", [("event_id", "event_id")], deferred=True
|
||||
),
|
||||
unique_columns=("event_id", "room_id"),
|
||||
)
|
||||
|
||||
async def get_auth_chain(
|
||||
self, room_id: str, event_ids: Collection[str], include_given: bool = False
|
||||
) -> List[EventBase]:
|
||||
|
|
|
@ -415,12 +415,6 @@ class PersistEventsStore:
|
|||
backfilled=False,
|
||||
)
|
||||
|
||||
self._update_forward_extremities_txn(
|
||||
txn,
|
||||
new_forward_extremities=new_forward_extremities,
|
||||
max_stream_order=max_stream_order,
|
||||
)
|
||||
|
||||
# Ensure that we don't have the same event twice.
|
||||
events_and_contexts = self._filter_events_and_contexts_for_duplicates(
|
||||
events_and_contexts
|
||||
|
@ -439,6 +433,12 @@ class PersistEventsStore:
|
|||
|
||||
self._store_event_txn(txn, events_and_contexts=events_and_contexts)
|
||||
|
||||
self._update_forward_extremities_txn(
|
||||
txn,
|
||||
new_forward_extremities=new_forward_extremities,
|
||||
max_stream_order=max_stream_order,
|
||||
)
|
||||
|
||||
self._persist_transaction_ids_txn(txn, events_and_contexts)
|
||||
|
||||
# Insert into event_to_state_groups.
|
||||
|
|
|
@ -188,14 +188,13 @@ class FilteringWorkerStore(SQLBaseStore):
|
|||
filter_id = max_id + 1
|
||||
|
||||
sql = (
|
||||
"INSERT INTO user_filters (full_user_id, user_id, filter_id, filter_json)"
|
||||
"VALUES(?, ?, ?, ?)"
|
||||
"INSERT INTO user_filters (full_user_id, filter_id, filter_json)"
|
||||
"VALUES(?, ?, ?)"
|
||||
)
|
||||
txn.execute(
|
||||
sql,
|
||||
(
|
||||
user_id.to_string(),
|
||||
user_id.localpart,
|
||||
filter_id,
|
||||
bytearray(def_json),
|
||||
),
|
||||
|
|
|
@ -25,6 +25,7 @@ from synapse.storage.database import (
|
|||
LoggingDatabaseConnection,
|
||||
LoggingTransaction,
|
||||
)
|
||||
from synapse.storage.engines import PostgresEngine
|
||||
from synapse.util import Clock
|
||||
from synapse.util.stringutils import random_string
|
||||
|
||||
|
@ -68,12 +69,20 @@ class LockStore(SQLBaseStore):
|
|||
self._reactor = hs.get_reactor()
|
||||
self._instance_name = hs.get_instance_id()
|
||||
|
||||
# A map from `(lock_name, lock_key)` to the token of any locks that we
|
||||
# think we currently hold.
|
||||
self._live_tokens: WeakValueDictionary[
|
||||
# A map from `(lock_name, lock_key)` to lock that we think we
|
||||
# currently hold.
|
||||
self._live_lock_tokens: WeakValueDictionary[
|
||||
Tuple[str, str], Lock
|
||||
] = WeakValueDictionary()
|
||||
|
||||
# A map from `(lock_name, lock_key, token)` to read/write lock that we
|
||||
# think we currently hold. For a given lock_name/lock_key, there can be
|
||||
# multiple read locks at a time but only one write lock (no mixing read
|
||||
# and write locks at the same time).
|
||||
self._live_read_write_lock_tokens: WeakValueDictionary[
|
||||
Tuple[str, str, str], Lock
|
||||
] = WeakValueDictionary()
|
||||
|
||||
# When we shut down we want to remove the locks. Technically this can
|
||||
# lead to a race, as we may drop the lock while we are still processing.
|
||||
# However, a) it should be a small window, b) the lock is best effort
|
||||
|
@ -91,11 +100,13 @@ class LockStore(SQLBaseStore):
|
|||
"""Called when the server is shutting down"""
|
||||
logger.info("Dropping held locks due to shutdown")
|
||||
|
||||
# We need to take a copy of the tokens dict as dropping the locks will
|
||||
# cause the dictionary to change.
|
||||
locks = dict(self._live_tokens)
|
||||
# We need to take a copy of the locks as dropping the locks will cause
|
||||
# the dictionary to change.
|
||||
locks = list(self._live_lock_tokens.values()) + list(
|
||||
self._live_read_write_lock_tokens.values()
|
||||
)
|
||||
|
||||
for lock in locks.values():
|
||||
for lock in locks:
|
||||
await lock.release()
|
||||
|
||||
logger.info("Dropped locks due to shutdown")
|
||||
|
@ -122,7 +133,7 @@ class LockStore(SQLBaseStore):
|
|||
"""
|
||||
|
||||
# Check if this process has taken out a lock and if it's still valid.
|
||||
lock = self._live_tokens.get((lock_name, lock_key))
|
||||
lock = self._live_lock_tokens.get((lock_name, lock_key))
|
||||
if lock and await lock.is_still_valid():
|
||||
return None
|
||||
|
||||
|
@ -176,61 +187,111 @@ class LockStore(SQLBaseStore):
|
|||
self._reactor,
|
||||
self._clock,
|
||||
self,
|
||||
read_write=False,
|
||||
lock_name=lock_name,
|
||||
lock_key=lock_key,
|
||||
token=token,
|
||||
)
|
||||
|
||||
self._live_tokens[(lock_name, lock_key)] = lock
|
||||
self._live_lock_tokens[(lock_name, lock_key)] = lock
|
||||
|
||||
return lock
|
||||
|
||||
async def _is_lock_still_valid(
|
||||
self, lock_name: str, lock_key: str, token: str
|
||||
) -> bool:
|
||||
"""Checks whether this instance still holds the lock."""
|
||||
last_renewed_ts = await self.db_pool.simple_select_one_onecol(
|
||||
table="worker_locks",
|
||||
keyvalues={
|
||||
"lock_name": lock_name,
|
||||
"lock_key": lock_key,
|
||||
"token": token,
|
||||
},
|
||||
retcol="last_renewed_ts",
|
||||
allow_none=True,
|
||||
desc="is_lock_still_valid",
|
||||
)
|
||||
return (
|
||||
last_renewed_ts is not None
|
||||
and self._clock.time_msec() - _LOCK_TIMEOUT_MS < last_renewed_ts
|
||||
async def try_acquire_read_write_lock(
|
||||
self,
|
||||
lock_name: str,
|
||||
lock_key: str,
|
||||
write: bool,
|
||||
) -> Optional["Lock"]:
|
||||
"""Try to acquire a lock for the given name/key. Will return an async
|
||||
context manager if the lock is successfully acquired, which *must* be
|
||||
used (otherwise the lock will leak).
|
||||
"""
|
||||
|
||||
now = self._clock.time_msec()
|
||||
token = random_string(6)
|
||||
|
||||
def _try_acquire_read_write_lock_txn(txn: LoggingTransaction) -> None:
|
||||
# We attempt to acquire the lock by inserting into
|
||||
# `worker_read_write_locks` and seeing if that fails any
|
||||
# constraints. If it doesn't then we have acquired the lock,
|
||||
# otherwise we haven't.
|
||||
#
|
||||
# Before that though we clear the table of any stale locks.
|
||||
|
||||
delete_sql = """
|
||||
DELETE FROM worker_read_write_locks
|
||||
WHERE last_renewed_ts < ? AND lock_name = ? AND lock_key = ?;
|
||||
"""
|
||||
|
||||
insert_sql = """
|
||||
INSERT INTO worker_read_write_locks (lock_name, lock_key, write_lock, instance_name, token, last_renewed_ts)
|
||||
VALUES (?, ?, ?, ?, ?, ?)
|
||||
"""
|
||||
|
||||
if isinstance(self.database_engine, PostgresEngine):
|
||||
# For Postgres we can send these queries at the same time.
|
||||
txn.execute(
|
||||
delete_sql + ";" + insert_sql,
|
||||
(
|
||||
# DELETE args
|
||||
now - _LOCK_TIMEOUT_MS,
|
||||
lock_name,
|
||||
lock_key,
|
||||
# UPSERT args
|
||||
lock_name,
|
||||
lock_key,
|
||||
write,
|
||||
self._instance_name,
|
||||
token,
|
||||
now,
|
||||
),
|
||||
)
|
||||
else:
|
||||
# For SQLite these need to be two queries.
|
||||
txn.execute(
|
||||
delete_sql,
|
||||
(
|
||||
now - _LOCK_TIMEOUT_MS,
|
||||
lock_name,
|
||||
lock_key,
|
||||
),
|
||||
)
|
||||
txn.execute(
|
||||
insert_sql,
|
||||
(
|
||||
lock_name,
|
||||
lock_key,
|
||||
write,
|
||||
self._instance_name,
|
||||
token,
|
||||
now,
|
||||
),
|
||||
)
|
||||
|
||||
return
|
||||
|
||||
try:
|
||||
await self.db_pool.runInteraction(
|
||||
"try_acquire_read_write_lock",
|
||||
_try_acquire_read_write_lock_txn,
|
||||
)
|
||||
except self.database_engine.module.IntegrityError:
|
||||
return None
|
||||
|
||||
lock = Lock(
|
||||
self._reactor,
|
||||
self._clock,
|
||||
self,
|
||||
read_write=True,
|
||||
lock_name=lock_name,
|
||||
lock_key=lock_key,
|
||||
token=token,
|
||||
)
|
||||
|
||||
async def _renew_lock(self, lock_name: str, lock_key: str, token: str) -> None:
|
||||
"""Attempt to renew the lock if we still hold it."""
|
||||
await self.db_pool.simple_update(
|
||||
table="worker_locks",
|
||||
keyvalues={
|
||||
"lock_name": lock_name,
|
||||
"lock_key": lock_key,
|
||||
"token": token,
|
||||
},
|
||||
updatevalues={"last_renewed_ts": self._clock.time_msec()},
|
||||
desc="renew_lock",
|
||||
)
|
||||
self._live_read_write_lock_tokens[(lock_name, lock_key, token)] = lock
|
||||
|
||||
async def _drop_lock(self, lock_name: str, lock_key: str, token: str) -> None:
|
||||
"""Attempt to drop the lock, if we still hold it"""
|
||||
await self.db_pool.simple_delete(
|
||||
table="worker_locks",
|
||||
keyvalues={
|
||||
"lock_name": lock_name,
|
||||
"lock_key": lock_key,
|
||||
"token": token,
|
||||
},
|
||||
desc="drop_lock",
|
||||
)
|
||||
|
||||
self._live_tokens.pop((lock_name, lock_key), None)
|
||||
return lock
|
||||
|
||||
|
||||
class Lock:
|
||||
|
@ -259,6 +320,7 @@ class Lock:
|
|||
reactor: IReactorCore,
|
||||
clock: Clock,
|
||||
store: LockStore,
|
||||
read_write: bool,
|
||||
lock_name: str,
|
||||
lock_key: str,
|
||||
token: str,
|
||||
|
@ -266,13 +328,23 @@ class Lock:
|
|||
self._reactor = reactor
|
||||
self._clock = clock
|
||||
self._store = store
|
||||
self._read_write = read_write
|
||||
self._lock_name = lock_name
|
||||
self._lock_key = lock_key
|
||||
|
||||
self._token = token
|
||||
|
||||
self._table = "worker_read_write_locks" if read_write else "worker_locks"
|
||||
|
||||
self._looping_call = clock.looping_call(
|
||||
self._renew, _RENEWAL_INTERVAL_MS, store, lock_name, lock_key, token
|
||||
self._renew,
|
||||
_RENEWAL_INTERVAL_MS,
|
||||
store,
|
||||
clock,
|
||||
read_write,
|
||||
lock_name,
|
||||
lock_key,
|
||||
token,
|
||||
)
|
||||
|
||||
self._dropped = False
|
||||
|
@ -281,6 +353,8 @@ class Lock:
|
|||
@wrap_as_background_process("Lock._renew")
|
||||
async def _renew(
|
||||
store: LockStore,
|
||||
clock: Clock,
|
||||
read_write: bool,
|
||||
lock_name: str,
|
||||
lock_key: str,
|
||||
token: str,
|
||||
|
@ -291,12 +365,34 @@ class Lock:
|
|||
don't end up with a reference to `self` in the reactor, which would stop
|
||||
this from being cleaned up if we dropped the context manager.
|
||||
"""
|
||||
await store._renew_lock(lock_name, lock_key, token)
|
||||
table = "worker_read_write_locks" if read_write else "worker_locks"
|
||||
await store.db_pool.simple_update(
|
||||
table=table,
|
||||
keyvalues={
|
||||
"lock_name": lock_name,
|
||||
"lock_key": lock_key,
|
||||
"token": token,
|
||||
},
|
||||
updatevalues={"last_renewed_ts": clock.time_msec()},
|
||||
desc="renew_lock",
|
||||
)
|
||||
|
||||
async def is_still_valid(self) -> bool:
|
||||
"""Check if the lock is still held by us"""
|
||||
return await self._store._is_lock_still_valid(
|
||||
self._lock_name, self._lock_key, self._token
|
||||
last_renewed_ts = await self._store.db_pool.simple_select_one_onecol(
|
||||
table=self._table,
|
||||
keyvalues={
|
||||
"lock_name": self._lock_name,
|
||||
"lock_key": self._lock_key,
|
||||
"token": self._token,
|
||||
},
|
||||
retcol="last_renewed_ts",
|
||||
allow_none=True,
|
||||
desc="is_lock_still_valid",
|
||||
)
|
||||
return (
|
||||
last_renewed_ts is not None
|
||||
and self._clock.time_msec() - _LOCK_TIMEOUT_MS < last_renewed_ts
|
||||
)
|
||||
|
||||
async def __aenter__(self) -> None:
|
||||
|
@ -325,7 +421,23 @@ class Lock:
|
|||
if self._looping_call.running:
|
||||
self._looping_call.stop()
|
||||
|
||||
await self._store._drop_lock(self._lock_name, self._lock_key, self._token)
|
||||
await self._store.db_pool.simple_delete(
|
||||
table=self._table,
|
||||
keyvalues={
|
||||
"lock_name": self._lock_name,
|
||||
"lock_key": self._lock_key,
|
||||
"token": self._token,
|
||||
},
|
||||
desc="drop_lock",
|
||||
)
|
||||
|
||||
if self._read_write:
|
||||
self._store._live_read_write_lock_tokens.pop(
|
||||
(self._lock_name, self._lock_key, self._token), None
|
||||
)
|
||||
else:
|
||||
self._store._live_lock_tokens.pop((self._lock_name, self._lock_key), None)
|
||||
|
||||
self._dropped = True
|
||||
|
||||
def __del__(self) -> None:
|
||||
|
|
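[Editor's note: a rough usage sketch for the new read/write lock, mirroring the docstring of `try_acquire_read_write_lock` and the tests added later in this diff. The lock name and key are illustrative.]

    async def do_guarded_work(store) -> None:
        # `store` is the LockStore-providing datastore, e.g. hs.get_datastores().main.
        lock = await store.try_acquire_read_write_lock(
            "purge_room", "!room:example.com", write=True
        )
        if lock is None:
            # A conflicting lock is held (another writer, or any reader).
            return

        # The returned lock *must* be used as an async context manager; otherwise
        # it leaks and is only cleaned up when it times out.
        async with lock:
            ...  # do the work that requires exclusive (write) access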
|
@ -27,6 +27,7 @@ from typing import (
|
|||
)
|
||||
|
||||
from synapse.api.constants import Direction
|
||||
from synapse.logging.opentracing import trace
|
||||
from synapse.storage._base import SQLBaseStore
|
||||
from synapse.storage.database import (
|
||||
DatabasePool,
|
||||
|
@ -328,6 +329,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
|
|||
"get_local_media_ids", _get_local_media_ids_txn
|
||||
)
|
||||
|
||||
@trace
|
||||
async def store_local_media(
|
||||
self,
|
||||
media_id: str,
|
||||
|
@ -447,6 +449,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
|
|||
desc="get_local_media_thumbnails",
|
||||
)
|
||||
|
||||
@trace
|
||||
async def store_local_thumbnail(
|
||||
self,
|
||||
media_id: str,
|
||||
|
@ -568,6 +571,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
|
|||
desc="get_remote_media_thumbnails",
|
||||
)
|
||||
|
||||
@trace
|
||||
async def get_remote_media_thumbnail(
|
||||
self,
|
||||
origin: str,
|
||||
|
@ -599,6 +603,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
|
|||
desc="get_remote_media_thumbnail",
|
||||
)
|
||||
|
||||
@trace
|
||||
async def store_remote_media_thumbnail(
|
||||
self,
|
||||
origin: str,
|
||||
|
|
|
@ -11,7 +11,6 @@
|
|||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, cast
|
||||
|
||||
from synapse.api.presence import PresenceState, UserPresenceState
|
||||
|
@ -24,6 +23,7 @@ from synapse.storage.database import (
|
|||
)
|
||||
from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore
|
||||
from synapse.storage.engines import PostgresEngine
|
||||
from synapse.storage.engines._base import IsolationLevel
|
||||
from synapse.storage.types import Connection
|
||||
from synapse.storage.util.id_generators import (
|
||||
AbstractStreamIdGenerator,
|
||||
|
@ -115,11 +115,16 @@ class PresenceStore(PresenceBackgroundUpdateStore, CacheInvalidationWorkerStore)
|
|||
)
|
||||
|
||||
async with stream_ordering_manager as stream_orderings:
|
||||
# Run the interaction with an isolation level of READ_COMMITTED to avoid
|
||||
# serialization errors (and rollbacks) in the database. This way it will
|
||||
# ignore new rows during the DELETE, but will pick them up the next time
|
||||
# this is run. Currently, that is between 5-60 seconds.
|
||||
await self.db_pool.runInteraction(
|
||||
"update_presence",
|
||||
self._update_presence_txn,
|
||||
stream_orderings,
|
||||
presence_states,
|
||||
isolation_level=IsolationLevel.READ_COMMITTED,
|
||||
)
|
||||
|
||||
return stream_orderings[-1], self._presence_id_gen.get_current_token()
|
||||
|
|
|
@ -173,10 +173,9 @@ class ProfileWorkerStore(SQLBaseStore):
|
|||
)
|
||||
|
||||
async def create_profile(self, user_id: UserID) -> None:
|
||||
user_localpart = user_id.localpart
|
||||
await self.db_pool.simple_insert(
|
||||
table="profiles",
|
||||
values={"user_id": user_localpart, "full_user_id": user_id.to_string()},
|
||||
values={"full_user_id": user_id.to_string()},
|
||||
desc="create_profile",
|
||||
)
|
||||
|
||||
|
@ -191,13 +190,11 @@ class ProfileWorkerStore(SQLBaseStore):
|
|||
new_displayname: The new display name. If this is None, the user's display
|
||||
name is removed.
|
||||
"""
|
||||
user_localpart = user_id.localpart
|
||||
await self.db_pool.simple_upsert(
|
||||
table="profiles",
|
||||
keyvalues={"user_id": user_localpart},
|
||||
keyvalues={"full_user_id": user_id.to_string()},
|
||||
values={
|
||||
"displayname": new_displayname,
|
||||
"full_user_id": user_id.to_string(),
|
||||
},
|
||||
desc="set_profile_displayname",
|
||||
)
|
||||
|
@ -213,11 +210,10 @@ class ProfileWorkerStore(SQLBaseStore):
|
|||
new_avatar_url: The new avatar URL. If this is None, the user's avatar is
|
||||
removed.
|
||||
"""
|
||||
user_localpart = user_id.localpart
|
||||
await self.db_pool.simple_upsert(
|
||||
table="profiles",
|
||||
keyvalues={"user_id": user_localpart},
|
||||
values={"avatar_url": new_avatar_url, "full_user_id": user_id.to_string()},
|
||||
keyvalues={"full_user_id": user_id.to_string()},
|
||||
values={"avatar_url": new_avatar_url},
|
||||
desc="set_profile_avatar_url",
|
||||
)
|
||||
|
||||
|
|
|
@ -45,6 +45,15 @@ class PostgresEngine(
|
|||
|
||||
psycopg2.extensions.register_adapter(bytes, _disable_bytes_adapter)
|
||||
self.synchronous_commit: bool = database_config.get("synchronous_commit", True)
|
||||
# Set the statement timeout to 1 hour by default.
|
||||
# Any query taking more than 1 hour should probably be considered a bug;
|
||||
# most of the time this is a sign that work needs to be split up or that
|
||||
# some degenerate query plan has been created and the client has probably
|
||||
# timed out/walked off anyway.
|
||||
# This is in milliseconds.
|
||||
self.statement_timeout: Optional[int] = database_config.get(
|
||||
"statement_timeout", 60 * 60 * 1000
|
||||
)
|
||||
self._version: Optional[int] = None # unknown as yet
|
||||
|
||||
self.isolation_level_map: Mapping[int, int] = {
|
||||
|
@ -157,6 +166,10 @@ class PostgresEngine(
|
|||
if not self.synchronous_commit:
|
||||
cursor.execute("SET synchronous_commit TO OFF")
|
||||
|
||||
# Abort really long-running statements and turn them into errors.
|
||||
if self.statement_timeout is not None:
|
||||
cursor.execute("SET statement_timeout TO ?", (self.statement_timeout,))
|
||||
|
||||
cursor.close()
|
||||
db_conn.commit()
|
||||
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
SCHEMA_VERSION = 78 # remember to update the list below when updating
|
||||
SCHEMA_VERSION = 79 # remember to update the list below when updating
|
||||
"""Represents the expectations made by the codebase about the database schema
|
||||
|
||||
This should be incremented whenever the codebase changes its requirements on the
|
||||
|
@ -106,6 +106,9 @@ Changes in SCHEMA_VERSION = 77
|
|||
|
||||
Changes in SCHEMA_VERSION = 78
|
||||
- Validate check (full_user_id IS NOT NULL) on tables profiles and user_filters
|
||||
|
||||
Changes in SCHEMA_VERSION = 79
|
||||
- We no longer write to column user_id of tables profiles and user_filters
|
||||
"""
|
||||
|
||||
|
||||
|
@ -118,7 +121,9 @@ SCHEMA_COMPAT_VERSION = (
|
|||
#
|
||||
# insertions to the column `full_user_id` of tables profiles and user_filters can no
|
||||
# longer be null
|
||||
76
|
||||
#
|
||||
# we no longer write to column `user_id` of tables profiles and user_filters
|
||||
78
|
||||
)
|
||||
"""Limit on how far the synapse codebase can be rolled back without breaking db compat
|
||||
|
||||
|
|
|
@ -0,0 +1,57 @@
|
|||
# Copyright 2023 The Matrix.org Foundation C.I.C.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
"""
|
||||
This migration adds foreign key constraint to `event_forward_extremities` table.
|
||||
"""
|
||||
from synapse.storage.background_updates import (
|
||||
ForeignKeyConstraint,
|
||||
run_validate_constraint_and_delete_rows_schema_delta,
|
||||
)
|
||||
from synapse.storage.database import LoggingTransaction
|
||||
from synapse.storage.engines import BaseDatabaseEngine
|
||||
|
||||
FORWARD_EXTREMITIES_TABLE_SCHEMA = """
|
||||
CREATE TABLE event_forward_extremities2(
|
||||
event_id TEXT NOT NULL,
|
||||
room_id TEXT NOT NULL,
|
||||
UNIQUE (event_id, room_id),
|
||||
CONSTRAINT event_forward_extremities_event_id FOREIGN KEY (event_id) REFERENCES events (event_id) DEFERRABLE INITIALLY DEFERRED
|
||||
)
|
||||
"""
|
||||
|
||||
|
||||
def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
|
||||
# We mark this as a deferred constraint, as the previous version of Synapse
|
||||
# inserted the event into the forward extremities *before* the events table.
|
||||
# By marking as deferred we ensure that downgrading to the previous version
|
||||
# will continue to work.
|
||||
run_validate_constraint_and_delete_rows_schema_delta(
|
||||
cur,
|
||||
ordering=7803,
|
||||
update_name="event_forward_extremities_event_id_foreign_key_constraint_update",
|
||||
table="event_forward_extremities",
|
||||
constraint_name="event_forward_extremities_event_id",
|
||||
constraint=ForeignKeyConstraint(
|
||||
"events", [("event_id", "event_id")], deferred=True
|
||||
),
|
||||
sqlite_table_name="event_forward_extremities2",
|
||||
sqlite_table_schema=FORWARD_EXTREMITIES_TABLE_SCHEMA,
|
||||
)
|
||||
|
||||
# We can't add a similar constraint to `event_backward_extremities` as the
|
||||
# events in there don't exist in the `events` table and `event_edges`
|
||||
# doesn't have a unique constraint on `prev_event_id` (so we can't make a
|
||||
# foreign key point to it).
|
|
@ -0,0 +1,152 @@
|
|||
/* Copyright 2023 The Matrix.org Foundation C.I.C
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
|
||||
-- We implement read/write style locks by using two tables with mutual foreign
|
||||
-- key constraints. Note that this implementation is vulnerable to starving
|
||||
-- writers if read locks repeatedly get acquired.
|
||||
--
|
||||
-- The first table (`worker_read_write_locks_mode`) indicates that a given lock
|
||||
-- has either been acquired in read mode *or* write mode, but not both. This is
|
||||
-- enforced by the unique constraint. Each instance of a lock being acquired is
|
||||
-- associated with a random `token`.
|
||||
--
|
||||
-- The second table (`worker_read_write_locks`) tracks who has currently
|
||||
-- acquired a given lock. For a given lock_name/lock_key, there can be multiple
|
||||
-- read locks at a time but only one write lock (no mixing read and write locks
|
||||
-- at the same time).
|
||||
--
|
||||
-- The foreign key from the second to first table enforces that for any given
|
||||
-- lock the second table cannot have a mix of rows with read or write.
|
||||
--
|
||||
-- The foreign key from the first to second table enforces that we don't have a
|
||||
-- row for a lock in the first table if not in the second table.
|
||||
--
|
||||
--
|
||||
-- Furthermore, we add some triggers to automatically keep the first table up to
|
||||
-- date when inserting/deleting from the second table. This reduces the number
|
||||
-- of round trips needed to acquire and release locks, as those operations
|
||||
-- simply become an INSERT or DELETE. These triggers are added in a separate
|
||||
-- delta due to database specific syntax.
|
||||
|
||||
|
||||
-- A table to track whether a lock is currently acquired, and if so whether it's
|
||||
-- in read or write mode.
|
||||
CREATE TABLE worker_read_write_locks_mode (
|
||||
lock_name TEXT NOT NULL,
|
||||
lock_key TEXT NOT NULL,
|
||||
-- Whether this lock is in read (false) or write (true) mode
|
||||
write_lock BOOLEAN NOT NULL,
|
||||
-- A token that has currently acquired the lock. We need this so that we can
|
||||
-- add a foreign constraint from this table to `worker_read_write_locks`.
|
||||
token TEXT NOT NULL
|
||||
);
|
||||
|
||||
-- Ensure that we can only have one row per lock
|
||||
CREATE UNIQUE INDEX worker_read_write_locks_mode_key ON worker_read_write_locks_mode (lock_name, lock_key);
|
||||
-- We need this (redundant) constraint so that we can have a foreign key
|
||||
-- constraint against this table.
|
||||
CREATE UNIQUE INDEX worker_read_write_locks_mode_type ON worker_read_write_locks_mode (lock_name, lock_key, write_lock);
|
||||
|
||||
|
||||
-- A table to track who has currently acquired a given lock.
|
||||
CREATE TABLE worker_read_write_locks (
|
||||
lock_name TEXT NOT NULL,
|
||||
lock_key TEXT NOT NULL,
|
||||
-- We write the instance name to ease manual debugging, we don't ever read
|
||||
-- from it.
|
||||
-- Note: instance names aren't guarenteed to be unique.
|
||||
instance_name TEXT NOT NULL,
|
||||
-- Whether the process has taken out a "read" or a "write" lock.
|
||||
write_lock BOOLEAN NOT NULL,
|
||||
-- A random string generated each time an instance takes out a lock. Used by
|
||||
-- the instance to tell whether the lock is still held by it (e.g. in the
|
||||
-- case where the process stalls for a long time the lock may time out and
|
||||
-- be taken out by another instance, at which point the original instance
|
||||
-- can tell it no longer holds the lock as the tokens no longer match).
|
||||
token TEXT NOT NULL,
|
||||
last_renewed_ts BIGINT NOT NULL,
|
||||
|
||||
-- This constraint ensures that a given lock has only been acquired in read
|
||||
-- xor write mode, but not both.
|
||||
FOREIGN KEY (lock_name, lock_key, write_lock) REFERENCES worker_read_write_locks_mode (lock_name, lock_key, write_lock)
|
||||
);
|
||||
|
||||
CREATE UNIQUE INDEX worker_read_write_locks_key ON worker_read_write_locks (lock_name, lock_key, token);
|
||||
-- Ensures that only one instance can acquire a lock in write mode at a time.
|
||||
CREATE UNIQUE INDEX worker_read_write_locks_write ON worker_read_write_locks (lock_name, lock_key) WHERE write_lock;
|
||||
|
||||
|
||||
-- Add a foreign key constraint to ensure that if a lock is in
|
||||
-- `worker_read_write_locks_mode` then there must be a corresponding row in
|
||||
-- `worker_read_write_locks` (i.e. we don't accidentally end up with a row in
|
||||
-- `worker_read_write_locks_mode` when the lock is not currently acquired).
|
||||
--
|
||||
-- We only add to PostgreSQL as SQLite does not support adding constraints
|
||||
-- after table creation, and so doesn't support "circular" foreign key
|
||||
-- constraints.
|
||||
ALTER TABLE worker_read_write_locks_mode ADD CONSTRAINT worker_read_write_locks_mode_foreign
|
||||
FOREIGN KEY (lock_name, lock_key, token) REFERENCES worker_read_write_locks(lock_name, lock_key, token) DEFERRABLE INITIALLY DEFERRED;
|
||||
|
||||
|
||||
-- Add a trigger to UPSERT into `worker_read_write_locks_mode` whenever we try
|
||||
-- and acquire a lock, i.e. insert into `worker_read_write_locks`.
|
||||
CREATE OR REPLACE FUNCTION upsert_read_write_lock_parent() RETURNS trigger AS $$
|
||||
BEGIN
|
||||
INSERT INTO worker_read_write_locks_mode (lock_name, lock_key, write_lock, token)
|
||||
VALUES (NEW.lock_name, NEW.lock_key, NEW.write_lock, NEW.token)
|
||||
ON CONFLICT (lock_name, lock_key)
|
||||
DO NOTHING;
|
||||
RETURN NEW;
|
||||
END
|
||||
$$
|
||||
LANGUAGE plpgsql;
|
||||
|
||||
CREATE TRIGGER upsert_read_write_lock_parent_trigger BEFORE INSERT ON worker_read_write_locks
|
||||
FOR EACH ROW
|
||||
EXECUTE PROCEDURE upsert_read_write_lock_parent();
|
||||
|
||||
|
||||
-- Ensure that we keep `worker_read_write_locks_mode` up to date whenever a lock
|
||||
-- is released (i.e. a row deleted from `worker_read_write_locks`). Either we
|
||||
-- update the `worker_read_write_locks_mode.token` to match another instance
|
||||
-- that has currently acquired the lock, or we delete the row if nobody has
|
||||
-- currently acquired a lock.
|
||||
CREATE OR REPLACE FUNCTION delete_read_write_lock_parent() RETURNS trigger AS $$
|
||||
DECLARE
|
||||
new_token TEXT;
|
||||
BEGIN
|
||||
SELECT token INTO new_token FROM worker_read_write_locks
|
||||
WHERE
|
||||
lock_name = OLD.lock_name
|
||||
AND lock_key = OLD.lock_key;
|
||||
|
||||
IF NOT FOUND THEN
|
||||
DELETE FROM worker_read_write_locks_mode
|
||||
WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key;
|
||||
ELSE
|
||||
UPDATE worker_read_write_locks_mode
|
||||
SET token = new_token
|
||||
WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key;
|
||||
END IF;
|
||||
|
||||
RETURN NEW;
|
||||
END
|
||||
$$
|
||||
LANGUAGE plpgsql;
|
||||
|
||||
CREATE TRIGGER delete_read_write_lock_parent_trigger AFTER DELETE ON worker_read_write_locks
|
||||
FOR EACH ROW
|
||||
EXECUTE PROCEDURE delete_read_write_lock_parent();
|
|
@ -0,0 +1,119 @@
|
|||
/* Copyright 2023 The Matrix.org Foundation C.I.C
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
|
||||
-- c.f. the postgres version for context. The tables and constraints are the
|
||||
-- same, however they need to be defined slightly differently to work around how
|
||||
-- each database handles circular foreign key references.
|
||||
|
||||
|
||||
|
||||
-- A table to track whether a lock is currently acquired, and if so whether it's
|
||||
-- in read or write mode.
|
||||
CREATE TABLE worker_read_write_locks_mode (
|
||||
lock_name TEXT NOT NULL,
|
||||
lock_key TEXT NOT NULL,
|
||||
-- Whether this lock is in read (false) or write (true) mode
|
||||
write_lock BOOLEAN NOT NULL,
|
||||
-- A token that has currently acquired the lock. We need this so that we can
|
||||
-- add a foreign constraint from this table to `worker_read_write_locks`.
|
||||
token TEXT NOT NULL,
|
||||
-- Add a foreign key constraint to ensure that if a lock is in
|
||||
-- `worker_read_write_locks_mode` then there must be a corresponding row in
|
||||
-- `worker_read_write_locks` (i.e. we don't accidentally end up with a row in
|
||||
-- `worker_read_write_locks_mode` when the lock is not currently acquired).
|
||||
FOREIGN KEY (lock_name, lock_key, token) REFERENCES worker_read_write_locks(lock_name, lock_key, token) DEFERRABLE INITIALLY DEFERRED
|
||||
);
|
||||
|
||||
-- Ensure that we can only have one row per lock
|
||||
CREATE UNIQUE INDEX worker_read_write_locks_mode_key ON worker_read_write_locks_mode (lock_name, lock_key);
|
||||
-- We need this (redundant) constraint so that we can have a foreign key
|
||||
-- constraint against this table.
|
||||
CREATE UNIQUE INDEX worker_read_write_locks_mode_type ON worker_read_write_locks_mode (lock_name, lock_key, write_lock);
|
||||
|
||||
|
||||
-- A table to track who has currently acquired a given lock.
|
||||
CREATE TABLE worker_read_write_locks (
|
||||
lock_name TEXT NOT NULL,
|
||||
lock_key TEXT NOT NULL,
|
||||
-- We write the instance name to ease manual debugging, we don't ever read
|
||||
-- from it.
|
||||
-- Note: instance names aren't guarenteed to be unique.
|
||||
instance_name TEXT NOT NULL,
|
||||
-- Whether the process has taken out a "read" or a "write" lock.
|
||||
write_lock BOOLEAN NOT NULL,
|
||||
-- A random string generated each time an instance takes out a lock. Used by
|
||||
-- the instance to tell whether the lock is still held by it (e.g. in the
|
||||
-- case where the process stalls for a long time the lock may time out and
|
||||
-- be taken out by another instance, at which point the original instance
|
||||
-- can tell it no longer holds the lock as the tokens no longer match).
|
||||
token TEXT NOT NULL,
|
||||
last_renewed_ts BIGINT NOT NULL,
|
||||
|
||||
-- This constraint ensures that a given lock has only been acquired in read
|
||||
-- xor write mode, but not both.
|
||||
FOREIGN KEY (lock_name, lock_key, write_lock) REFERENCES worker_read_write_locks_mode (lock_name, lock_key, write_lock)
|
||||
);
|
||||
|
||||
CREATE UNIQUE INDEX worker_read_write_locks_key ON worker_read_write_locks (lock_name, lock_key, token);
|
||||
-- Ensures that only one instance can acquire a lock in write mode at a time.
|
||||
CREATE UNIQUE INDEX worker_read_write_locks_write ON worker_read_write_locks (lock_name, lock_key) WHERE write_lock;
|
||||
|
||||
|
||||
-- Add a trigger to UPSERT into `worker_read_write_locks_mode` whenever we try
|
||||
-- and acquire a lock, i.e. insert into `worker_read_write_locks`.
|
||||
CREATE TRIGGER IF NOT EXISTS upsert_read_write_lock_parent_trigger
|
||||
BEFORE INSERT ON worker_read_write_locks
|
||||
FOR EACH ROW
|
||||
BEGIN
|
||||
-- First ensure that `worker_read_write_locks_mode` doesn't have stale
|
||||
-- entries in it, as on SQLite we don't have the foreign key constraint to
|
||||
-- enforce this.
|
||||
DELETE FROM worker_read_write_locks_mode
|
||||
WHERE lock_name = NEW.lock_name AND lock_key = NEW.lock_key
|
||||
AND NOT EXISTS (
|
||||
SELECT 1 FROM worker_read_write_locks
|
||||
WHERE lock_name = NEW.lock_name AND lock_key = NEW.lock_key
|
||||
);
|
||||
|
||||
INSERT INTO worker_read_write_locks_mode (lock_name, lock_key, write_lock, token)
|
||||
VALUES (NEW.lock_name, NEW.lock_key, NEW.write_lock, NEW.token)
|
||||
ON CONFLICT (lock_name, lock_key)
|
||||
DO NOTHING;
|
||||
END;
|
||||
|
||||
-- Ensure that we keep `worker_read_write_locks_mode` up to date whenever a lock
|
||||
-- is released (i.e. a row deleted from `worker_read_write_locks`). Either we
|
||||
-- update the `worker_read_write_locks_mode.token` to match another instance
|
||||
-- that has currently acquired the lock, or we delete the row if nobody has
|
||||
-- currently acquired a lock.
|
||||
CREATE TRIGGER IF NOT EXISTS delete_read_write_lock_parent_trigger
|
||||
AFTER DELETE ON worker_read_write_locks
|
||||
FOR EACH ROW
|
||||
BEGIN
|
||||
DELETE FROM worker_read_write_locks_mode
|
||||
WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key
|
||||
AND NOT EXISTS (
|
||||
SELECT 1 FROM worker_read_write_locks
|
||||
WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key
|
||||
);
|
||||
|
||||
UPDATE worker_read_write_locks_mode
|
||||
SET token = (
|
||||
SELECT token FROM worker_read_write_locks
|
||||
WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key
|
||||
)
|
||||
WHERE lock_name = OLD.lock_name AND lock_key = OLD.lock_key;
|
||||
END;
|
|
@ -0,0 +1,50 @@
|
|||
from synapse.storage.database import LoggingTransaction
|
||||
from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine
|
||||
|
||||
|
||||
def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
|
||||
"""
|
||||
Update to drop the NOT NULL constraint on column user_id so that we can cease to
|
||||
write to it without inserts to other columns triggering the constraint
|
||||
"""
|
||||
|
||||
if isinstance(database_engine, PostgresEngine):
|
||||
drop_sql = """
|
||||
ALTER TABLE profiles ALTER COLUMN user_id DROP NOT NULL
|
||||
"""
|
||||
cur.execute(drop_sql)
|
||||
else:
|
||||
# Irritatingly, in SQLite we need to rewrite the table to drop the constraint.
|
||||
cur.execute("DROP TABLE IF EXISTS temp_profiles")
|
||||
|
||||
create_sql = """
|
||||
CREATE TABLE temp_profiles (
|
||||
full_user_id text NOT NULL,
|
||||
user_id text,
|
||||
displayname text,
|
||||
avatar_url text,
|
||||
UNIQUE (full_user_id),
|
||||
UNIQUE (user_id)
|
||||
)
|
||||
"""
|
||||
cur.execute(create_sql)
|
||||
|
||||
copy_sql = """
|
||||
INSERT INTO temp_profiles (
|
||||
user_id,
|
||||
displayname,
|
||||
avatar_url,
|
||||
full_user_id)
|
||||
SELECT user_id, displayname, avatar_url, full_user_id FROM profiles
|
||||
"""
|
||||
cur.execute(copy_sql)
|
||||
|
||||
drop_sql = """
|
||||
DROP TABLE profiles
|
||||
"""
|
||||
cur.execute(drop_sql)
|
||||
|
||||
rename_sql = """
|
||||
ALTER TABLE temp_profiles RENAME to profiles
|
||||
"""
|
||||
cur.execute(rename_sql)
|
|
@ -0,0 +1,54 @@
|
|||
from synapse.storage.database import LoggingTransaction
|
||||
from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine
|
||||
|
||||
|
||||
def run_create(cur: LoggingTransaction, database_engine: BaseDatabaseEngine) -> None:
|
||||
"""
|
||||
Update to drop the NOT NULL constraint on column user_id so that we can cease to
|
||||
write to it without inserts to other columns triggering the constraint
|
||||
"""
|
||||
if isinstance(database_engine, PostgresEngine):
|
||||
drop_sql = """
|
||||
ALTER TABLE user_filters ALTER COLUMN user_id DROP NOT NULL
|
||||
"""
|
||||
cur.execute(drop_sql)
|
||||
|
||||
else:
|
||||
# Irritatingly, in SQLite we need to rewrite the table to drop the constraint.
|
||||
cur.execute("DROP TABLE IF EXISTS temp_user_filters")
|
||||
|
||||
create_sql = """
|
||||
CREATE TABLE temp_user_filters (
|
||||
full_user_id text NOT NULL,
|
||||
user_id text,
|
||||
filter_id bigint NOT NULL,
|
||||
filter_json bytea NOT NULL
|
||||
)
|
||||
"""
|
||||
cur.execute(create_sql)
|
||||
|
||||
index_sql = """
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS user_filters_full_user_id_unique ON
|
||||
temp_user_filters (full_user_id, filter_id)
|
||||
"""
|
||||
cur.execute(index_sql)
|
||||
|
||||
copy_sql = """
|
||||
INSERT INTO temp_user_filters (
|
||||
user_id,
|
||||
filter_id,
|
||||
filter_json,
|
||||
full_user_id)
|
||||
SELECT user_id, filter_id, filter_json, full_user_id FROM user_filters
|
||||
"""
|
||||
cur.execute(copy_sql)
|
||||
|
||||
drop_sql = """
|
||||
DROP TABLE user_filters
|
||||
"""
|
||||
cur.execute(drop_sql)
|
||||
|
||||
rename_sql = """
|
||||
ALTER TABLE temp_user_filters RENAME to user_filters
|
||||
"""
|
||||
cur.execute(rename_sql)
|
|
@ -21,16 +21,13 @@ require. But this is probably just symptomatic of Python's package management.
|
|||
"""
|
||||
|
||||
import logging
|
||||
from importlib import metadata
|
||||
from typing import Iterable, NamedTuple, Optional
|
||||
|
||||
from packaging.requirements import Requirement
|
||||
|
||||
DISTRIBUTION_NAME = "matrix-synapse"
|
||||
|
||||
try:
|
||||
from importlib import metadata
|
||||
except ImportError:
|
||||
import importlib_metadata as metadata # type: ignore[no-redef]
|
||||
|
||||
__all__ = ["check_requirements"]
|
||||
|
||||
|
|
|
@ -25,9 +25,9 @@ class HomeserverAppStartTestCase(ConfigFileTestCase):
|
|||
# Add a blank line as otherwise the next addition ends up on a line with a comment
|
||||
self.add_lines_to_config([" "])
|
||||
self.add_lines_to_config(["worker_app: test_worker_app"])
|
||||
self.add_lines_to_config(["worker_replication_host: 127.0.0.1"])
|
||||
self.add_lines_to_config(["worker_replication_http_port: 0"])
|
||||
|
||||
self.add_lines_to_config(["worker_log_config: /data/logconfig.config"])
|
||||
self.add_lines_to_config(["instance_map:"])
|
||||
self.add_lines_to_config([" main:", " host: 127.0.0.1", " port: 1234"])
|
||||
# Ensure that starting master process with worker config raises an exception
|
||||
with self.assertRaises(ConfigError):
|
||||
synapse.app.homeserver.setup(["-c", self.config_file])
|
||||
|
|
|
@ -17,7 +17,7 @@ from unittest.mock import Mock
|
|||
from immutabledict import immutabledict
|
||||
|
||||
from synapse.config import ConfigError
|
||||
from synapse.config.workers import InstanceLocationConfig, WorkerConfig
|
||||
from synapse.config.workers import WorkerConfig
|
||||
|
||||
from tests.unittest import TestCase
|
||||
|
||||
|
@ -323,28 +323,3 @@ class WorkerDutyConfigTestCase(TestCase):
|
|||
)
|
||||
self.assertTrue(worker2_config.should_notify_appservices)
|
||||
self.assertFalse(worker2_config.should_update_user_directory)
|
||||
|
||||
def test_worker_instance_map_compat(self) -> None:
|
||||
"""
|
||||
Test that `worker_replication_*` settings are compatibly handled by
|
||||
adding them to the instance map as a `main` entry.
|
||||
"""
|
||||
|
||||
worker1_config = self._make_worker_config(
|
||||
worker_app="synapse.app.generic_worker",
|
||||
worker_name="worker1",
|
||||
extras={
|
||||
"notify_appservices_from_worker": "worker2",
|
||||
"update_user_directory_from_worker": "worker1",
|
||||
"worker_replication_host": "127.0.0.42",
|
||||
"worker_replication_http_port": 1979,
|
||||
},
|
||||
)
|
||||
self.assertEqual(
|
||||
worker1_config.instance_map,
|
||||
{
|
||||
"master": InstanceLocationConfig(
|
||||
host="127.0.0.42", port=1979, tls=False
|
||||
),
|
||||
},
|
||||
)
|
||||
|
|
|
@ -12,19 +12,13 @@
|
|||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from importlib import metadata
|
||||
from typing import Dict, Tuple
|
||||
|
||||
from typing_extensions import Protocol
|
||||
|
||||
try:
|
||||
from importlib import metadata
|
||||
except ImportError:
|
||||
import importlib_metadata as metadata # type: ignore[no-redef]
|
||||
|
||||
from unittest.mock import patch
|
||||
|
||||
from pkg_resources import parse_version
|
||||
from prometheus_client.core import Sample
|
||||
from typing_extensions import Protocol
|
||||
|
||||
from synapse.app._base import _set_prometheus_client_use_created_metrics
|
||||
from synapse.metrics import REGISTRY, InFlightGauge, generate_latest
|
||||
|
|
|
@ -933,6 +933,84 @@ class UsersListTestCase(unittest.HomeserverTestCase):
|
|||
self.assertEqual(1, len(non_admin_user_ids), non_admin_user_ids)
|
||||
self.assertEqual(not_approved_user, non_admin_user_ids[0])
|
||||
|
||||
def test_filter_not_user_types(self) -> None:
|
||||
"""Tests that the endpoint handles the not_user_types param"""
|
||||
|
||||
regular_user_id = self.register_user("normalo", "secret")
|
||||
|
||||
bot_user_id = self.register_user("robo", "secret")
|
||||
self.make_request(
|
||||
"PUT",
|
||||
"/_synapse/admin/v2/users/" + urllib.parse.quote(bot_user_id),
|
||||
{"user_type": UserTypes.BOT},
|
||||
access_token=self.admin_user_tok,
|
||||
)
|
||||
|
||||
support_user_id = self.register_user("foo", "secret")
|
||||
self.make_request(
|
||||
"PUT",
|
||||
"/_synapse/admin/v2/users/" + urllib.parse.quote(support_user_id),
|
||||
{"user_type": UserTypes.SUPPORT},
|
||||
access_token=self.admin_user_tok,
|
||||
)
|
||||
|
||||
def test_user_type(
|
||||
expected_user_ids: List[str], not_user_types: Optional[List[str]] = None
|
||||
) -> None:
|
||||
"""Runs a test for the not_user_types param
|
||||
Args:
|
||||
expected_user_ids: Ids of the users that are expected to be returned
|
||||
not_user_types: List of values for the not_user_types param
|
||||
"""
|
||||
|
||||
user_type_query = ""
|
||||
|
||||
if not_user_types is not None:
|
||||
user_type_query = "&".join(
|
||||
[f"not_user_type={u}" for u in not_user_types]
|
||||
)
|
||||
|
||||
test_url = f"{self.url}?{user_type_query}"
|
||||
channel = self.make_request(
|
||||
"GET",
|
||||
test_url,
|
||||
access_token=self.admin_user_tok,
|
||||
)
|
||||
|
||||
self.assertEqual(200, channel.code)
|
||||
self.assertEqual(channel.json_body["total"], len(expected_user_ids))
|
||||
self.assertEqual(
|
||||
expected_user_ids,
|
||||
[u["name"] for u in channel.json_body["users"]],
|
||||
)
|
||||
|
||||
# Request without user_types → all users expected
|
||||
test_user_type([self.admin_user, support_user_id, regular_user_id, bot_user_id])
|
||||
|
||||
# Request and exclude bot users
|
||||
test_user_type(
|
||||
[self.admin_user, support_user_id, regular_user_id],
|
||||
not_user_types=[UserTypes.BOT],
|
||||
)
|
||||
|
||||
# Request and exclude bot and support users
|
||||
test_user_type(
|
||||
[self.admin_user, regular_user_id],
|
||||
not_user_types=[UserTypes.BOT, UserTypes.SUPPORT],
|
||||
)
|
||||
|
||||
# Request and exclude empty user types → only the bot and support users are expected
|
||||
test_user_type([support_user_id, bot_user_id], not_user_types=[""])
|
||||
|
||||
# Request and exclude empty user types and bots → only the support user is expected
|
||||
test_user_type([support_user_id], not_user_types=["", UserTypes.BOT])
|
||||
|
||||
# Request and exclude a custom type (neither support nor bot) → expect all users
|
||||
test_user_type(
|
||||
[self.admin_user, support_user_id, regular_user_id, bot_user_id],
|
||||
not_user_types=["custom"],
|
||||
)
|
||||
|
||||
def test_erasure_status(self) -> None:
|
||||
# Create a new user.
|
||||
user_id = self.register_user("eraseme", "eraseme")
|
||||
|
|
|
@@ -166,4 +166,285 @@ class LockTestCase(unittest.HomeserverTestCase):
        # Now call the shutdown code
        self.get_success(self.store._on_shutdown())

        self.assertEqual(self.store._live_tokens, {})
        self.assertEqual(self.store._live_lock_tokens, {})


class ReadWriteLockTestCase(unittest.HomeserverTestCase):
    """Test the read/write lock implementation."""

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.store = hs.get_datastores().main

    def test_acquire_write_contention(self) -> None:
        """Test that we can only acquire one write lock at a time"""
        # Track the number of tasks holding the lock.
        # Should be at most 1.
        in_lock = 0
        max_in_lock = 0

        release_lock: "Deferred[None]" = Deferred()

        async def task() -> None:
            nonlocal in_lock
            nonlocal max_in_lock

            lock = await self.store.try_acquire_read_write_lock(
                "name", "key", write=True
            )
            if not lock:
                return

            async with lock:
                in_lock += 1
                max_in_lock = max(max_in_lock, in_lock)

                # Block to allow other tasks to attempt to take the lock.
                await release_lock

                in_lock -= 1

        # Start 3 tasks.
        task1 = defer.ensureDeferred(task())
        task2 = defer.ensureDeferred(task())
        task3 = defer.ensureDeferred(task())

        # Give the reactor a kick so that the database transaction returns.
        self.pump()

        release_lock.callback(None)

        # Run the tasks to completion.
        # To work around `Linearizer`s using a different reactor to sleep when
        # contended (#12841), we call `runUntilCurrent` on
        # `twisted.internet.reactor`, which is a different reactor to that used
        # by the homeserver.
        assert isinstance(reactor, ReactorBase)
        self.get_success(task1)
        reactor.runUntilCurrent()
        self.get_success(task2)
        reactor.runUntilCurrent()
        self.get_success(task3)

        # At most one task should have held the lock at a time.
        self.assertEqual(max_in_lock, 1)

    def test_acquire_multiple_reads(self) -> None:
        """Test that we can acquire multiple read locks at a time"""
        # Track the number of tasks holding the lock.
        in_lock = 0
        max_in_lock = 0

        release_lock: "Deferred[None]" = Deferred()

        async def task() -> None:
            nonlocal in_lock
            nonlocal max_in_lock

            lock = await self.store.try_acquire_read_write_lock(
                "name", "key", write=False
            )
            if not lock:
                return

            async with lock:
                in_lock += 1
                max_in_lock = max(max_in_lock, in_lock)

                # Block to allow other tasks to attempt to take the lock.
                await release_lock

                in_lock -= 1

        # Start 3 tasks.
        task1 = defer.ensureDeferred(task())
        task2 = defer.ensureDeferred(task())
        task3 = defer.ensureDeferred(task())

        # Give the reactor a kick so that the database transaction returns.
        self.pump()

        release_lock.callback(None)

        # Run the tasks to completion.
        # To work around `Linearizer`s using a different reactor to sleep when
        # contended (#12841), we call `runUntilCurrent` on
        # `twisted.internet.reactor`, which is a different reactor to that used
        # by the homeserver.
        assert isinstance(reactor, ReactorBase)
        self.get_success(task1)
        reactor.runUntilCurrent()
        self.get_success(task2)
        reactor.runUntilCurrent()
        self.get_success(task3)

        # All three tasks should have been able to hold the read lock at the same time.
        self.assertEqual(max_in_lock, 3)

    def test_write_lock_acquired(self) -> None:
        """Test that we can take out a write lock and that while we hold it
        nobody else can take it out.
        """
        # First to acquire this lock, so it should complete
        lock = self.get_success(
            self.store.try_acquire_read_write_lock("name", "key", write=True)
        )
        assert lock is not None

        # Enter the context manager
        self.get_success(lock.__aenter__())

        # Attempting to acquire the lock again fails, as both read and write.
        lock2 = self.get_success(
            self.store.try_acquire_read_write_lock("name", "key", write=True)
        )
        self.assertIsNone(lock2)

        lock3 = self.get_success(
            self.store.try_acquire_read_write_lock("name", "key", write=False)
        )
        self.assertIsNone(lock3)

        # Calling `is_still_valid` reports true.
        self.assertTrue(self.get_success(lock.is_still_valid()))

        # Drop the lock
        self.get_success(lock.__aexit__(None, None, None))

        # We can now acquire the lock again.
        lock4 = self.get_success(
            self.store.try_acquire_read_write_lock("name", "key", write=True)
        )
        assert lock4 is not None
        self.get_success(lock4.__aenter__())
        self.get_success(lock4.__aexit__(None, None, None))

    def test_read_lock_acquired(self) -> None:
        """Test that we can take out a read lock and that while we hold it
        only other reads can use it.
        """
        # First to acquire this lock, so it should complete
        lock = self.get_success(
            self.store.try_acquire_read_write_lock("name", "key", write=False)
        )
        assert lock is not None

        # Enter the context manager
        self.get_success(lock.__aenter__())

        # Attempting to acquire the write lock fails
        lock2 = self.get_success(
            self.store.try_acquire_read_write_lock("name", "key", write=True)
        )
        self.assertIsNone(lock2)

        # Attempting to acquire a read lock succeeds
        lock3 = self.get_success(
            self.store.try_acquire_read_write_lock("name", "key", write=False)
        )
        assert lock3 is not None
        self.get_success(lock3.__aenter__())

        # Calling `is_still_valid` reports true.
        self.assertTrue(self.get_success(lock.is_still_valid()))

        # Drop the first lock
        self.get_success(lock.__aexit__(None, None, None))

        # Attempting to acquire the write lock still fails, as lock3 is still
        # active.
        lock4 = self.get_success(
            self.store.try_acquire_read_write_lock("name", "key", write=True)
        )
        self.assertIsNone(lock4)

        # Drop the still open third lock
        self.get_success(lock3.__aexit__(None, None, None))

        # We can now acquire the lock again.
        lock5 = self.get_success(
            self.store.try_acquire_read_write_lock("name", "key", write=True)
        )
        assert lock5 is not None
        self.get_success(lock5.__aenter__())
        self.get_success(lock5.__aexit__(None, None, None))

    def test_maintain_lock(self) -> None:
        """Test that we don't time out locks while they're still active (lock is
        renewed in the background if the process is still alive)"""

        lock = self.get_success(
            self.store.try_acquire_read_write_lock("name", "key", write=True)
        )
        assert lock is not None

        self.get_success(lock.__aenter__())

        # Wait for ages with the lock, we should not be able to get the lock.
        self.reactor.advance(5 * _LOCK_TIMEOUT_MS / 1000)
        self.pump()

        lock2 = self.get_success(
            self.store.try_acquire_read_write_lock("name", "key", write=True)
        )
        self.assertIsNone(lock2)

        self.get_success(lock.__aexit__(None, None, None))

    def test_timeout_lock(self) -> None:
        """Test that we time out locks if they're not updated for ages"""

        lock = self.get_success(
            self.store.try_acquire_read_write_lock("name", "key", write=True)
        )
        assert lock is not None

        self.get_success(lock.__aenter__())

        # We simulate the process getting stuck by cancelling the looping call
        # that keeps the lock active.
        lock._looping_call.stop()

        # Wait for the lock to timeout.
        self.reactor.advance(2 * _LOCK_TIMEOUT_MS / 1000)

        lock2 = self.get_success(
            self.store.try_acquire_read_write_lock("name", "key", write=True)
        )
        self.assertIsNotNone(lock2)

        self.assertFalse(self.get_success(lock.is_still_valid()))

    def test_drop(self) -> None:
        """Test that dropping the context manager means we stop renewing the lock"""

        lock = self.get_success(
            self.store.try_acquire_read_write_lock("name", "key", write=True)
        )
        self.assertIsNotNone(lock)

        del lock

        # Wait for the lock to timeout.
        self.reactor.advance(2 * _LOCK_TIMEOUT_MS / 1000)

        lock2 = self.get_success(
            self.store.try_acquire_read_write_lock("name", "key", write=True)
        )
        self.assertIsNotNone(lock2)

    def test_shutdown(self) -> None:
        """Test that shutting down Synapse releases the locks"""
        # Acquire two locks
        lock = self.get_success(
            self.store.try_acquire_read_write_lock("name", "key", write=True)
        )
        self.assertIsNotNone(lock)
        lock2 = self.get_success(
            self.store.try_acquire_read_write_lock("name", "key2", write=True)
        )
        self.assertIsNotNone(lock2)

        # Now call the shutdown code
        self.get_success(self.store._on_shutdown())

        self.assertEqual(self.store._live_read_write_lock_tokens, {})
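For reference, a minimal sketch (not part of the commit) of how calling code would typically use these locks, mirroring the pattern the tests drive; `store` stands in for the data store used above:

# Illustrative only: take a shared (read) lock, backing off if a writer holds it.
async def do_work_under_read_lock(store) -> None:
    lock = await store.try_acquire_read_write_lock("name", "key", write=False)
    if lock is None:
        # Contended by a writer: give up or retry later.
        return
    async with lock:
        # While this block runs the lock is renewed in the background; it is
        # released (and renewal stopped) when the block exits.
        ...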
@@ -20,7 +20,14 @@ from twisted.internet.defer import Deferred, ensureDeferred
from twisted.test.proto_helpers import MemoryReactor

from synapse.server import HomeServer
from synapse.storage.background_updates import BackgroundUpdater
from synapse.storage.background_updates import (
    BackgroundUpdater,
    ForeignKeyConstraint,
    NotNullConstraint,
    run_validate_constraint_and_delete_rows_schema_delta,
)
from synapse.storage.database import LoggingTransaction
from synapse.storage.engines import PostgresEngine, Sqlite3Engine
from synapse.types import JsonDict
from synapse.util import Clock
@@ -404,3 +411,225 @@ class BackgroundUpdateControllerTestCase(unittest.HomeserverTestCase):
        self.pump()
        self._update_ctx_manager.__aexit__.assert_called()
        self.get_success(do_update_d)


class BackgroundUpdateValidateConstraintTestCase(unittest.HomeserverTestCase):
    """Tests the validate constraint and delete background handlers."""

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.updates: BackgroundUpdater = self.hs.get_datastores().main.db_pool.updates
        # the base test class should have run the real bg updates for us
        self.assertTrue(
            self.get_success(self.updates.has_completed_background_updates())
        )

        self.store = self.hs.get_datastores().main

    def test_not_null_constraint(self) -> None:
        """Tests adding a not null constraint."""
        # Create the initial table, where we have some invalid data.
        table_sql = """
        CREATE TABLE test_constraint(
            a INT PRIMARY KEY,
            b INT
        );
        """
        self.get_success(
            self.store.db_pool.execute(
                "test_not_null_constraint", lambda _: None, table_sql
            )
        )

        # We add an index so that we can check that it's correctly recreated when
        # using SQLite.
        index_sql = "CREATE INDEX test_index ON test_constraint(a)"
        self.get_success(
            self.store.db_pool.execute(
                "test_not_null_constraint", lambda _: None, index_sql
            )
        )

        self.get_success(
            self.store.db_pool.simple_insert("test_constraint", {"a": 1, "b": 1})
        )
        self.get_success(
            self.store.db_pool.simple_insert("test_constraint", {"a": 2, "b": None})
        )
        self.get_success(
            self.store.db_pool.simple_insert("test_constraint", {"a": 3, "b": 3})
        )

        # Now let's do the migration

        table2_sqlite = """
        CREATE TABLE test_constraint2(
            a INT PRIMARY KEY,
            b INT,
            CONSTRAINT test_constraint_name CHECK (b is NOT NULL)
        );
        """

        def delta(txn: LoggingTransaction) -> None:
            run_validate_constraint_and_delete_rows_schema_delta(
                txn,
                ordering=1000,
                update_name="test_bg_update",
                table="test_constraint",
                constraint_name="test_constraint_name",
                constraint=NotNullConstraint("b"),
                sqlite_table_name="test_constraint2",
                sqlite_table_schema=table2_sqlite,
            )

        self.get_success(
            self.store.db_pool.runInteraction(
                "test_not_null_constraint",
                delta,
            )
        )

        if isinstance(self.store.database_engine, PostgresEngine):
            # Postgres uses a background update
            self.updates.register_background_validate_constraint_and_delete_rows(
                "test_bg_update",
                table="test_constraint",
                constraint_name="test_constraint_name",
                constraint=NotNullConstraint("b"),
                unique_columns=["a"],
            )

            # Tell the DataStore that it hasn't finished all updates yet
            self.store.db_pool.updates._all_done = False

            # Now let's actually drive the updates to completion
            self.wait_for_background_updates()

        # Check the correct values are in the new table.
        rows = self.get_success(
            self.store.db_pool.simple_select_list(
                table="test_constraint",
                keyvalues={},
                retcols=("a", "b"),
            )
        )

        self.assertCountEqual(rows, [{"a": 1, "b": 1}, {"a": 3, "b": 3}])

        # And check that invalid rows get correctly rejected.
        self.get_failure(
            self.store.db_pool.simple_insert("test_constraint", {"a": 2, "b": None}),
            exc=self.store.database_engine.module.IntegrityError,
        )

        # Check the index is still there for SQLite.
        if isinstance(self.store.database_engine, Sqlite3Engine):
            # Ensure the index exists in the schema.
            self.get_success(
                self.store.db_pool.simple_select_one_onecol(
                    table="sqlite_master",
                    keyvalues={"tbl_name": "test_constraint"},
                    retcol="name",
                )
            )

    def test_foreign_constraint(self) -> None:
        """Tests adding a foreign key constraint."""

        # Create the initial tables, where we have some invalid data.
        base_sql = """
        CREATE TABLE base_table(
            b INT PRIMARY KEY
        );
        """

        table_sql = """
        CREATE TABLE test_constraint(
            a INT PRIMARY KEY,
            b INT NOT NULL
        );
        """
        self.get_success(
            self.store.db_pool.execute(
                "test_foreign_key_constraint", lambda _: None, base_sql
            )
        )
        self.get_success(
            self.store.db_pool.execute(
                "test_foreign_key_constraint", lambda _: None, table_sql
            )
        )

        self.get_success(self.store.db_pool.simple_insert("base_table", {"b": 1}))
        self.get_success(
            self.store.db_pool.simple_insert("test_constraint", {"a": 1, "b": 1})
        )
        self.get_success(
            self.store.db_pool.simple_insert("test_constraint", {"a": 2, "b": 2})
        )
        self.get_success(self.store.db_pool.simple_insert("base_table", {"b": 3}))
        self.get_success(
            self.store.db_pool.simple_insert("test_constraint", {"a": 3, "b": 3})
        )

        table2_sqlite = """
        CREATE TABLE test_constraint2(
            a INT PRIMARY KEY,
            b INT NOT NULL,
            CONSTRAINT test_constraint_name FOREIGN KEY (b) REFERENCES base_table (b)
        );
        """

        def delta(txn: LoggingTransaction) -> None:
            run_validate_constraint_and_delete_rows_schema_delta(
                txn,
                ordering=1000,
                update_name="test_bg_update",
                table="test_constraint",
                constraint_name="test_constraint_name",
                constraint=ForeignKeyConstraint(
                    "base_table", [("b", "b")], deferred=False
                ),
                sqlite_table_name="test_constraint2",
                sqlite_table_schema=table2_sqlite,
            )

        self.get_success(
            self.store.db_pool.runInteraction(
                "test_foreign_key_constraint",
                delta,
            )
        )

        if isinstance(self.store.database_engine, PostgresEngine):
            # Postgres uses a background update
            self.updates.register_background_validate_constraint_and_delete_rows(
                "test_bg_update",
                table="test_constraint",
                constraint_name="test_constraint_name",
                constraint=ForeignKeyConstraint(
                    "base_table", [("b", "b")], deferred=False
                ),
                unique_columns=["a"],
            )

            # Tell the DataStore that it hasn't finished all updates yet
            self.store.db_pool.updates._all_done = False

            # Now let's actually drive the updates to completion
            self.wait_for_background_updates()

        # Check the correct values are in the new table.
        rows = self.get_success(
            self.store.db_pool.simple_select_list(
                table="test_constraint",
                keyvalues={},
                retcols=("a", "b"),
            )
        )
        self.assertCountEqual(rows, [{"a": 1, "b": 1}, {"a": 3, "b": 3}])

        # And check that invalid rows get correctly rejected.
        self.get_failure(
            self.store.db_pool.simple_insert("test_constraint", {"a": 2, "b": 2}),
            exc=self.store.database_engine.module.IntegrityError,
        )
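As a rough sketch (not part of the commit, all names below are made up for the example) of how a real schema change would combine these helpers, mirroring the calls made in the tests above:

# Illustrative only: a schema delta rebuilds the table on SQLite and, on Postgres,
# relies on a separately registered background update to validate the constraint.
def example_delta(txn) -> None:
    run_validate_constraint_and_delete_rows_schema_delta(
        txn,
        ordering=7900,                          # illustrative ordering
        update_name="example_b_not_null_update",
        table="example_table",
        constraint_name="example_b_not_null",
        constraint=NotNullConstraint("b"),
        sqlite_table_name="example_table2",
        sqlite_table_schema="""
            CREATE TABLE example_table2(
                a INT PRIMARY KEY,
                b INT,
                CONSTRAINT example_b_not_null CHECK (b IS NOT NULL)
            );
        """,
    )

# ...and, in the corresponding data store class, the Postgres-side registration:
# self.db_pool.updates.register_background_validate_constraint_and_delete_rows(
#     "example_b_not_null_update",
#     table="example_table",
#     constraint_name="example_b_not_null",
#     constraint=NotNullConstraint("b"),
#     unique_columns=["a"],
# )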
@@ -20,6 +20,7 @@ from parameterized import parameterized

from twisted.test.proto_helpers import MemoryReactor

from synapse.api.constants import EventTypes
from synapse.api.room_versions import (
    KNOWN_ROOM_VERSIONS,
    EventFormatVersions,

@@ -98,8 +99,32 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
        room2 = "#room2"
        room3 = "#room3"

        def insert_event(txn: Cursor, i: int, room_id: str) -> None:
        def insert_event(txn: LoggingTransaction, i: int, room_id: str) -> None:
            event_id = "$event_%i:local" % i

            # We need to insert into events table to get around the foreign key constraint.
            self.store.db_pool.simple_insert_txn(
                txn,
                table="events",
                values={
                    "instance_name": "master",
                    "stream_ordering": self.store._stream_id_gen.get_next_txn(txn),
                    "topological_ordering": 1,
                    "depth": 1,
                    "event_id": event_id,
                    "room_id": room_id,
                    "type": EventTypes.Message,
                    "processed": True,
                    "outlier": False,
                    "origin_server_ts": 0,
                    "received_ts": 0,
                    "sender": "@user:local",
                    "contains_url": False,
                    "state_key": None,
                    "rejection_reason": None,
                },
            )

            txn.execute(
                (
                    "INSERT INTO event_forward_extremities (room_id, event_id) "
@@ -113,10 +138,14 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase):
                self.store.db_pool.runInteraction("insert", insert_event, i, room1)
            )
            self.get_success(
                self.store.db_pool.runInteraction("insert", insert_event, i, room2)
                self.store.db_pool.runInteraction(
                    "insert", insert_event, i + 100, room2
                )
            )
            self.get_success(
                self.store.db_pool.runInteraction("insert", insert_event, i, room3)
                self.store.db_pool.runInteraction(
                    "insert", insert_event, i + 200, room3
                )
            )

        # Test simple case

@@ -15,8 +15,6 @@
from twisted.test.proto_helpers import MemoryReactor

from synapse.server import HomeServer
from synapse.storage.database import LoggingTransaction
from synapse.storage.engines import PostgresEngine
from synapse.types import UserID
from synapse.util import Clock

@@ -64,64 +62,3 @@ class ProfileStoreTestCase(unittest.HomeserverTestCase):
        self.assertIsNone(
            self.get_success(self.store.get_profile_avatar_url(self.u_frank))
        )

    def test_profiles_bg_migration(self) -> None:
        """
        Test background job that copies entries from column user_id to full_user_id, adding
        the hostname in the process.
        """
        updater = self.hs.get_datastores().main.db_pool.updates

        # drop the constraint so we can insert nulls in full_user_id to populate the test
        if isinstance(self.store.database_engine, PostgresEngine):

            def f(txn: LoggingTransaction) -> None:
                txn.execute(
                    "ALTER TABLE profiles DROP CONSTRAINT full_user_id_not_null"
                )

            self.get_success(self.store.db_pool.runInteraction("", f))

        for i in range(0, 70):
            self.get_success(
                self.store.db_pool.simple_insert(
                    "profiles",
                    {"user_id": f"hello{i:02}"},
                )
            )

        # re-add the constraint so that when it's validated it actually exists
        if isinstance(self.store.database_engine, PostgresEngine):

            def f(txn: LoggingTransaction) -> None:
                txn.execute(
                    "ALTER TABLE profiles ADD CONSTRAINT full_user_id_not_null CHECK (full_user_id IS NOT NULL) NOT VALID"
                )

            self.get_success(self.store.db_pool.runInteraction("", f))

        self.get_success(
            self.store.db_pool.simple_insert(
                "background_updates",
                values={
                    "update_name": "populate_full_user_id_profiles",
                    "progress_json": "{}",
                },
            )
        )

        self.get_success(
            updater.run_background_updates(False),
        )

        expected_values = []
        for i in range(0, 70):
            expected_values.append((f"@hello{i:02}:{self.hs.hostname}",))

        res = self.get_success(
            self.store.db_pool.execute(
                "", None, "SELECT full_user_id from profiles ORDER BY full_user_id"
            )
        )
        self.assertEqual(len(res), len(expected_values))
        self.assertEqual(res, expected_values)
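The test removed above drove the populate_full_user_id_profiles background update; as a one-line sketch (an illustration, not code from the commit), the value it writes for each row is simply the localpart qualified with the server name:

# Illustrative only: expected full_user_id for a profiles row with localpart `user_id`.
full_user_id = f"@{user_id}:{hostname}"  # e.g. "hello01" becomes "@hello01:example.com"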
@@ -1,94 +0,0 @@
# Copyright 2023 The Matrix.org Foundation C.I.C
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from twisted.test.proto_helpers import MemoryReactor

from synapse.server import HomeServer
from synapse.storage.database import LoggingTransaction
from synapse.storage.engines import PostgresEngine
from synapse.util import Clock

from tests import unittest


class UserFiltersStoreTestCase(unittest.HomeserverTestCase):
    """
    Test background migration that copies entries from column user_id to full_user_id, adding
    the hostname in the process.
    """

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        self.store = hs.get_datastores().main

    def test_bg_migration(self) -> None:
        updater = self.hs.get_datastores().main.db_pool.updates

        # drop the constraint so we can insert nulls in full_user_id to populate the test
        if isinstance(self.store.database_engine, PostgresEngine):

            def f(txn: LoggingTransaction) -> None:
                txn.execute(
                    "ALTER TABLE user_filters DROP CONSTRAINT full_user_id_not_null"
                )

            self.get_success(self.store.db_pool.runInteraction("", f))

        for i in range(0, 70):
            self.get_success(
                self.store.db_pool.simple_insert(
                    "user_filters",
                    {
                        "user_id": f"hello{i:02}",
                        "filter_id": i,
                        "filter_json": bytearray(i),
                    },
                )
            )

        # re-add the constraint so that when it's validated it actually exists
        if isinstance(self.store.database_engine, PostgresEngine):

            def f(txn: LoggingTransaction) -> None:
                txn.execute(
                    "ALTER TABLE user_filters ADD CONSTRAINT full_user_id_not_null CHECK (full_user_id IS NOT NULL) NOT VALID"
                )

            self.get_success(self.store.db_pool.runInteraction("", f))

        self.get_success(
            self.store.db_pool.simple_insert(
                "background_updates",
                values={
                    "update_name": "populate_full_user_id_user_filters",
                    "progress_json": "{}",
                },
            )
        )

        self.get_success(
            updater.run_background_updates(False),
        )

        expected_values = []
        for i in range(0, 70):
            expected_values.append((f"@hello{i:02}:{self.hs.hostname}",))

        res = self.get_success(
            self.store.db_pool.execute(
                "", None, "SELECT full_user_id from user_filters ORDER BY full_user_id"
            )
        )
        self.assertEqual(len(res), len(expected_values))
        self.assertEqual(res, expected_values)
@@ -268,7 +268,7 @@ class OptionsResourceTests(unittest.TestCase):
        )
        self.assertEqual(
            channel.headers.getRawHeaders(b"Access-Control-Expose-Headers"),
            [b"Synapse-Trace-Id"],
            [b"Synapse-Trace-Id, Server"],
        )

    def _check_cors_msc3886_headers(self, channel: FakeChannel) -> None: