Merge branch 'develop' into madlittlemods/17368-bust-_membership_stream_cache

Eric Eastwood 2024-11-12 14:30:55 -06:00
commit 944d1e0cc8
113 changed files with 4712 additions and 750 deletions

View file

@ -36,11 +36,11 @@ IS_PR = os.environ["GITHUB_REF"].startswith("refs/pull/")
# First calculate the various trial jobs.
#
# For PRs, we only run each type of test with the oldest Python version supported (which
# is Python 3.8 right now)
# is Python 3.9 right now)
trial_sqlite_tests = [
{
"python-version": "3.8",
"python-version": "3.9",
"database": "sqlite",
"extras": "all",
}
@ -53,12 +53,12 @@ if not IS_PR:
"database": "sqlite",
"extras": "all",
}
for version in ("3.9", "3.10", "3.11", "3.12")
for version in ("3.10", "3.11", "3.12", "3.13")
)
trial_postgres_tests = [
{
"python-version": "3.8",
"python-version": "3.9",
"database": "postgres",
"postgres-version": "11",
"extras": "all",
@ -68,16 +68,16 @@ trial_postgres_tests = [
if not IS_PR:
trial_postgres_tests.append(
{
"python-version": "3.12",
"python-version": "3.13",
"database": "postgres",
"postgres-version": "16",
"postgres-version": "17",
"extras": "all",
}
)
trial_no_extra_tests = [
{
"python-version": "3.8",
"python-version": "3.9",
"database": "sqlite",
"extras": "",
}
@ -99,24 +99,24 @@ set_output("trial_test_matrix", test_matrix)
# First calculate the various sytest jobs.
#
# For each type of test we only run on focal on PRs
# For each type of test we only run on bullseye on PRs
sytest_tests = [
{
"sytest-tag": "focal",
"sytest-tag": "bullseye",
},
{
"sytest-tag": "focal",
"sytest-tag": "bullseye",
"postgres": "postgres",
},
{
"sytest-tag": "focal",
"sytest-tag": "bullseye",
"postgres": "multi-postgres",
"workers": "workers",
},
{
"sytest-tag": "focal",
"sytest-tag": "bullseye",
"postgres": "multi-postgres",
"workers": "workers",
"reactor": "asyncio",
@ -127,11 +127,11 @@ if not IS_PR:
sytest_tests.extend(
[
{
"sytest-tag": "focal",
"sytest-tag": "bullseye",
"reactor": "asyncio",
},
{
"sytest-tag": "focal",
"sytest-tag": "bullseye",
"postgres": "postgres",
"reactor": "asyncio",
},
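For context, this is the script that feeds GitHub Actions its job matrices. A minimal sketch of the pattern, assuming a `set_output` helper like the one visible in the `set_output("trial_test_matrix", test_matrix)` hunk context above (the helper body here is illustrative, not the repository's exact code):

```python
import json
import os

def set_output(key: str, value: str) -> None:
    # GitHub Actions reads step outputs from the file named in $GITHUB_OUTPUT.
    with open(os.environ["GITHUB_OUTPUT"], "a") as f:
        f.write(f"{key}={value}\n")

# Serialise the job lists computed above so a workflow matrix can consume them.
test_matrix = json.dumps(
    trial_sqlite_tests + trial_postgres_tests + trial_no_extra_tests
)
set_output("trial_test_matrix", test_matrix)
```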

View file

@ -1,5 +1,5 @@
#!/usr/bin/env bash
# this script is run by GitHub Actions in a plain `focal` container; it
# this script is run by GitHub Actions in a plain `jammy` container; it
# - installs the minimal system requirements, and poetry;
# - patches the project definition file to refer to old versions only;
# - creates a venv with these old versions using poetry; and finally

View file

@ -132,9 +132,9 @@ jobs:
fail-fast: false
matrix:
include:
- sytest-tag: focal
- sytest-tag: bullseye
- sytest-tag: focal
- sytest-tag: bullseye
postgres: postgres
workers: workers
redis: redis

View file

@ -91,10 +91,19 @@ jobs:
rm -rf /tmp/.buildx-cache
mv /tmp/.buildx-cache-new /tmp/.buildx-cache
- name: Artifact name
id: artifact-name
# We can't have colons in the upload name of the artifact, so we convert
# e.g. `debian:sid` to `sid`.
env:
DISTRO: ${{ matrix.distro }}
run: |
echo "ARTIFACT_NAME=${DISTRO#*:}" >> "$GITHUB_OUTPUT"
- name: Upload debs as artifacts
uses: actions/upload-artifact@v3 # Don't upgrade to v4; broken: https://github.com/actions/upload-artifact#breaking-changes
uses: actions/upload-artifact@v4
with:
name: debs
name: debs-${{ steps.artifact-name.outputs.ARTIFACT_NAME }}
path: debs/*
build-wheels:
@ -102,7 +111,7 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-20.04, macos-12]
os: [ubuntu-22.04, macos-12]
arch: [x86_64, aarch64]
# is_pr is a flag used to exclude certain jobs from the matrix on PRs.
# It is not read by the rest of the workflow.
@ -144,7 +153,7 @@ jobs:
- name: Only build a single wheel on PR
if: startsWith(github.ref, 'refs/pull/')
run: echo "CIBW_BUILD="cp38-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
run: echo "CIBW_BUILD="cp39-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV
- name: Build wheels
run: python -m cibuildwheel --output-dir wheelhouse
@ -156,9 +165,9 @@ jobs:
CARGO_NET_GIT_FETCH_WITH_CLI: true
CIBW_ENVIRONMENT_PASS_LINUX: CARGO_NET_GIT_FETCH_WITH_CLI
- uses: actions/upload-artifact@v3 # Don't upgrade to v4; broken: https://github.com/actions/upload-artifact#breaking-changes
- uses: actions/upload-artifact@v4
with:
name: Wheel
name: Wheel-${{ matrix.os }}-${{ matrix.arch }}
path: ./wheelhouse/*.whl
build-sdist:
@ -177,7 +186,7 @@ jobs:
- name: Build sdist
run: python -m build --sdist
- uses: actions/upload-artifact@v3 # Don't upgrade to v4; broken: https://github.com/actions/upload-artifact#breaking-changes
- uses: actions/upload-artifact@v4
with:
name: Sdist
path: dist/*.tar.gz
@ -194,19 +203,20 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Download all workflow run artifacts
uses: actions/download-artifact@v3 # Don't upgrade to v4, it should match upload-artifact
uses: actions/download-artifact@v4
- name: Build a tarball for the debs
run: tar -cvJf debs.tar.xz debs
# We need to merge all the debs uploads into one folder, then compress
# that.
run: |
mkdir debs
mv debs*/* debs/
tar -cvJf debs.tar.xz debs
- name: Attach to release
uses: softprops/action-gh-release@a929a66f232c1b11af63782948aa2210f981808a # PR#109
uses: softprops/action-gh-release@v2
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
files: |
Sdist/*
Wheel/*
Wheel*/*
debs.tar.xz
# if it's not already published, keep the release as a draft.
draft: true
# mark it as a prerelease if the tag contains 'rc'.
prerelease: ${{ contains(github.ref, 'rc') }}

View file

@ -397,7 +397,7 @@ jobs:
needs:
- linting-done
- changes
runs-on: ubuntu-20.04
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v4
@ -409,12 +409,12 @@ jobs:
# their build dependencies
- run: |
sudo apt-get -qq update
sudo apt-get -qq install build-essential libffi-dev python-dev \
sudo apt-get -qq install build-essential libffi-dev python3-dev \
libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev
- uses: actions/setup-python@v5
with:
python-version: '3.8'
python-version: '3.9'
- name: Prepare old deps
if: steps.cache-poetry-old-deps.outputs.cache-hit != 'true'
@ -458,7 +458,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["pypy-3.8"]
python-version: ["pypy-3.9"]
extras: ["all"]
steps:
@ -580,11 +580,11 @@ jobs:
strategy:
matrix:
include:
- python-version: "3.8"
- python-version: "3.9"
postgres-version: "11"
- python-version: "3.11"
postgres-version: "15"
- python-version: "3.13"
postgres-version: "17"
services:
postgres:

View file

@ -99,11 +99,11 @@ jobs:
if: needs.check_repo.outputs.should_run_workflow == 'true'
runs-on: ubuntu-latest
container:
# We're using ubuntu:focal because it uses Python 3.8 which is our minimum supported Python version.
# We're using debian:bullseye because it uses Python 3.9 which is our minimum supported Python version.
# This job is a canary to warn us about unreleased twisted changes that would cause problems for us if
# they were to be released immediately. For simplicity's sake (and to save CI runners) we use the oldest
# version, assuming that any incompatibilities on newer versions would also be present on the oldest.
image: matrixdotorg/sytest-synapse:focal
image: matrixdotorg/sytest-synapse:bullseye
volumes:
- ${{ github.workspace }}:/src

View file

@ -1,3 +1,129 @@
# Synapse 1.119.0rc2 (2024-11-11)
Note that due to packaging issues there was no v1.119.0rc1.
### Python 3.8 support dropped
Python 3.8 is [end-of-life](https://devguide.python.org/versions/) and is no longer supported by Synapse. The minimum supported Python version is now 3.9.
If you are running Synapse with Python 3.8, please upgrade to Python 3.9 (or greater) before upgrading Synapse.
### Features
- Support [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151)'s stable report room API. ([\#17374](https://github.com/element-hq/synapse/issues/17374))
- Add experimental support for [MSC4222](https://github.com/matrix-org/matrix-spec-proposals/pull/4222) (Adding `state_after` to sync v2). ([\#17888](https://github.com/element-hq/synapse/issues/17888))
### Bugfixes
- Fix bug with sliding sync where `$LAZY`-loading room members would not return `required_state` membership in incremental syncs. ([\#17809](https://github.com/element-hq/synapse/issues/17809))
- Check if user has membership in a room before tagging it. Contributed by Lama Alosaimi. ([\#17839](https://github.com/element-hq/synapse/issues/17839))
- Fix a bug in the admin redact endpoint where the background task would not run if a worker was specified in
the config option `run_background_tasks_on`. ([\#17847](https://github.com/element-hq/synapse/issues/17847))
- Fix bug where some presence and typing timeouts can expire early. ([\#17850](https://github.com/element-hq/synapse/issues/17850))
- Fix detection when the built Rust library was outdated when using source installations. ([\#17861](https://github.com/element-hq/synapse/issues/17861))
- Fix a long-standing bug in Synapse which could cause one-time keys to be issued in the incorrect order, causing message decryption failures. ([\#17903](https://github.com/element-hq/synapse/pull/17903))
- Fix experimental support for [MSC4222](https://github.com/matrix-org/matrix-spec-proposals/pull/4222) (Adding `state_after` to sync v2) where we would return the full state on incremental syncs when using lazy loaded members and there were no new events in the timeline. ([\#17915](https://github.com/element-hq/synapse/pull/17915))
### Internal Changes
- Remove support for python 3.8. ([\#17908](https://github.com/element-hq/synapse/issues/17908))
- Add a test for downloading and thumbnailing a CMYK JPEG. ([\#17786](https://github.com/element-hq/synapse/issues/17786))
- Refactor database calls to remove `Generator` usage. ([\#17813](https://github.com/element-hq/synapse/issues/17813), [\#17814](https://github.com/element-hq/synapse/issues/17814), [\#17815](https://github.com/element-hq/synapse/issues/17815), [\#17816](https://github.com/element-hq/synapse/issues/17816), [\#17817](https://github.com/element-hq/synapse/issues/17817), [\#17818](https://github.com/element-hq/synapse/issues/17818), [\#17890](https://github.com/element-hq/synapse/issues/17890))
- Include the destination in the error of 'Destination mismatch' on federation requests. ([\#17830](https://github.com/element-hq/synapse/issues/17830))
- The nix flake inside the repository no longer tracks nixpkgs/master to not catch the latest bugs from a PR merged 5 minutes ago. ([\#17852](https://github.com/element-hq/synapse/issues/17852))
- Minor speed-up of sliding sync by computing extensions results in parallel. ([\#17884](https://github.com/element-hq/synapse/issues/17884))
- Bump the default Python version in the Synapse Dockerfile from 3.11 -> 3.12. ([\#17887](https://github.com/element-hq/synapse/issues/17887))
- Remove usage of internal header encoding API. ([\#17894](https://github.com/element-hq/synapse/issues/17894))
- Use unique name for each os.arch variant when uploading Wheel artifacts. ([\#17905](https://github.com/element-hq/synapse/issues/17905))
- Fix tests to run with latest Twisted. ([\#17906](https://github.com/element-hq/synapse/pull/17906), [\#17907](https://github.com/element-hq/synapse/pull/17907), [\#17911](https://github.com/element-hq/synapse/pull/17911))
- Update version constraint to allow the latest poetry-core 1.9.1. ([\#17902](https://github.com/element-hq/synapse/pull/17902))
- Update the portdb CI to use Python 3.13 and Postgres 17 as latest dependencies. ([\#17909](https://github.com/element-hq/synapse/pull/17909))
- Add an index to `current_state_delta_stream` table. ([\#17912](https://github.com/element-hq/synapse/issues/17912))
- Fix building and attaching release artifacts during the release process. ([\#17921](https://github.com/element-hq/synapse/issues/17921))
### Updates to locked dependencies
* Bump actions/download-artifact & actions/upload-artifact from 3 to 4 in /.github/workflows. ([\#17657](https://github.com/element-hq/synapse/issues/17657))
* Bump anyhow from 1.0.89 to 1.0.92. ([\#17858](https://github.com/element-hq/synapse/issues/17858), [\#17876](https://github.com/element-hq/synapse/issues/17876), [\#17901](https://github.com/element-hq/synapse/issues/17901))
* Bump bytes from 1.7.2 to 1.8.0. ([\#17877](https://github.com/element-hq/synapse/issues/17877))
* Bump cryptography from 43.0.1 to 43.0.3. ([\#17853](https://github.com/element-hq/synapse/issues/17853))
* Bump mypy-zope from 1.0.7 to 1.0.8. ([\#17898](https://github.com/element-hq/synapse/issues/17898))
* Bump phonenumbers from 8.13.47 to 8.13.49. ([\#17880](https://github.com/element-hq/synapse/issues/17880), [\#17899](https://github.com/element-hq/synapse/issues/17899))
* Bump python-multipart from 0.0.12 to 0.0.16. ([\#17879](https://github.com/element-hq/synapse/issues/17879))
* Bump regex from 1.11.0 to 1.11.1. ([\#17874](https://github.com/element-hq/synapse/issues/17874))
* Bump ruff from 0.6.9 to 0.7.2. ([\#17868](https://github.com/element-hq/synapse/issues/17868), [\#17897](https://github.com/element-hq/synapse/issues/17897))
* Bump serde from 1.0.210 to 1.0.214. ([\#17875](https://github.com/element-hq/synapse/issues/17875), [\#17900](https://github.com/element-hq/synapse/issues/17900))
* Bump serde_json from 1.0.128 to 1.0.132. ([\#17857](https://github.com/element-hq/synapse/issues/17857))
* Bump types-psycopg2 from 2.9.21.20240819 to 2.9.21.20241019. ([\#17855](https://github.com/element-hq/synapse/issues/17855))
* Bump types-setuptools from 75.1.0.20241014 to 75.2.0.20241019. ([\#17856](https://github.com/element-hq/synapse/issues/17856))
# Synapse 1.118.0 (2024-10-29)
No significant changes since 1.118.0rc1.
### Python 3.8 support will be dropped in the next release
Python 3.8 is now [end-of-life](https://devguide.python.org/versions/). As per our [Deprecation Policy for Platform Dependencies](https://element-hq.github.io/synapse/latest/deprecation_policy.html#policy), Synapse will be dropping support for Python 3.8 in the next release; Synapse 1.119.0.
Synapse 1.118.x will be the final release to support Python 3.8. If you are running Synapse with Python 3.8, please upgrade before the 1.119.0 release, due in less than one month.
### Python 3.13 and PostgreSQL 17 support
On the other end of the spectrum, Synapse 1.118.0 is the first release to support [Python 3.13](https://www.python.org/downloads/release/python-3130/)! [PostgreSQL 17](https://www.postgresql.org/about/news/postgresql-17-released-2936/) is also supported as of this release.
# Synapse 1.118.0rc1 (2024-10-22)
### Features
- Added the `display_name_claim` option to the JWT configuration. This option allows specifying the claim key that contains the user's display name in the JWT payload. ([\#17708](https://github.com/element-hq/synapse/issues/17708))
- Implement [MSC4210](https://github.com/matrix-org/matrix-spec-proposals/pull/4210): Remove legacy mentions. Contributed by @tulir @ Beeper. ([\#17783](https://github.com/element-hq/synapse/issues/17783))
### Bugfixes
- Fix saving of PNG thumbnails, when the original image is in the CMYK color space. ([\#17736](https://github.com/element-hq/synapse/issues/17736))
- Fix bug with sliding sync where the server would not return state that was added to the `required_state` config. ([\#17785](https://github.com/element-hq/synapse/issues/17785), [\#17805](https://github.com/element-hq/synapse/issues/17805))
- Fix a bug in [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync that would cause rooms to stay forgotten and hidden even after rejoining. ([\#17835](https://github.com/element-hq/synapse/issues/17835))
### Improved Documentation
- Clarify when the `user_may_invite` and `user_may_send_3pid_invite` module callbacks are called. ([\#17627](https://github.com/element-hq/synapse/issues/17627))
- Correct documentation to refer to the `--config-path` argument instead of `--config-file`. ([\#17802](https://github.com/element-hq/synapse/issues/17802))
- Fix typo in `target_cache_memory_usage` docs. ([\#17825](https://github.com/element-hq/synapse/issues/17825))
### Internal Changes
- Slight optimization when fetching state/events for Sliding Sync. ([\#17718](https://github.com/element-hq/synapse/issues/17718))
- Add Python 3.13 and Postgres 17 to the test matrix. ([\#17752](https://github.com/element-hq/synapse/issues/17752))
- Test github token before running release script steps. ([\#17803](https://github.com/element-hq/synapse/issues/17803))
- Build debian packages for new Ubuntu versions, and stop building for no longer supported versions. ([\#17824](https://github.com/element-hq/synapse/issues/17824))
- Enable the `.org.matrix.msc4028.encrypted_event` push rule by default in accordance with [MSC4028](https://github.com/matrix-org/matrix-spec-proposals/pull/4028). Note that the corresponding experimental feature must still be switched on for this push rule to have any effect. ([\#17826](https://github.com/element-hq/synapse/issues/17826))
- Fix some typing issues uncovered by upgrading mypy to 1.11.x. ([\#17842](https://github.com/element-hq/synapse/issues/17842))
### Updates to locked dependencies
* Bump mypy from 1.10.1 to 1.11.2. ([\#17842](https://github.com/element-hq/synapse/issues/17842))
* Bump mypy-zope from 1.0.5 to 1.0.7. ([\#17827](https://github.com/element-hq/synapse/issues/17827))
* Bump phonenumbers from 8.13.46 to 8.13.47. ([\#17797](https://github.com/element-hq/synapse/issues/17797))
* Bump psycopg2 from 2.9.9 to 2.9.10. ([\#17843](https://github.com/element-hq/synapse/issues/17843))
* Bump ruff from 0.6.8 to 0.6.9. ([\#17794](https://github.com/element-hq/synapse/issues/17794))
* Bump sentry-sdk from 2.14.0 to 2.15.0. ([\#17795](https://github.com/element-hq/synapse/issues/17795))
* Bump sentry-sdk from 2.15.0 to 2.16.0. ([\#17829](https://github.com/element-hq/synapse/issues/17829))
* Bump sentry-sdk from 2.16.0 to 2.17.0. ([\#17844](https://github.com/element-hq/synapse/issues/17844))
* Bump sigstore/cosign-installer from 3.6.0 to 3.7.0. ([\#17798](https://github.com/element-hq/synapse/issues/17798))
* Bump tomli from 2.0.1 to 2.0.2. ([\#17796](https://github.com/element-hq/synapse/issues/17796))
* Bump types-requests from 2.32.0.20240914 to 2.32.0.20241016. ([\#17841](https://github.com/element-hq/synapse/issues/17841))
* Bump types-setuptools from 75.1.0.20240917 to 75.1.0.20241014. ([\#17828](https://github.com/element-hq/synapse/issues/17828))
# Synapse 1.117.0 (2024-10-15)
No significant changes since 1.117.0rc1.
# Synapse 1.117.0rc1 (2024-10-08)
### Features

Cargo.lock generated (32 changed lines)
View file

@ -13,9 +13,9 @@ dependencies = [
[[package]]
name = "anyhow"
version = "1.0.89"
version = "1.0.93"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6"
checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775"
[[package]]
name = "arc-swap"
@ -67,9 +67,9 @@ checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c"
[[package]]
name = "bytes"
version = "1.7.2"
version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3"
checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da"
[[package]]
name = "cfg-if"
@ -302,9 +302,9 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
[[package]]
name = "proc-macro2"
version = "1.0.82"
version = "1.0.89"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ad3d49ab951a01fbaafe34f2ec74122942fe18a3f9814c3268f1bb72042131b"
checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e"
dependencies = [
"unicode-ident",
]
@ -444,9 +444,9 @@ dependencies = [
[[package]]
name = "regex"
version = "1.11.0"
version = "1.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8"
checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191"
dependencies = [
"aho-corasick",
"memchr",
@ -485,18 +485,18 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "serde"
version = "1.0.210"
version = "1.0.214"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a"
checksum = "f55c3193aca71c12ad7890f1785d2b73e1b9f63a0bbc353c08ef26fe03fc56b5"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.210"
version = "1.0.214"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f"
checksum = "de523f781f095e28fa605cdce0f8307e451cc0fd14e2eb4cd2e98a355b147766"
dependencies = [
"proc-macro2",
"quote",
@ -505,9 +505,9 @@ dependencies = [
[[package]]
name = "serde_json"
version = "1.0.128"
version = "1.0.132"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8"
checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03"
dependencies = [
"itoa",
"memchr",
@ -551,9 +551,9 @@ checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc"
[[package]]
name = "syn"
version = "2.0.61"
version = "2.0.85"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c993ed8ccba56ae856363b1845da7266a7cb78e1d146c8a32d54b45a8b831fc9"
checksum = "5023162dfcd14ef8f32034d8bcd4cc5ddc61ef7a247c024a33e24e1f24d21b56"
dependencies = [
"proc-macro2",
"quote",

View file

@ -1 +0,0 @@
Added the `display_name_claim` option to the JWT configuration. This option allows specifying the claim key that contains the user's display name in the JWT payload.

View file

@ -1 +0,0 @@
Fix saving of PNG thumbnails, when the original image is in the CMYK color space.

View file

@ -1 +0,0 @@
Correct documentation to refer to the `--config-path` argument instead of `--config-file`.

changelog.d/17865.misc Normal file (1 changed line)
View file

@ -0,0 +1 @@
Addressed some typos in docs and returned error message for unknown MXC ID.

changelog.d/17913.doc Normal file (1 changed line)
View file

@ -0,0 +1 @@
Clarify the semantics of the `enable_authenticated_media` configuration option.

changelog.d/17923.misc Normal file (1 changed line)
View file

@ -0,0 +1 @@
Unpin the upload release GHA action.

debian/changelog vendored (30 changed lines)
View file

@ -1,3 +1,33 @@
matrix-synapse-py3 (1.119.0~rc2) stable; urgency=medium
* New Synapse release 1.119.0rc2.
-- Synapse Packaging team <packages@matrix.org> Mon, 11 Nov 2024 14:33:02 +0000
matrix-synapse-py3 (1.119.0~rc1) stable; urgency=medium
* New Synapse release 1.119.0rc1.
-- Synapse Packaging team <packages@matrix.org> Wed, 06 Nov 2024 08:59:43 -0700
matrix-synapse-py3 (1.118.0) stable; urgency=medium
* New Synapse release 1.118.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 29 Oct 2024 15:29:53 +0100
matrix-synapse-py3 (1.118.0~rc1) stable; urgency=medium
* New Synapse release 1.118.0rc1.
-- Synapse Packaging team <packages@matrix.org> Tue, 22 Oct 2024 11:48:14 +0100
matrix-synapse-py3 (1.117.0) stable; urgency=medium
* New Synapse release 1.117.0.
-- Synapse Packaging team <packages@matrix.org> Tue, 15 Oct 2024 10:46:30 +0100
matrix-synapse-py3 (1.117.0~rc1) stable; urgency=medium
* New Synapse release 1.117.0rc1.

View file

@ -20,7 +20,7 @@
# `poetry export | pip install -r /dev/stdin`, but beware: we have experienced bugs in
# in `poetry export` in the past.
ARG PYTHON_VERSION=3.11
ARG PYTHON_VERSION=3.12
###
### Stage 0: generate requirements.txt

View file

@ -5,6 +5,7 @@ basis. The currently supported features are:
- [MSC3881](https://github.com/matrix-org/matrix-spec-proposals/pull/3881): enable remotely toggling push notifications
for another client
- [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): enable experimental sliding sync support
- [MSC4222](https://github.com/matrix-org/matrix-spec-proposals/pull/4222): adding `state_after` to sync v2
To use it, you will need to authenticate by providing an `access_token`
for a server admin: see [Admin API](../usage/administration/admin_api/).

View file

@ -1365,6 +1365,9 @@ _Added in Synapse 1.72.0._
## Redact all the events of a user
This endpoint allows an admin to redact the events of a given user. There are no restrictions on redactions for a
local user. By default, we puppet the user who sent the message to redact it themselves. Redactions for non-local users are issued using the admin user, and will fail in rooms where the admin user is not admin/does not have the specified power level to issue redactions.
The API is
```
POST /_synapse/admin/v1/user/$user_id/redact
```
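As with the other admin APIs, the request must be authenticated with a server admin's access token. A hypothetical invocation (host, user ID, and token are placeholders; an empty JSON body uses the endpoint's defaults):

```sh
curl --request POST \
     --header "Authorization: Bearer <admin_access_token>" \
     --data '{}' \
     "http://localhost:8008/_synapse/admin/v1/user/@baduser:example.com/redact"
```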

View file

@ -322,7 +322,7 @@ The following command will let you run the integration test with the most common
configuration:
```sh
$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:focal
$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:bullseye
```
(Note that the paths must be full paths! You could also write `$(realpath relative/path)` if needed.)

View file

@ -76,8 +76,9 @@ _Changed in Synapse v1.62.0: `synapse.module_api.NOT_SPAM` and `synapse.module_a
```python
async def user_may_invite(inviter: str, invitee: str, room_id: str) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool]
```
Called when processing an invitation. Both inviter and invitee are
represented by their Matrix user ID (e.g. `@alice:example.com`).
Called when processing an invitation, both when one is created locally or when
receiving an invite over federation. Both inviter and invitee are represented by
their Matrix user ID (e.g. `@alice:example.com`).
The callback must return one of:
@ -112,7 +113,9 @@ async def user_may_send_3pid_invite(
```
Called when processing an invitation using a third-party identifier (also called a 3PID,
e.g. an email address or a phone number).
e.g. an email address or a phone number). It is only called when a 3PID invite is created
locally - not when one is received in a room over federation. If the 3PID is already associated
with a Matrix ID, the spam check will go through the `user_may_invite` callback instead.
The inviter is represented by their Matrix user ID (e.g. `@alice:example.com`), and the
invitee is represented by its medium (e.g. "email") and its address
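To make the callback shape concrete, here is a minimal, illustrative spam-checker module. The registration call is the standard `ModuleApi` pattern; the blocked homeserver and the policy itself are invented for the example:

```python
from typing import Union

import synapse.module_api
import synapse.module_api.errors


class InviteSpamChecker:
    def __init__(self, config: dict, api: synapse.module_api.ModuleApi):
        self._api = api
        # Register the user_may_invite callback documented above.
        api.register_spam_checker_callbacks(user_may_invite=self.user_may_invite)

    async def user_may_invite(
        self, inviter: str, invitee: str, room_id: str
    ) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool]:
        # Hypothetical policy: refuse invites originating from one server.
        if inviter.endswith(":spam.example.com"):
            return synapse.module_api.errors.Codes.FORBIDDEN
        return synapse.module_api.NOT_SPAM
```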

View file

@ -208,7 +208,7 @@ When following this route please make sure that the [Platform-specific prerequis
System requirements:
- POSIX-compliant system (tested on Linux & OS X)
- Python 3.8 or later, up to Python 3.11.
- Python 3.9 or later, up to Python 3.13.
- At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
If building on an uncommon architecture for which pre-built wheels are

View file

@ -117,6 +117,17 @@ each upgrade are complete before moving on to the next upgrade, to avoid
stacking them up. You can monitor the currently running background updates with
[the Admin API](usage/administration/admin_api/background_updates.html#status).
# Upgrading to v1.119.0
## Minimum supported Python version
The minimum supported Python version has been increased from v3.8 to v3.9.
You will need Python 3.9+ to run Synapse v1.119.0 (due out Nov 7th, 2024).
If you use current versions of the Matrix.org-distributed Docker images, no action is required.
Please note that support for Ubuntu `focal` was dropped as well since it uses Python 3.8.
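To confirm which interpreter your deployment uses before upgrading, a quick check (run it with the same Python that runs Synapse, e.g. from inside its virtualenv):

```sh
python3 -c 'import sys; print(sys.version); assert sys.version_info >= (3, 9)'
```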
# Upgrading to v1.111.0
## New worker endpoints for authenticated client and federation media

View file

@ -1434,7 +1434,7 @@ number of entries that can be stored.
Please see the [Config Conventions](#config-conventions) for information on how to specify memory size and cache expiry
durations.
* `max_cache_memory_usage` sets a ceiling on how much memory the cache can use before caches begin to be continuously evicted.
They will continue to be evicted until the memory usage drops below the `target_memory_usage`, set in
They will continue to be evicted until the memory usage drops below the `target_cache_memory_usage`, set in
the setting below, or until the `min_cache_ttl` is hit. There is no default value for this option.
* `target_cache_memory_usage` sets a rough target for the desired memory usage of the caches. There is no default value
for this option.
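For illustration, a hypothetical `caches` block combining these options (the values are arbitrary; see the rest of this section for the semantics of each):

```yaml
caches:
  max_cache_memory_usage: 1024M
  target_cache_memory_usage: 758M
  expire_caches: true
  cache_entry_ttl: 30m
```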
@ -1890,6 +1890,26 @@ unauthenticated media endpoints (`/_matrix/media/(r0|v3|v1)/download` and `/_mat
after enabling, media marked as authenticated will be available over legacy endpoints. Defaults to false, but
this will change to true in a future Synapse release.
In all cases, authenticated requests to download media will succeed, but for unauthenticated requests, this
case-by-case breakdown describes whether media downloads are permitted:
* `enable_authenticated_media = False`:
* unauthenticated client or homeserver requesting local media: allowed
* unauthenticated client or homeserver requesting remote media: allowed as long as the media is in the cache,
or as long as the remote homeserver does not require authentication to retrieve the media
* `enable_authenticated_media = True`:
* unauthenticated client or homeserver requesting local media:
allowed if the media was stored on the server whilst `enable_authenticated_media` was `False` (or in a previous Synapse version where this option did not exist);
otherwise denied.
* unauthenticated client or homeserver requesting remote media: the same as for local media;
allowed if the media was stored on the server whilst `enable_authenticated_media` was `False` (or in a previous Synapse version where this option did not exist);
otherwise denied.
It is especially notable that media downloaded before this option existed (in older Synapse versions), or whilst this option was set to `False`,
will perpetually be available over the legacy, unauthenticated endpoint, even after this option is set to `True`.
This is for backwards compatibility with older clients and homeservers that do not yet support requesting authenticated media;
those older clients or homeservers will not be cut off from media they can already see.
Example configuration:
```yaml
enable_authenticated_media: true
```

View file

@ -186,16 +186,16 @@
},
"nixpkgs_2": {
"locked": {
"lastModified": 1690535733,
"narHash": "sha256-WgjUPscQOw3cB8yySDGlyzo6cZNihnRzUwE9kadv/5I=",
"lastModified": 1729265718,
"narHash": "sha256-4HQI+6LsO3kpWTYuVGIzhJs1cetFcwT7quWCk/6rqeo=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "8cacc05fbfffeaab910e8c2c9e2a7c6b32ce881a",
"rev": "ccc0c2126893dd20963580b6478d1a10a4512185",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "master",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}

View file

@ -3,13 +3,13 @@
# (https://github.com/matrix-org/complement) Matrix homeserver test suites are also
# installed automatically.
#
# You must have already installed Nix (https://nixos.org) on your system to use this.
# Nix can be installed on Linux or MacOS; NixOS is not required. Windows is not
# directly supported, but Nix can be installed inside of WSL2 or even Docker
# You must have already installed Nix (https://nixos.org/download/) on your system to use this.
# Nix can be installed on any Linux distribution or MacOS; NixOS is not required.
# Windows is not directly supported, but Nix can be installed inside of WSL2 or even Docker
# containers. Please refer to https://nixos.org/download for details.
#
# You must also enable support for flakes in Nix. See the following for how to
# do so permanently: https://nixos.wiki/wiki/Flakes#Enable_flakes
# do so permanently: https://wiki.nixos.org/wiki/Flakes#Other_Distros,_without_Home-Manager
#
# Be warned: you'll need over 3.75 GB of free space to download all the dependencies.
#
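(For reference, the usual way to enable flakes permanently on a non-NixOS system is a single line of Nix configuration, per the wiki page linked above:)

```
# In ~/.config/nix/nix.conf (or /etc/nix/nix.conf for a system-wide setting):
experimental-features = nix-command flakes
```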
@ -20,7 +20,7 @@
# locally from "services", such as PostgreSQL and Redis.
#
# You should now be dropped into a new shell with all programs and dependencies
# availabile to you!
# available to you!
#
# You can start up pre-configured local Synapse, PostgreSQL and Redis instances by
# running: `devenv up`. To stop them, use Ctrl-C.
@ -39,9 +39,9 @@
{
inputs = {
# Use the master/unstable branch of nixpkgs. Used to fetch the latest
# Use the rolling/unstable branch of nixpkgs. Used to fetch the latest
# available versions of packages.
nixpkgs.url = "github:NixOS/nixpkgs/master";
nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
# Output a development shell for x86_64/aarch64 Linux/Darwin (MacOS).
systems.url = "github:nix-systems/default";
# A development environment manager built on Nix. See https://devenv.sh.
@ -50,7 +50,7 @@
rust-overlay.url = "github:oxalica/rust-overlay";
};
outputs = { self, nixpkgs, devenv, systems, rust-overlay, ... } @ inputs:
outputs = { nixpkgs, devenv, systems, rust-overlay, ... } @ inputs:
let
forEachSystem = nixpkgs.lib.genAttrs (import systems);
in {
@ -126,7 +126,7 @@
# Automatically activate the poetry virtualenv upon entering the shell.
languages.python.poetry.activate.enable = true;
# Install all extra Python dependencies; this is needed to run the unit
# tests and utilitise all Synapse features.
# tests and utilise all Synapse features.
languages.python.poetry.install.arguments = ["--extras all"];
# Install the 'matrix-synapse' package from the local checkout.
languages.python.poetry.install.installRootPackage = true;
@ -163,8 +163,8 @@
# Create a postgres user called 'synapse_user' which has ownership
# over the 'synapse' database.
services.postgres.initialScript = ''
CREATE USER synapse_user;
ALTER DATABASE synapse OWNER TO synapse_user;
CREATE USER synapse_user;
ALTER DATABASE synapse OWNER TO synapse_user;
'';
# Redis is needed in order to run Synapse in worker mode.

View file

@ -26,7 +26,7 @@ strict_equality = True
# Run mypy type checking with the minimum supported Python version to catch new usage
# that isn't backwards-compatible (types, overloads, etc).
python_version = 3.8
python_version = 3.9
files =
docker/,

poetry.lock generated (477 changed lines)
View file

@ -11,9 +11,6 @@ files = [
{file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
]
[package.dependencies]
typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""}
[[package]]
name = "attrs"
version = "24.2.0"
@ -107,21 +104,20 @@ typecheck = ["mypy"]
[[package]]
name = "bleach"
version = "6.1.0"
version = "6.2.0"
description = "An easy safelist-based HTML-sanitizing tool."
optional = false
python-versions = ">=3.8"
python-versions = ">=3.9"
files = [
{file = "bleach-6.1.0-py3-none-any.whl", hash = "sha256:3225f354cfc436b9789c66c4ee030194bee0568fbf9cbdad3bc8b5c26c5f12b6"},
{file = "bleach-6.1.0.tar.gz", hash = "sha256:0a31f1837963c41d46bbf1331b8778e1308ea0791db03cc4e7357b97cf42a8fe"},
{file = "bleach-6.2.0-py3-none-any.whl", hash = "sha256:117d9c6097a7c3d22fd578fcd8d35ff1e125df6736f554da4e432fdd63f31e5e"},
{file = "bleach-6.2.0.tar.gz", hash = "sha256:123e894118b8a599fd80d3ec1a6d4cc7ce4e5882b1317a7e1ba69b56e95f991f"},
]
[package.dependencies]
six = ">=1.9.0"
webencodings = "*"
[package.extras]
css = ["tinycss2 (>=1.1.0,<1.3)"]
css = ["tinycss2 (>=1.1.0,<1.5)"]
[[package]]
name = "canonicaljson"
@ -147,75 +143,78 @@ files = [
[[package]]
name = "cffi"
version = "1.15.1"
version = "1.17.1"
description = "Foreign Function Interface for Python calling C code."
optional = false
python-versions = "*"
python-versions = ">=3.8"
files = [
{file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"},
{file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"},
{file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"},
{file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"},
{file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"},
{file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"},
{file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"},
{file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"},
{file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"},
{file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"},
{file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"},
{file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"},
{file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"},
{file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"},
{file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"},
{file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"},
{file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"},
{file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"},
{file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"},
{file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"},
{file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"},
{file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"},
{file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"},
{file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"},
{file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"},
{file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"},
{file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"},
{file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"},
{file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"},
{file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"},
{file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"},
{file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"},
{file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"},
{file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"},
{file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"},
{file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"},
{file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"},
{file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"},
{file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"},
{file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"},
{file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"},
{file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"},
{file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"},
{file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"},
{file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"},
{file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"},
{file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"},
{file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"},
{file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"},
{file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"},
{file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"},
{file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"},
{file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"},
{file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"},
{file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"},
{file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"},
{file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"},
{file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"},
{file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"},
{file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"},
{file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"},
{file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"},
{file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"},
{file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"},
{file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"},
{file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"},
{file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"},
{file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"},
{file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"},
{file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"},
{file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"},
{file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"},
{file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"},
{file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"},
{file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"},
{file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"},
{file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"},
{file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"},
{file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"},
{file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"},
{file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"},
{file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"},
{file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"},
{file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"},
{file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"},
{file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"},
{file = "cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"},
{file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"},
{file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"},
{file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"},
{file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"},
{file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"},
{file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"},
{file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"},
{file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"},
{file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"},
{file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"},
{file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"},
{file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"},
{file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"},
{file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"},
{file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"},
{file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"},
{file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"},
{file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"},
{file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"},
{file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"},
{file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"},
{file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"},
{file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"},
{file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"},
{file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"},
{file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"},
{file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"},
{file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"},
{file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"},
{file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"},
{file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"},
{file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"},
{file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"},
{file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"},
{file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"},
{file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"},
{file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"},
{file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"},
{file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"},
{file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"},
{file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"},
{file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"},
{file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"},
{file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"},
]
[package.dependencies]
@ -357,38 +356,38 @@ files = [
[[package]]
name = "cryptography"
version = "43.0.1"
version = "43.0.3"
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
optional = false
python-versions = ">=3.7"
files = [
{file = "cryptography-43.0.1-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:8385d98f6a3bf8bb2d65a73e17ed87a3ba84f6991c155691c51112075f9ffc5d"},
{file = "cryptography-43.0.1-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:27e613d7077ac613e399270253259d9d53872aaf657471473ebfc9a52935c062"},
{file = "cryptography-43.0.1-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68aaecc4178e90719e95298515979814bda0cbada1256a4485414860bd7ab962"},
{file = "cryptography-43.0.1-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:de41fd81a41e53267cb020bb3a7212861da53a7d39f863585d13ea11049cf277"},
{file = "cryptography-43.0.1-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f98bf604c82c416bc829e490c700ca1553eafdf2912a91e23a79d97d9801372a"},
{file = "cryptography-43.0.1-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:61ec41068b7b74268fa86e3e9e12b9f0c21fcf65434571dbb13d954bceb08042"},
{file = "cryptography-43.0.1-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:014f58110f53237ace6a408b5beb6c427b64e084eb451ef25a28308270086494"},
{file = "cryptography-43.0.1-cp37-abi3-win32.whl", hash = "sha256:2bd51274dcd59f09dd952afb696bf9c61a7a49dfc764c04dd33ef7a6b502a1e2"},
{file = "cryptography-43.0.1-cp37-abi3-win_amd64.whl", hash = "sha256:666ae11966643886c2987b3b721899d250855718d6d9ce41b521252a17985f4d"},
{file = "cryptography-43.0.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:ac119bb76b9faa00f48128b7f5679e1d8d437365c5d26f1c2c3f0da4ce1b553d"},
{file = "cryptography-43.0.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bbcce1a551e262dfbafb6e6252f1ae36a248e615ca44ba302df077a846a8806"},
{file = "cryptography-43.0.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58d4e9129985185a06d849aa6df265bdd5a74ca6e1b736a77959b498e0505b85"},
{file = "cryptography-43.0.1-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d03a475165f3134f773d1388aeb19c2d25ba88b6a9733c5c590b9ff7bbfa2e0c"},
{file = "cryptography-43.0.1-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:511f4273808ab590912a93ddb4e3914dfd8a388fed883361b02dea3791f292e1"},
{file = "cryptography-43.0.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:80eda8b3e173f0f247f711eef62be51b599b5d425c429b5d4ca6a05e9e856baa"},
{file = "cryptography-43.0.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:38926c50cff6f533f8a2dae3d7f19541432610d114a70808f0926d5aaa7121e4"},
{file = "cryptography-43.0.1-cp39-abi3-win32.whl", hash = "sha256:a575913fb06e05e6b4b814d7f7468c2c660e8bb16d8d5a1faf9b33ccc569dd47"},
{file = "cryptography-43.0.1-cp39-abi3-win_amd64.whl", hash = "sha256:d75601ad10b059ec832e78823b348bfa1a59f6b8d545db3a24fd44362a1564cb"},
{file = "cryptography-43.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ea25acb556320250756e53f9e20a4177515f012c9eaea17eb7587a8c4d8ae034"},
{file = "cryptography-43.0.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c1332724be35d23a854994ff0b66530119500b6053d0bd3363265f7e5e77288d"},
{file = "cryptography-43.0.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fba1007b3ef89946dbbb515aeeb41e30203b004f0b4b00e5e16078b518563289"},
{file = "cryptography-43.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5b43d1ea6b378b54a1dc99dd8a2b5be47658fe9a7ce0a58ff0b55f4b43ef2b84"},
{file = "cryptography-43.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:88cce104c36870d70c49c7c8fd22885875d950d9ee6ab54df2745f83ba0dc365"},
{file = "cryptography-43.0.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:9d3cdb25fa98afdd3d0892d132b8d7139e2c087da1712041f6b762e4f807cc96"},
{file = "cryptography-43.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e710bf40870f4db63c3d7d929aa9e09e4e7ee219e703f949ec4073b4294f6172"},
{file = "cryptography-43.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7c05650fe8023c5ed0d46793d4b7d7e6cd9c04e68eabe5b0aeea836e37bdcec2"},
{file = "cryptography-43.0.1.tar.gz", hash = "sha256:203e92a75716d8cfb491dc47c79e17d0d9207ccffcbcb35f598fbe463ae3444d"},
{file = "cryptography-43.0.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:bf7a1932ac4176486eab36a19ed4c0492da5d97123f1406cf15e41b05e787d2e"},
{file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63efa177ff54aec6e1c0aefaa1a241232dcd37413835a9b674b6e3f0ae2bfd3e"},
{file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e1ce50266f4f70bf41a2c6dc4358afadae90e2a1e5342d3c08883df1675374f"},
{file = "cryptography-43.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:443c4a81bb10daed9a8f334365fe52542771f25aedaf889fd323a853ce7377d6"},
{file = "cryptography-43.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:74f57f24754fe349223792466a709f8e0c093205ff0dca557af51072ff47ab18"},
{file = "cryptography-43.0.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9762ea51a8fc2a88b70cf2995e5675b38d93bf36bd67d91721c309df184f49bd"},
{file = "cryptography-43.0.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:81ef806b1fef6b06dcebad789f988d3b37ccaee225695cf3e07648eee0fc6b73"},
{file = "cryptography-43.0.3-cp37-abi3-win32.whl", hash = "sha256:cbeb489927bd7af4aa98d4b261af9a5bc025bd87f0e3547e11584be9e9427be2"},
{file = "cryptography-43.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:f46304d6f0c6ab8e52770addfa2fc41e6629495548862279641972b6215451cd"},
{file = "cryptography-43.0.3-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:8ac43ae87929a5982f5948ceda07001ee5e83227fd69cf55b109144938d96984"},
{file = "cryptography-43.0.3-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:846da004a5804145a5f441b8530b4bf35afbf7da70f82409f151695b127213d5"},
{file = "cryptography-43.0.3-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f996e7268af62598f2fc1204afa98a3b5712313a55c4c9d434aef49cadc91d4"},
{file = "cryptography-43.0.3-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f7b178f11ed3664fd0e995a47ed2b5ff0a12d893e41dd0494f406d1cf555cab7"},
{file = "cryptography-43.0.3-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:c2e6fc39c4ab499049df3bdf567f768a723a5e8464816e8f009f121a5a9f4405"},
{file = "cryptography-43.0.3-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:e1be4655c7ef6e1bbe6b5d0403526601323420bcf414598955968c9ef3eb7d16"},
{file = "cryptography-43.0.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:df6b6c6d742395dd77a23ea3728ab62f98379eff8fb61be2744d4679ab678f73"},
{file = "cryptography-43.0.3-cp39-abi3-win32.whl", hash = "sha256:d56e96520b1020449bbace2b78b603442e7e378a9b3bd68de65c782db1507995"},
{file = "cryptography-43.0.3-cp39-abi3-win_amd64.whl", hash = "sha256:0c580952eef9bf68c4747774cde7ec1d85a6e61de97281f2dba83c7d2c806362"},
{file = "cryptography-43.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d03b5621a135bffecad2c73e9f4deb1a0f977b9a8ffe6f8e002bf6c9d07b918c"},
{file = "cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a2a431ee15799d6db9fe80c82b055bae5a752bef645bba795e8e52687c69efe3"},
{file = "cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:281c945d0e28c92ca5e5930664c1cefd85efe80e5c0d2bc58dd63383fda29f83"},
{file = "cryptography-43.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f18c716be16bc1fea8e95def49edf46b82fccaa88587a45f8dc0ff6ab5d8e0a7"},
{file = "cryptography-43.0.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4a02ded6cd4f0a5562a8887df8b3bd14e822a90f97ac5e544c162899bc467664"},
{file = "cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:53a583b6637ab4c4e3591a15bc9db855b8d9dee9a669b550f311480acab6eb08"},
{file = "cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1ec0bcf7e17c0c5669d881b1cd38c4972fade441b27bda1051665faaa89bdcaa"},
{file = "cryptography-43.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2ce6fae5bdad59577b44e4dfed356944fbf1d925269114c28be377692643b4ff"},
{file = "cryptography-43.0.3.tar.gz", hash = "sha256:315b9001266a492a6ff443b61238f956b214dbec9910a081ba5b6646a055a805"},
]
[package.dependencies]
@ -401,7 +400,7 @@ nox = ["nox"]
pep8test = ["check-sdist", "click", "mypy", "ruff"]
sdist = ["build"]
ssh = ["bcrypt (>=3.1.5)"]
test = ["certifi", "cryptography-vectors (==43.0.1)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"]
test = ["certifi", "cryptography-vectors (==43.0.3)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"]
test-randomorder = ["pytest-randomly"]
[[package]]
@ -871,9 +870,7 @@ files = [
[package.dependencies]
attrs = ">=22.2.0"
importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""}
jsonschema-specifications = ">=2023.03.6"
pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""}
referencing = ">=0.28.4"
rpds-py = ">=0.7.1"
@ -893,7 +890,6 @@ files = [
]
[package.dependencies]
importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""}
referencing = ">=0.28.0"
[[package]]
@ -909,7 +905,6 @@ files = [
[package.dependencies]
importlib-metadata = {version = ">=4.11.4", markers = "python_version < \"3.12\""}
importlib-resources = {version = "*", markers = "python_version < \"3.9\""}
"jaraco.classes" = "*"
jeepney = {version = ">=0.4.2", markers = "sys_platform == \"linux\""}
pywin32-ctypes = {version = ">=0.2.0", markers = "sys_platform == \"win32\""}
@ -1319,44 +1314,44 @@ files = [
[[package]]
name = "mypy"
version = "1.10.1"
version = "1.11.2"
description = "Optional static typing for Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "mypy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e36f229acfe250dc660790840916eb49726c928e8ce10fbdf90715090fe4ae02"},
{file = "mypy-1.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:51a46974340baaa4145363b9e051812a2446cf583dfaeba124af966fa44593f7"},
{file = "mypy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:901c89c2d67bba57aaaca91ccdb659aa3a312de67f23b9dfb059727cce2e2e0a"},
{file = "mypy-1.10.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0cd62192a4a32b77ceb31272d9e74d23cd88c8060c34d1d3622db3267679a5d9"},
{file = "mypy-1.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:a2cbc68cb9e943ac0814c13e2452d2046c2f2b23ff0278e26599224cf164e78d"},
{file = "mypy-1.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bd6f629b67bb43dc0d9211ee98b96d8dabc97b1ad38b9b25f5e4c4d7569a0c6a"},
{file = "mypy-1.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a1bbb3a6f5ff319d2b9d40b4080d46cd639abe3516d5a62c070cf0114a457d84"},
{file = "mypy-1.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8edd4e9bbbc9d7b79502eb9592cab808585516ae1bcc1446eb9122656c6066f"},
{file = "mypy-1.10.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6166a88b15f1759f94a46fa474c7b1b05d134b1b61fca627dd7335454cc9aa6b"},
{file = "mypy-1.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:5bb9cd11c01c8606a9d0b83ffa91d0b236a0e91bc4126d9ba9ce62906ada868e"},
{file = "mypy-1.10.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d8681909f7b44d0b7b86e653ca152d6dff0eb5eb41694e163c6092124f8246d7"},
{file = "mypy-1.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:378c03f53f10bbdd55ca94e46ec3ba255279706a6aacaecac52ad248f98205d3"},
{file = "mypy-1.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bacf8f3a3d7d849f40ca6caea5c055122efe70e81480c8328ad29c55c69e93e"},
{file = "mypy-1.10.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:701b5f71413f1e9855566a34d6e9d12624e9e0a8818a5704d74d6b0402e66c04"},
{file = "mypy-1.10.1-cp312-cp312-win_amd64.whl", hash = "sha256:3c4c2992f6ea46ff7fce0072642cfb62af7a2484efe69017ed8b095f7b39ef31"},
{file = "mypy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:604282c886497645ffb87b8f35a57ec773a4a2721161e709a4422c1636ddde5c"},
{file = "mypy-1.10.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37fd87cab83f09842653f08de066ee68f1182b9b5282e4634cdb4b407266bade"},
{file = "mypy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8addf6313777dbb92e9564c5d32ec122bf2c6c39d683ea64de6a1fd98b90fe37"},
{file = "mypy-1.10.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5cc3ca0a244eb9a5249c7c583ad9a7e881aa5d7b73c35652296ddcdb33b2b9c7"},
{file = "mypy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:1b3a2ffce52cc4dbaeee4df762f20a2905aa171ef157b82192f2e2f368eec05d"},
{file = "mypy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fe85ed6836165d52ae8b88f99527d3d1b2362e0cb90b005409b8bed90e9059b3"},
{file = "mypy-1.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c2ae450d60d7d020d67ab440c6e3fae375809988119817214440033f26ddf7bf"},
{file = "mypy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6be84c06e6abd72f960ba9a71561c14137a583093ffcf9bbfaf5e613d63fa531"},
{file = "mypy-1.10.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2189ff1e39db399f08205e22a797383613ce1cb0cb3b13d8bcf0170e45b96cc3"},
{file = "mypy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:97a131ee36ac37ce9581f4220311247ab6cba896b4395b9c87af0675a13a755f"},
{file = "mypy-1.10.1-py3-none-any.whl", hash = "sha256:71d8ac0b906354ebda8ef1673e5fde785936ac1f29ff6987c7483cfbd5a4235a"},
{file = "mypy-1.10.1.tar.gz", hash = "sha256:1f8f492d7db9e3593ef42d4f115f04e556130f2819ad33ab84551403e97dd4c0"},
{file = "mypy-1.11.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d42a6dd818ffce7be66cce644f1dff482f1d97c53ca70908dff0b9ddc120b77a"},
{file = "mypy-1.11.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:801780c56d1cdb896eacd5619a83e427ce436d86a3bdf9112527f24a66618fef"},
{file = "mypy-1.11.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41ea707d036a5307ac674ea172875f40c9d55c5394f888b168033177fce47383"},
{file = "mypy-1.11.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6e658bd2d20565ea86da7d91331b0eed6d2eee22dc031579e6297f3e12c758c8"},
{file = "mypy-1.11.2-cp310-cp310-win_amd64.whl", hash = "sha256:478db5f5036817fe45adb7332d927daa62417159d49783041338921dcf646fc7"},
{file = "mypy-1.11.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:75746e06d5fa1e91bfd5432448d00d34593b52e7e91a187d981d08d1f33d4385"},
{file = "mypy-1.11.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a976775ab2256aadc6add633d44f100a2517d2388906ec4f13231fafbb0eccca"},
{file = "mypy-1.11.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cd953f221ac1379050a8a646585a29574488974f79d8082cedef62744f0a0104"},
{file = "mypy-1.11.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:57555a7715c0a34421013144a33d280e73c08df70f3a18a552938587ce9274f4"},
{file = "mypy-1.11.2-cp311-cp311-win_amd64.whl", hash = "sha256:36383a4fcbad95f2657642a07ba22ff797de26277158f1cc7bd234821468b1b6"},
{file = "mypy-1.11.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e8960dbbbf36906c5c0b7f4fbf2f0c7ffb20f4898e6a879fcf56a41a08b0d318"},
{file = "mypy-1.11.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:06d26c277962f3fb50e13044674aa10553981ae514288cb7d0a738f495550b36"},
{file = "mypy-1.11.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6e7184632d89d677973a14d00ae4d03214c8bc301ceefcdaf5c474866814c987"},
{file = "mypy-1.11.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3a66169b92452f72117e2da3a576087025449018afc2d8e9bfe5ffab865709ca"},
{file = "mypy-1.11.2-cp312-cp312-win_amd64.whl", hash = "sha256:969ea3ef09617aff826885a22ece0ddef69d95852cdad2f60c8bb06bf1f71f70"},
{file = "mypy-1.11.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:37c7fa6121c1cdfcaac97ce3d3b5588e847aa79b580c1e922bb5d5d2902df19b"},
{file = "mypy-1.11.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4a8a53bc3ffbd161b5b2a4fff2f0f1e23a33b0168f1c0778ec70e1a3d66deb86"},
{file = "mypy-1.11.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ff93107f01968ed834f4256bc1fc4475e2fecf6c661260066a985b52741ddce"},
{file = "mypy-1.11.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:edb91dded4df17eae4537668b23f0ff6baf3707683734b6a818d5b9d0c0c31a1"},
{file = "mypy-1.11.2-cp38-cp38-win_amd64.whl", hash = "sha256:ee23de8530d99b6db0573c4ef4bd8f39a2a6f9b60655bf7a1357e585a3486f2b"},
{file = "mypy-1.11.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:801ca29f43d5acce85f8e999b1e431fb479cb02d0e11deb7d2abb56bdaf24fd6"},
{file = "mypy-1.11.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:af8d155170fcf87a2afb55b35dc1a0ac21df4431e7d96717621962e4b9192e70"},
{file = "mypy-1.11.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f7821776e5c4286b6a13138cc935e2e9b6fde05e081bdebf5cdb2bb97c9df81d"},
{file = "mypy-1.11.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:539c570477a96a4e6fb718b8d5c3e0c0eba1f485df13f86d2970c91f0673148d"},
{file = "mypy-1.11.2-cp39-cp39-win_amd64.whl", hash = "sha256:3f14cd3d386ac4d05c5a39a51b84387403dadbd936e17cb35882134d4f8f0d24"},
{file = "mypy-1.11.2-py3-none-any.whl", hash = "sha256:b499bc07dbdcd3de92b0a8b29fdf592c111276f6a12fe29c30f6c417dd546d12"},
{file = "mypy-1.11.2.tar.gz", hash = "sha256:7f9993ad3e0ffdc95c2a14b66dee63729f021968bff8ad911867579c65d13a79"},
]
[package.dependencies]
mypy-extensions = ">=1.0.0"
tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
typing-extensions = ">=4.1.0"
typing-extensions = ">=4.6.0"
[package.extras]
dmypy = ["psutil (>=4.0)"]
@ -1377,16 +1372,17 @@ files = [
[[package]]
name = "mypy-zope"
version = "1.0.5"
version = "1.0.8"
description = "Plugin for mypy to support zope interfaces"
optional = false
python-versions = "*"
files = [
{file = "mypy_zope-1.0.5.tar.gz", hash = "sha256:2440406d49c0e1199c1cd819c92a2c4957de65579c6abc8a081c927f4bdc8d49"},
{file = "mypy_zope-1.0.8-py3-none-any.whl", hash = "sha256:8794a77dae0c7e2f28b8ac48569091310b3ee45bb9d6cd4797dcb837c40f9976"},
{file = "mypy_zope-1.0.8.tar.gz", hash = "sha256:854303a95aefc4289e8a0796808e002c2c7ecde0a10a8f7b8f48092f94ef9b9f"},
]
[package.dependencies]
mypy = ">=1.0.0,<1.11.0"
mypy = ">=1.0.0,<1.13.0"
"zope.interface" = "*"
"zope.schema" = "*"
@ -1447,13 +1443,13 @@ dev = ["jinja2"]
[[package]]
name = "phonenumbers"
version = "8.13.47"
version = "8.13.49"
description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
optional = false
python-versions = "*"
files = [
{file = "phonenumbers-8.13.47-py2.py3-none-any.whl", hash = "sha256:5d3c0142ef7055ca5551884352e3b6b93bfe002a0bc95b8eaba39b0e2184541b"},
{file = "phonenumbers-8.13.47.tar.gz", hash = "sha256:53c5e7c6d431cafe4efdd44956078404ae9bc8b0eacc47be3105d3ccc88aaffa"},
{file = "phonenumbers-8.13.49-py2.py3-none-any.whl", hash = "sha256:e17140955ab3d8f9580727372ea64c5ada5327932d6021ef6fd203c3db8c8139"},
{file = "phonenumbers-8.13.49.tar.gz", hash = "sha256:e608ccb61f0bd42e6db1d2c421f7c22186b88f494870bf40aa31d1a2718ab0ae"},
]
[[package]]
@ -1567,17 +1563,6 @@ files = [
[package.extras]
testing = ["pytest", "pytest-cov"]
[[package]]
name = "pkgutil-resolve-name"
version = "1.3.10"
description = "Resolve a name to an object."
optional = false
python-versions = ">=3.6"
files = [
{file = "pkgutil_resolve_name-1.3.10-py3-none-any.whl", hash = "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e"},
{file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"},
]
[[package]]
name = "prometheus-client"
version = "0.21.0"
@ -1594,24 +1579,20 @@ twisted = ["twisted"]
[[package]]
name = "psycopg2"
version = "2.9.9"
version = "2.9.10"
description = "psycopg2 - Python-PostgreSQL Database Adapter"
optional = true
python-versions = ">=3.7"
python-versions = ">=3.8"
files = [
{file = "psycopg2-2.9.9-cp310-cp310-win32.whl", hash = "sha256:38a8dcc6856f569068b47de286b472b7c473ac7977243593a288ebce0dc89516"},
{file = "psycopg2-2.9.9-cp310-cp310-win_amd64.whl", hash = "sha256:426f9f29bde126913a20a96ff8ce7d73fd8a216cfb323b1f04da402d452853c3"},
{file = "psycopg2-2.9.9-cp311-cp311-win32.whl", hash = "sha256:ade01303ccf7ae12c356a5e10911c9e1c51136003a9a1d92f7aa9d010fb98372"},
{file = "psycopg2-2.9.9-cp311-cp311-win_amd64.whl", hash = "sha256:121081ea2e76729acfb0673ff33755e8703d45e926e416cb59bae3a86c6a4981"},
{file = "psycopg2-2.9.9-cp312-cp312-win32.whl", hash = "sha256:d735786acc7dd25815e89cc4ad529a43af779db2e25aa7c626de864127e5a024"},
{file = "psycopg2-2.9.9-cp312-cp312-win_amd64.whl", hash = "sha256:a7653d00b732afb6fc597e29c50ad28087dcb4fbfb28e86092277a559ae4e693"},
{file = "psycopg2-2.9.9-cp37-cp37m-win32.whl", hash = "sha256:5e0d98cade4f0e0304d7d6f25bbfbc5bd186e07b38eac65379309c4ca3193efa"},
{file = "psycopg2-2.9.9-cp37-cp37m-win_amd64.whl", hash = "sha256:7e2dacf8b009a1c1e843b5213a87f7c544b2b042476ed7755be813eaf4e8347a"},
{file = "psycopg2-2.9.9-cp38-cp38-win32.whl", hash = "sha256:ff432630e510709564c01dafdbe996cb552e0b9f3f065eb89bdce5bd31fabf4c"},
{file = "psycopg2-2.9.9-cp38-cp38-win_amd64.whl", hash = "sha256:bac58c024c9922c23550af2a581998624d6e02350f4ae9c5f0bc642c633a2d5e"},
{file = "psycopg2-2.9.9-cp39-cp39-win32.whl", hash = "sha256:c92811b2d4c9b6ea0285942b2e7cac98a59e166d59c588fe5cfe1eda58e72d59"},
{file = "psycopg2-2.9.9-cp39-cp39-win_amd64.whl", hash = "sha256:de80739447af31525feddeb8effd640782cf5998e1a4e9192ebdf829717e3913"},
{file = "psycopg2-2.9.9.tar.gz", hash = "sha256:d1454bde93fb1e224166811694d600e746430c006fbb031ea06ecc2ea41bf156"},
{file = "psycopg2-2.9.10-cp310-cp310-win32.whl", hash = "sha256:5df2b672140f95adb453af93a7d669d7a7bf0a56bcd26f1502329166f4a61716"},
{file = "psycopg2-2.9.10-cp310-cp310-win_amd64.whl", hash = "sha256:c6f7b8561225f9e711a9c47087388a97fdc948211c10a4bccbf0ba68ab7b3b5a"},
{file = "psycopg2-2.9.10-cp311-cp311-win32.whl", hash = "sha256:47c4f9875125344f4c2b870e41b6aad585901318068acd01de93f3677a6522c2"},
{file = "psycopg2-2.9.10-cp311-cp311-win_amd64.whl", hash = "sha256:0435034157049f6846e95103bd8f5a668788dd913a7c30162ca9503fdf542cb4"},
{file = "psycopg2-2.9.10-cp312-cp312-win32.whl", hash = "sha256:65a63d7ab0e067e2cdb3cf266de39663203d38d6a8ed97f5ca0cb315c73fe067"},
{file = "psycopg2-2.9.10-cp312-cp312-win_amd64.whl", hash = "sha256:4a579d6243da40a7b3182e0430493dbd55950c493d8c68f4eec0b302f6bbf20e"},
{file = "psycopg2-2.9.10-cp39-cp39-win32.whl", hash = "sha256:9d5b3b94b79a844a986d029eee38998232451119ad653aea42bb9220a8c5066b"},
{file = "psycopg2-2.9.10-cp39-cp39-win_amd64.whl", hash = "sha256:88138c8dedcbfa96408023ea2b0c369eda40fe5d75002c0964c78f46f11fa442"},
{file = "psycopg2-2.9.10.tar.gz", hash = "sha256:12ec0b40b0273f95296233e8750441339298e6a572f7039da5b260e3c8b60e11"},
]
[[package]]
@ -1803,13 +1784,13 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"
[[package]]
name = "pygithub"
version = "2.4.0"
version = "2.5.0"
description = "Use the full Github API v3"
optional = false
python-versions = ">=3.8"
files = [
{file = "PyGithub-2.4.0-py3-none-any.whl", hash = "sha256:81935aa4bdc939fba98fee1cb47422c09157c56a27966476ff92775602b9ee24"},
{file = "pygithub-2.4.0.tar.gz", hash = "sha256:6601e22627e87bac192f1e2e39c6e6f69a43152cfb8f307cee575879320b3051"},
{file = "PyGithub-2.5.0-py3-none-any.whl", hash = "sha256:b0b635999a658ab8e08720bdd3318893ff20e2275f6446fcf35bf3f44f2c0fd2"},
{file = "pygithub-2.5.0.tar.gz", hash = "sha256:e1613ac508a9be710920d26eb18b1905ebd9926aa49398e88151c1b526aad3cf"},
]
[package.dependencies]
@ -1948,7 +1929,6 @@ files = [
[package.dependencies]
cryptography = ">=3.1"
defusedxml = "*"
importlib-resources = {version = "*", markers = "python_version < \"3.9\""}
pyopenssl = "*"
python-dateutil = "*"
pytz = "*"
@ -1974,13 +1954,13 @@ six = ">=1.5"
[[package]]
name = "python-multipart"
version = "0.0.12"
version = "0.0.16"
description = "A streaming multipart parser for Python"
optional = false
python-versions = ">=3.8"
files = [
{file = "python_multipart-0.0.12-py3-none-any.whl", hash = "sha256:43dcf96cf65888a9cd3423544dd0d75ac10f7aa0c3c28a175bbcd00c9ce1aebf"},
{file = "python_multipart-0.0.12.tar.gz", hash = "sha256:045e1f98d719c1ce085ed7f7e1ef9d8ccc8c02ba02b5566d5f7521410ced58cb"},
{file = "python_multipart-0.0.16-py3-none-any.whl", hash = "sha256:c2759b7b976ef3937214dfb592446b59dfaa5f04682a076f78b117c94776d87a"},
{file = "python_multipart-0.0.16.tar.gz", hash = "sha256:8dee37b88dab9b59922ca173c35acb627cc12ec74019f5cd4578369c6df36554"},
]
[[package]]
@ -2164,7 +2144,6 @@ files = [
[package.dependencies]
markdown-it-py = ">=2.2.0,<3.0.0"
pygments = ">=2.13.0,<3.0.0"
typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""}
[package.extras]
jupyter = ["ipywidgets (>=7.5.1,<9)"]
@ -2277,29 +2256,29 @@ files = [
[[package]]
name = "ruff"
version = "0.6.9"
version = "0.7.3"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
files = [
{file = "ruff-0.6.9-py3-none-linux_armv6l.whl", hash = "sha256:064df58d84ccc0ac0fcd63bc3090b251d90e2a372558c0f057c3f75ed73e1ccd"},
{file = "ruff-0.6.9-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:140d4b5c9f5fc7a7b074908a78ab8d384dd7f6510402267bc76c37195c02a7ec"},
{file = "ruff-0.6.9-py3-none-macosx_11_0_arm64.whl", hash = "sha256:53fd8ca5e82bdee8da7f506d7b03a261f24cd43d090ea9db9a1dc59d9313914c"},
{file = "ruff-0.6.9-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:645d7d8761f915e48a00d4ecc3686969761df69fb561dd914a773c1a8266e14e"},
{file = "ruff-0.6.9-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eae02b700763e3847595b9d2891488989cac00214da7f845f4bcf2989007d577"},
{file = "ruff-0.6.9-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d5ccc9e58112441de8ad4b29dcb7a86dc25c5f770e3c06a9d57e0e5eba48829"},
{file = "ruff-0.6.9-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:417b81aa1c9b60b2f8edc463c58363075412866ae4e2b9ab0f690dc1e87ac1b5"},
{file = "ruff-0.6.9-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c866b631f5fbce896a74a6e4383407ba7507b815ccc52bcedabb6810fdb3ef7"},
{file = "ruff-0.6.9-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7b118afbb3202f5911486ad52da86d1d52305b59e7ef2031cea3425142b97d6f"},
{file = "ruff-0.6.9-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a67267654edc23c97335586774790cde402fb6bbdb3c2314f1fc087dee320bfa"},
{file = "ruff-0.6.9-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3ef0cc774b00fec123f635ce5c547dac263f6ee9fb9cc83437c5904183b55ceb"},
{file = "ruff-0.6.9-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:12edd2af0c60fa61ff31cefb90aef4288ac4d372b4962c2864aeea3a1a2460c0"},
{file = "ruff-0.6.9-py3-none-musllinux_1_2_i686.whl", hash = "sha256:55bb01caeaf3a60b2b2bba07308a02fca6ab56233302406ed5245180a05c5625"},
{file = "ruff-0.6.9-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:925d26471fa24b0ce5a6cdfab1bb526fb4159952385f386bdcc643813d472039"},
{file = "ruff-0.6.9-py3-none-win32.whl", hash = "sha256:eb61ec9bdb2506cffd492e05ac40e5bc6284873aceb605503d8494180d6fc84d"},
{file = "ruff-0.6.9-py3-none-win_amd64.whl", hash = "sha256:785d31851c1ae91f45b3d8fe23b8ae4b5170089021fbb42402d811135f0b7117"},
{file = "ruff-0.6.9-py3-none-win_arm64.whl", hash = "sha256:a9641e31476d601f83cd602608739a0840e348bda93fec9f1ee816f8b6798b93"},
{file = "ruff-0.6.9.tar.gz", hash = "sha256:b076ef717a8e5bc819514ee1d602bbdca5b4420ae13a9cf61a0c0a4f53a2baa2"},
{file = "ruff-0.7.3-py3-none-linux_armv6l.whl", hash = "sha256:34f2339dc22687ec7e7002792d1f50712bf84a13d5152e75712ac08be565d344"},
{file = "ruff-0.7.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:fb397332a1879b9764a3455a0bb1087bda876c2db8aca3a3cbb67b3dbce8cda0"},
{file = "ruff-0.7.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:37d0b619546103274e7f62643d14e1adcbccb242efda4e4bdb9544d7764782e9"},
{file = "ruff-0.7.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d59f0c3ee4d1a6787614e7135b72e21024875266101142a09a61439cb6e38a5"},
{file = "ruff-0.7.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:44eb93c2499a169d49fafd07bc62ac89b1bc800b197e50ff4633aed212569299"},
{file = "ruff-0.7.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6d0242ce53f3a576c35ee32d907475a8d569944c0407f91d207c8af5be5dae4e"},
{file = "ruff-0.7.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:6b6224af8b5e09772c2ecb8dc9f3f344c1aa48201c7f07e7315367f6dd90ac29"},
{file = "ruff-0.7.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c50f95a82b94421c964fae4c27c0242890a20fe67d203d127e84fbb8013855f5"},
{file = "ruff-0.7.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7f3eff9961b5d2644bcf1616c606e93baa2d6b349e8aa8b035f654df252c8c67"},
{file = "ruff-0.7.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8963cab06d130c4df2fd52c84e9f10d297826d2e8169ae0c798b6221be1d1d2"},
{file = "ruff-0.7.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:61b46049d6edc0e4317fb14b33bd693245281a3007288b68a3f5b74a22a0746d"},
{file = "ruff-0.7.3-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:10ebce7696afe4644e8c1a23b3cf8c0f2193a310c18387c06e583ae9ef284de2"},
{file = "ruff-0.7.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:3f36d56326b3aef8eeee150b700e519880d1aab92f471eefdef656fd57492aa2"},
{file = "ruff-0.7.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:5d024301109a0007b78d57ab0ba190087b43dce852e552734ebf0b0b85e4fb16"},
{file = "ruff-0.7.3-py3-none-win32.whl", hash = "sha256:4ba81a5f0c5478aa61674c5a2194de8b02652f17addf8dfc40c8937e6e7d79fc"},
{file = "ruff-0.7.3-py3-none-win_amd64.whl", hash = "sha256:588a9ff2fecf01025ed065fe28809cd5a53b43505f48b69a1ac7707b1b7e4088"},
{file = "ruff-0.7.3-py3-none-win_arm64.whl", hash = "sha256:1713e2c5545863cdbfe2cbce21f69ffaf37b813bfd1fb3b90dc9a6f1963f5a8c"},
{file = "ruff-0.7.3.tar.gz", hash = "sha256:e1d1ba2e40b6e71a61b063354d04be669ab0d39c352461f3d789cac68b54a313"},
]
[[package]]
@ -2334,13 +2313,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"]
[[package]]
name = "sentry-sdk"
version = "2.15.0"
version = "2.17.0"
description = "Python client for Sentry (https://sentry.io)"
optional = true
python-versions = ">=3.6"
files = [
{file = "sentry_sdk-2.15.0-py2.py3-none-any.whl", hash = "sha256:8fb0d1a4e1a640172f31502e4503543765a1fe8a9209779134a4ac52d4677303"},
{file = "sentry_sdk-2.15.0.tar.gz", hash = "sha256:a599e7d3400787d6f43327b973e55a087b931ba2c592a7a7afa691f8eb5e75e2"},
{file = "sentry_sdk-2.17.0-py2.py3-none-any.whl", hash = "sha256:625955884b862cc58748920f9e21efdfb8e0d4f98cca4ab0d3918576d5b606ad"},
{file = "sentry_sdk-2.17.0.tar.gz", hash = "sha256:dd0a05352b78ffeacced73a94e86f38b32e2eae15fff5f30ca5abb568a72eacf"},
]
[package.dependencies]
@ -2363,6 +2342,7 @@ falcon = ["falcon (>=1.4)"]
fastapi = ["fastapi (>=0.79.0)"]
flask = ["blinker (>=1.1)", "flask (>=0.11)", "markupsafe"]
grpcio = ["grpcio (>=1.21.1)", "protobuf (>=3.8.0)"]
http2 = ["httpcore[http2] (==1.*)"]
httpx = ["httpx (>=0.16.0)"]
huey = ["huey (>=2)"]
huggingface-hub = ["huggingface-hub (>=0.22)"]
@ -2782,13 +2762,13 @@ files = [
[[package]]
name = "types-psycopg2"
version = "2.9.21.20240819"
version = "2.9.21.20241019"
description = "Typing stubs for psycopg2"
optional = false
python-versions = ">=3.8"
files = [
{file = "types-psycopg2-2.9.21.20240819.tar.gz", hash = "sha256:4ed6b47464d6374fa64e5e3b234cea0f710e72123a4596d67ab50b7415a84666"},
{file = "types_psycopg2-2.9.21.20240819-py3-none-any.whl", hash = "sha256:c9192311c27d7ad561eef705f1b2df1074f2cdcf445a98a6a2fcaaaad43278cf"},
{file = "types-psycopg2-2.9.21.20241019.tar.gz", hash = "sha256:bca89b988d2ebd19bcd08b177d22a877ea8b841decb10ed130afcf39404612fa"},
{file = "types_psycopg2-2.9.21.20241019-py3-none-any.whl", hash = "sha256:44d091e67732d16a941baae48cd7b53bf91911bc36888652447cf1ef0c1fb3f6"},
]
[[package]]
@ -2819,13 +2799,13 @@ files = [
[[package]]
name = "types-requests"
version = "2.32.0.20240914"
version = "2.32.0.20241016"
description = "Typing stubs for requests"
optional = false
python-versions = ">=3.8"
files = [
{file = "types-requests-2.32.0.20240914.tar.gz", hash = "sha256:2850e178db3919d9bf809e434eef65ba49d0e7e33ac92d588f4a5e295fffd405"},
{file = "types_requests-2.32.0.20240914-py3-none-any.whl", hash = "sha256:59c2f673eb55f32a99b2894faf6020e1a9f4a402ad0f192bfee0b64469054310"},
{file = "types-requests-2.32.0.20241016.tar.gz", hash = "sha256:0d9cad2f27515d0e3e3da7134a1b6f28fb97129d86b867f24d9c726452634d95"},
{file = "types_requests-2.32.0.20241016-py3-none-any.whl", hash = "sha256:4195d62d6d3e043a4eaaf08ff8a62184584d2e8684e9d2aa178c7915a7da3747"},
]
[package.dependencies]
@ -2833,13 +2813,13 @@ urllib3 = ">=2"
[[package]]
name = "types-setuptools"
version = "75.1.0.20240917"
version = "75.2.0.20241019"
description = "Typing stubs for setuptools"
optional = false
python-versions = ">=3.8"
files = [
{file = "types-setuptools-75.1.0.20240917.tar.gz", hash = "sha256:12f12a165e7ed383f31def705e5c0fa1c26215dd466b0af34bd042f7d5331f55"},
{file = "types_setuptools-75.1.0.20240917-py3-none-any.whl", hash = "sha256:06f78307e68d1bbde6938072c57b81cf8a99bc84bd6dc7e4c5014730b097dc0c"},
{file = "types-setuptools-75.2.0.20241019.tar.gz", hash = "sha256:86ea31b5f6df2c6b8f2dc8ae3f72b213607f62549b6fa2ed5866e5299f968694"},
{file = "types_setuptools-75.2.0.20241019-py3-none-any.whl", hash = "sha256:2e48ff3acd4919471e80d5e3f049cce5c177e108d5d36d2d4cee3fa4d4104258"},
]
[[package]]
@ -3030,50 +3010,57 @@ test = ["zope.testrunner"]
[[package]]
name = "zope-interface"
version = "6.0"
version = "7.1.0"
description = "Interfaces for Python"
optional = false
python-versions = ">=3.7"
python-versions = ">=3.8"
files = [
{file = "zope.interface-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f299c020c6679cb389814a3b81200fe55d428012c5e76da7e722491f5d205990"},
{file = "zope.interface-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ee4b43f35f5dc15e1fec55ccb53c130adb1d11e8ad8263d68b1284b66a04190d"},
{file = "zope.interface-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a158846d0fca0a908c1afb281ddba88744d403f2550dc34405c3691769cdd85"},
{file = "zope.interface-6.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f72f23bab1848edb7472309e9898603141644faec9fd57a823ea6b4d1c4c8995"},
{file = "zope.interface-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48f4d38cf4b462e75fac78b6f11ad47b06b1c568eb59896db5b6ec1094eb467f"},
{file = "zope.interface-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:87b690bbee9876163210fd3f500ee59f5803e4a6607d1b1238833b8885ebd410"},
{file = "zope.interface-6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f2363e5fd81afb650085c6686f2ee3706975c54f331b426800b53531191fdf28"},
{file = "zope.interface-6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:af169ba897692e9cd984a81cb0f02e46dacdc07d6cf9fd5c91e81f8efaf93d52"},
{file = "zope.interface-6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa90bac61c9dc3e1a563e5babb3fd2c0c1c80567e815442ddbe561eadc803b30"},
{file = "zope.interface-6.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:89086c9d3490a0f265a3c4b794037a84541ff5ffa28bb9c24cc9f66566968464"},
{file = "zope.interface-6.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:809fe3bf1a91393abc7e92d607976bbb8586512913a79f2bf7d7ec15bd8ea518"},
{file = "zope.interface-6.0-cp311-cp311-win_amd64.whl", hash = "sha256:0ec9653825f837fbddc4e4b603d90269b501486c11800d7c761eee7ce46d1bbb"},
{file = "zope.interface-6.0-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:790c1d9d8f9c92819c31ea660cd43c3d5451df1df61e2e814a6f99cebb292788"},
{file = "zope.interface-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b39b8711578dcfd45fc0140993403b8a81e879ec25d53189f3faa1f006087dca"},
{file = "zope.interface-6.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eba51599370c87088d8882ab74f637de0c4f04a6d08a312dce49368ba9ed5c2a"},
{file = "zope.interface-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ee934f023f875ec2cfd2b05a937bd817efcc6c4c3f55c5778cbf78e58362ddc"},
{file = "zope.interface-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:042f2381118b093714081fd82c98e3b189b68db38ee7d35b63c327c470ef8373"},
{file = "zope.interface-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:dfbbbf0809a3606046a41f8561c3eada9db811be94138f42d9135a5c47e75f6f"},
{file = "zope.interface-6.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:424d23b97fa1542d7be882eae0c0fc3d6827784105264a8169a26ce16db260d8"},
{file = "zope.interface-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e538f2d4a6ffb6edfb303ce70ae7e88629ac6e5581870e66c306d9ad7b564a58"},
{file = "zope.interface-6.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:12175ca6b4db7621aedd7c30aa7cfa0a2d65ea3a0105393e05482d7a2d367446"},
{file = "zope.interface-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c3d7dfd897a588ec27e391edbe3dd320a03684457470415870254e714126b1f"},
{file = "zope.interface-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:b3f543ae9d3408549a9900720f18c0194ac0fe810cecda2a584fd4dca2eb3bb8"},
{file = "zope.interface-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d0583b75f2e70ec93f100931660328965bb9ff65ae54695fb3fa0a1255daa6f2"},
{file = "zope.interface-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:23ac41d52fd15dd8be77e3257bc51bbb82469cf7f5e9a30b75e903e21439d16c"},
{file = "zope.interface-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99856d6c98a326abbcc2363827e16bd6044f70f2ef42f453c0bd5440c4ce24e5"},
{file = "zope.interface-6.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1592f68ae11e557b9ff2bc96ac8fc30b187e77c45a3c9cd876e3368c53dc5ba8"},
{file = "zope.interface-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4407b1435572e3e1610797c9203ad2753666c62883b921318c5403fb7139dec2"},
{file = "zope.interface-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:5171eb073474a5038321409a630904fd61f12dd1856dd7e9d19cd6fe092cbbc5"},
{file = "zope.interface-6.0.tar.gz", hash = "sha256:aab584725afd10c710b8f1e6e208dbee2d0ad009f57d674cb9d1b3964037275d"},
{file = "zope.interface-7.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2bd9e9f366a5df08ebbdc159f8224904c1c5ce63893984abb76954e6fbe4381a"},
{file = "zope.interface-7.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:661d5df403cd3c5b8699ac480fa7f58047a3253b029db690efa0c3cf209993ef"},
{file = "zope.interface-7.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91b6c30689cfd87c8f264acb2fc16ad6b3c72caba2aec1bf189314cf1a84ca33"},
{file = "zope.interface-7.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b6a4924f5bad9fe21d99f66a07da60d75696a136162427951ec3cb223a5570d"},
{file = "zope.interface-7.1.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80a3c00b35f6170be5454b45abe2719ea65919a2f09e8a6e7b1362312a872cd3"},
{file = "zope.interface-7.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:b936d61dbe29572fd2cfe13e30b925e5383bed1aba867692670f5a2a2eb7b4e9"},
{file = "zope.interface-7.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0ac20581fc6cd7c754f6dff0ae06fedb060fa0e9ea6309d8be8b2701d9ea51c4"},
{file = "zope.interface-7.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:848b6fa92d7c8143646e64124ed46818a0049a24ecc517958c520081fd147685"},
{file = "zope.interface-7.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec1ef1fdb6f014d5886b97e52b16d0f852364f447d2ab0f0c6027765777b6667"},
{file = "zope.interface-7.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3bcff5c09d0215f42ba64b49205a278e44413d9bf9fa688fd9e42bfe472b5f4f"},
{file = "zope.interface-7.1.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07add15de0cc7e69917f7d286b64d54125c950aeb43efed7a5ea7172f000fbc1"},
{file = "zope.interface-7.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:9940d5bc441f887c5f375ec62bcf7e7e495a2d5b1da97de1184a88fb567f06af"},
{file = "zope.interface-7.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f245d039f72e6f802902375755846f5de1ee1e14c3e8736c078565599bcab621"},
{file = "zope.interface-7.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6159e767d224d8f18deff634a1d3722e68d27488c357f62ebeb5f3e2f5288b1f"},
{file = "zope.interface-7.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e956b1fd7f3448dd5e00f273072e73e50dfafcb35e4227e6d5af208075593c9"},
{file = "zope.interface-7.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ff115ef91c0eeac69cd92daeba36a9d8e14daee445b504eeea2b1c0b55821984"},
{file = "zope.interface-7.1.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bec001798ab62c3fc5447162bf48496ae9fba02edc295a9e10a0b0c639a6452e"},
{file = "zope.interface-7.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:124149e2d42067b9c6597f4dafdc7a0983d0163868f897b7bb5dc850b14f9a87"},
{file = "zope.interface-7.1.0-cp313-cp313-macosx_10_9_x86_64.whl", hash = "sha256:9733a9a0f94ef53d7aa64661811b20875b5bc6039034c6e42fb9732170130573"},
{file = "zope.interface-7.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5fcf379b875c610b5a41bc8a891841533f98de0520287d7f85e25386cd10d3e9"},
{file = "zope.interface-7.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0a45b5af9f72c805ee668d1479480ca85169312211bed6ed18c343e39307d5f"},
{file = "zope.interface-7.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4af4a12b459a273b0b34679a5c3dc5e34c1847c3dd14a628aa0668e19e638ea2"},
{file = "zope.interface-7.1.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a735f82d2e3ed47ca01a20dfc4c779b966b16352650a8036ab3955aad151ed8a"},
{file = "zope.interface-7.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:5501e772aff595e3c54266bc1bfc5858e8f38974ce413a8f1044aae0f32a83a3"},
{file = "zope.interface-7.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ec59fe53db7d32abb96c6d4efeed84aab4a7c38c62d7a901a9b20c09dd936e7a"},
{file = "zope.interface-7.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e53c291debef523b09e1fe3dffe5f35dde164f1c603d77f770b88a1da34b7ed6"},
{file = "zope.interface-7.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:711eebc77f2092c6a8b304bad0b81a6ce3cf5490b25574e7309fbc07d881e3af"},
{file = "zope.interface-7.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a00ead2e24c76436e1b457a5132d87f83858330f6c923640b7ef82d668525d1"},
{file = "zope.interface-7.1.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e28ea0bc4b084fc93a483877653a033062435317082cdc6388dec3438309faf"},
{file = "zope.interface-7.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:27cfb5205d68b12682b6e55ab8424662d96e8ead19550aad0796b08dd2c9a45e"},
{file = "zope.interface-7.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9e3e48f3dea21c147e1b10c132016cb79af1159facca9736d231694ef5a740a8"},
{file = "zope.interface-7.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a99240b1d02dc469f6afbe7da1bf617645e60290c272968f4e53feec18d7dce8"},
{file = "zope.interface-7.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc8a318162123eddbdf22fcc7b751288ce52e4ad096d3766ff1799244352449d"},
{file = "zope.interface-7.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7b25db127db3e6b597c5f74af60309c4ad65acd826f89609662f0dc33a54728"},
{file = "zope.interface-7.1.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a29ac607e970b5576547f0e3589ec156e04de17af42839eedcf478450687317"},
{file = "zope.interface-7.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:a14c9decf0eb61e0892631271d500c1e306c7b6901c998c7035e194d9150fdd1"},
{file = "zope_interface-7.1.0.tar.gz", hash = "sha256:3f005869a1a05e368965adb2075f97f8ee9a26c61898a9e52a9764d93774f237"},
]
[package.dependencies]
setuptools = "*"
[package.extras]
docs = ["Sphinx", "repoze.sphinx.autointerface"]
test = ["coverage (>=5.0.3)", "zope.event", "zope.testing"]
testing = ["coverage (>=5.0.3)", "zope.event", "zope.testing"]
docs = ["Sphinx", "furo", "repoze.sphinx.autointerface"]
test = ["coverage[toml]", "zope.event", "zope.testing"]
testing = ["coverage[toml]", "zope.event", "zope.testing"]
[[package]]
name = "zope-schema"
@ -3113,5 +3100,5 @@ user-search = ["pyicu"]
[metadata]
lock-version = "2.0"
python-versions = "^3.8.0"
content-hash = "c8a22f901970b2f851151e731532757fd3acf7ba02930952636d2e6c5c9c0c90"
python-versions = "^3.9.0"
content-hash = "d71159b19349fdc0b7cd8e06e8c8778b603fc37b941c6df34ddc31746783d94d"

View file

@ -36,7 +36,7 @@
[tool.ruff]
line-length = 88
target-version = "py38"
target-version = "py39"
[tool.ruff.lint]
# See https://beta.ruff.rs/docs/rules/#error-e
@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust"
[tool.poetry]
name = "matrix-synapse"
version = "1.117.0rc1"
version = "1.119.0rc2"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "AGPL-3.0-or-later"
@ -155,7 +155,7 @@ synapse_review_recent_signups = "synapse._scripts.review_recent_signups:main"
update_synapse_database = "synapse._scripts.update_synapse_database:main"
[tool.poetry.dependencies]
python = "^3.8.0"
python = "^3.9.0"
# Mandatory Dependencies
# ----------------------
@ -178,7 +178,7 @@ Twisted = {extras = ["tls"], version = ">=18.9.0"}
treq = ">=15.1"
# Twisted has required pyopenssl 16.0 since about Twisted 16.6.
pyOpenSSL = ">=16.0.0"
PyYAML = ">=3.13"
PyYAML = ">=5.3"
pyasn1 = ">=0.1.9"
pyasn1-modules = ">=0.0.7"
bcrypt = ">=3.1.7"
@ -241,7 +241,7 @@ authlib = { version = ">=0.15.1", optional = true }
# `contrib/systemd/log_config.yaml`.
# Note: systemd-python 231 appears to have been yanked from pypi
systemd-python = { version = ">=231", optional = true }
lxml = { version = ">=4.2.0", optional = true }
lxml = { version = ">=4.5.2", optional = true }
sentry-sdk = { version = ">=0.7.2", optional = true }
opentracing = { version = ">=2.2.0", optional = true }
jaeger-client = { version = ">=4.0.0", optional = true }
@ -320,7 +320,7 @@ all = [
# failing on new releases. Keeping lower bounds loose here means that dependabot
# can bump versions without having to update the content-hash in the lockfile.
# This helps prevent merge conflicts when running a batch of dependabot updates.
ruff = "0.6.9"
ruff = "0.7.3"
# Type checking only works with the pydantic.v1 compat module from pydantic v2
pydantic = "^2"
@ -370,7 +370,7 @@ tomli = ">=1.2.3"
# runtime errors caused by build system changes.
# We are happy to raise these upper bounds upon request,
# provided we check that it's safe to do so (i.e. that CI passes).
requires = ["poetry-core>=1.1.0,<=1.9.0", "setuptools_rust>=1.3,<=1.8.1"]
requires = ["poetry-core>=1.1.0,<=1.9.1", "setuptools_rust>=1.3,<=1.8.1"]
build-backend = "poetry.core.masonry.api"
@ -378,13 +378,13 @@ build-backend = "poetry.core.masonry.api"
# Skip unsupported platforms (by us or by Rust).
# See https://cibuildwheel.readthedocs.io/en/stable/options/#build-skip for the list of build targets.
# We skip:
# - CPython 3.6 and 3.7: EOLed
# - PyPy 3.7: we only support Python 3.8+
# - CPython 3.6, 3.7 and 3.8: EOLed
# - PyPy 3.7 and 3.8: we only support Python 3.9+
# - musllinux i686: excluded to reduce number of wheels we build.
# c.f. https://github.com/matrix-org/synapse/pull/12595#discussion_r963107677
# - PyPy on Aarch64 and musllinux on aarch64: too slow to build.
# c.f. https://github.com/matrix-org/synapse/pull/14259
skip = "cp36* cp37* pp37* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"
skip = "cp36* cp37* cp38* pp37* pp38* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64"
# We need a rust compiler
before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y --profile minimal"

View file

@ -60,6 +60,7 @@ fn bench_match_exact(b: &mut Bencher) {
true,
vec![],
false,
false,
)
.unwrap();
@ -105,6 +106,7 @@ fn bench_match_word(b: &mut Bencher) {
true,
vec![],
false,
false,
)
.unwrap();
@ -150,6 +152,7 @@ fn bench_match_word_miss(b: &mut Bencher) {
true,
vec![],
false,
false,
)
.unwrap();
@ -195,6 +198,7 @@ fn bench_eval_message(b: &mut Bencher) {
true,
vec![],
false,
false,
)
.unwrap();
@ -205,6 +209,7 @@ fn bench_eval_message(b: &mut Bencher) {
false,
false,
false,
false,
);
b.iter(|| eval.run(&rules, Some("bob"), Some("person")));

View file

@ -81,7 +81,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[
))]),
actions: Cow::Borrowed(&[Action::Notify]),
default: true,
default_enabled: false,
default_enabled: true,
},
PushRule {
rule_id: Cow::Borrowed("global/override/.m.rule.suppress_notices"),

View file

@ -105,6 +105,9 @@ pub struct PushRuleEvaluator {
/// If MSC3931 (room version feature flags) is enabled. Usually controlled by the same
/// flag as MSC1767 (extensible events core).
msc3931_enabled: bool,
/// If MSC4210 (remove legacy mentions) is enabled.
msc4210_enabled: bool,
}
#[pymethods]
@ -122,6 +125,7 @@ impl PushRuleEvaluator {
related_event_match_enabled,
room_version_feature_flags,
msc3931_enabled,
msc4210_enabled,
))]
pub fn py_new(
flattened_keys: BTreeMap<String, JsonValue>,
@ -133,6 +137,7 @@ impl PushRuleEvaluator {
related_event_match_enabled: bool,
room_version_feature_flags: Vec<String>,
msc3931_enabled: bool,
msc4210_enabled: bool,
) -> Result<Self, Error> {
let body = match flattened_keys.get("content.body") {
Some(JsonValue::Value(SimpleJsonValue::Str(s))) => s.clone().into_owned(),
@ -150,6 +155,7 @@ impl PushRuleEvaluator {
related_event_match_enabled,
room_version_feature_flags,
msc3931_enabled,
msc4210_enabled,
})
}
@ -176,7 +182,8 @@ impl PushRuleEvaluator {
// For backwards-compatibility the legacy mention rules are disabled
// if the event contains the 'm.mentions' property.
if self.has_mentions
// Additionally, MSC4210 always disables the legacy rules.
if (self.has_mentions || self.msc4210_enabled)
&& (rule_id == "global/override/.m.rule.contains_display_name"
|| rule_id == "global/content/.m.rule.contains_user_name"
|| rule_id == "global/override/.m.rule.roomnotif")
@ -526,6 +533,7 @@ fn push_rule_evaluator() {
true,
vec![],
true,
false,
)
.unwrap();
@ -555,6 +563,7 @@ fn test_requires_room_version_supports_condition() {
false,
flags,
true,
false,
)
.unwrap();
@ -582,7 +591,7 @@ fn test_requires_room_version_supports_condition() {
};
let rules = PushRules::new(vec![custom_rule]);
result = evaluator.run(
&FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true, false),
&FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true, false, false),
None,
None,
);

View file

@ -534,6 +534,7 @@ pub struct FilteredPushRules {
msc3381_polls_enabled: bool,
msc3664_enabled: bool,
msc4028_push_encrypted_events: bool,
msc4210_enabled: bool,
}
#[pymethods]
@ -546,6 +547,7 @@ impl FilteredPushRules {
msc3381_polls_enabled: bool,
msc3664_enabled: bool,
msc4028_push_encrypted_events: bool,
msc4210_enabled: bool,
) -> Self {
Self {
push_rules,
@ -554,6 +556,7 @@ impl FilteredPushRules {
msc3381_polls_enabled,
msc3664_enabled,
msc4028_push_encrypted_events,
msc4210_enabled,
}
}
@ -596,6 +599,14 @@ impl FilteredPushRules {
return false;
}
if self.msc4210_enabled
&& (rule.rule_id == "global/override/.m.rule.contains_display_name"
|| rule.rule_id == "global/content/.m.rule.contains_user_name"
|| rule.rule_id == "global/override/.m.rule.roomnotif")
{
return false;
}
true
})
.map(|r| {

View file

@ -28,12 +28,11 @@ from typing import Collection, Optional, Sequence, Set
# example)
DISTS = (
"debian:bullseye", # (EOL ~2024-07) (our EOL forced by Python 3.9 is 2025-10-05)
"debian:bookworm", # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
"debian:sid", # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24)
"ubuntu:focal", # 20.04 LTS (EOL 2025-04) (our EOL forced by Python 3.8 is 2024-10-14)
"debian:bookworm", # (EOL 2026-06) (our EOL forced by Python 3.11 is 2027-10-24)
"debian:sid", # (rolling distro, no EOL)
"ubuntu:jammy", # 22.04 LTS (EOL 2027-04) (our EOL forced by Python 3.10 is 2026-10-04)
"ubuntu:lunar", # 23.04 (EOL 2024-01) (our EOL forced by Python 3.11 is 2027-10-24)
"ubuntu:mantic", # 23.10 (EOL 2024-07) (our EOL forced by Python 3.11 is 2027-10-24)
"ubuntu:noble", # 24.04 LTS (EOL 2029-06)
"ubuntu:oracular", # 24.10 (EOL 2025-07)
"debian:trixie", # (EOL not specified yet)
)

View file

@ -360,7 +360,7 @@ def is_cacheable(
# For a type alias, check if the underlying real type is cachable.
return is_cacheable(mypy.types.get_proper_type(rt), signature, verbose)
elif isinstance(rt, UninhabitedType) and rt.is_noreturn:
elif isinstance(rt, UninhabitedType):
# There is no return value, just consider it cacheable. This is only used
# in tests.
return True, None
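The relaxed branch above now treats any uninhabited return type as cacheable, not only those explicitly flagged as no-return. For context, a minimal standalone sketch (hypothetical example, not taken from the plugin) of a function whose return type mypy models as `UninhabitedType`:

from typing import NoReturn

def always_raises() -> NoReturn:
    # mypy infers an uninhabited ("Never") return type here: the function
    # cannot return normally, so there is no value for a cache to store.
    raise RuntimeError("unreachable")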

View file

@ -40,7 +40,7 @@ import commonmark
import git
from click.exceptions import ClickException
from git import GitCommandError, Repo
from github import Github
from github import BadCredentialsException, Github
from packaging import version
@ -323,10 +323,8 @@ def tag(gh_token: Optional[str]) -> None:
def _tag(gh_token: Optional[str]) -> None:
"""Tags the release and generates a draft GitHub release"""
if gh_token:
# Test that the GH Token is valid before continuing.
gh = Github(gh_token)
gh.get_user()
# Test that the GH Token is valid before continuing.
check_valid_gh_token(gh_token)
# Make sure we're in a git repo.
repo = get_repo_and_check_clean_checkout()
@ -469,10 +467,8 @@ def upload(gh_token: Optional[str]) -> None:
def _upload(gh_token: Optional[str]) -> None:
"""Upload release to pypi."""
if gh_token:
# Test that the GH Token is valid before continuing.
gh = Github(gh_token)
gh.get_user()
# Test that the GH Token is valid before continuing.
check_valid_gh_token(gh_token)
current_version = get_package_version()
tag_name = f"v{current_version}"
@ -569,10 +565,8 @@ def wait_for_actions(gh_token: Optional[str]) -> None:
def _wait_for_actions(gh_token: Optional[str]) -> None:
if gh_token:
# Test that the GH Token is valid before continuing.
gh = Github(gh_token)
gh.get_user()
# Test that the GH Token is valid before continuing.
check_valid_gh_token(gh_token)
# Find out the version and tag name.
current_version = get_package_version()
@ -806,6 +800,22 @@ def get_repo_and_check_clean_checkout(
return repo
def check_valid_gh_token(gh_token: Optional[str]) -> None:
"""Check that a github token is valid, if supplied"""
if not gh_token:
# No github token supplied, so nothing to do.
return
try:
gh = Github(gh_token)
# We need to look up the name to trigger a request.
_name = gh.get_user().name
except BadCredentialsException as e:
raise click.ClickException(f"Github credentials are bad: {e}")
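The attribute access is what makes this work: PyGithub constructs its objects lazily, so `Github(gh_token)` and `get_user()` alone perform no HTTP request, and only reading an attribute such as `.name` triggers the `GET /user` call that actually exercises the token. A minimal sketch of the same pattern in isolation (token value hypothetical):

from github import BadCredentialsException, Github

gh = Github("ghp_hypothetical_token")
user = gh.get_user()  # lazy: no request has been made yet
try:
    _ = user.name  # first attribute access performs the GET /user request
except BadCredentialsException:
    print("GitHub rejected the token")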
def find_ref(repo: git.Repo, ref_name: str) -> Optional[git.HEAD]:
"""Find the branch/ref, looking first locally then in the remote."""
if ref_name in repo.references:

View file

@ -39,8 +39,8 @@ ImageFile.LOAD_TRUNCATED_IMAGES = True
# Note that we use an (unneeded) variable here so that pyupgrade doesn't nuke the
# if-statement completely.
py_version = sys.version_info
if py_version < (3, 8):
print("Synapse requires Python 3.8 or above.")
if py_version < (3, 9):
print("Synapse requires Python 3.9 or above.")
sys.exit(1)
# Allow using the asyncio reactor via env var.

View file

@ -447,3 +447,9 @@ class ExperimentalConfig(Config):
# MSC4151: Report room API (Client-Server API)
self.msc4151_enabled: bool = experimental.get("msc4151_enabled", False)
# MSC4210: Remove legacy mentions
self.msc4210_enabled: bool = experimental.get("msc4210_enabled", False)
# MSC4222: Adding `state_after` to sync v2
self.msc4222_enabled: bool = experimental.get("msc4222_enabled", False)
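For illustration, a hedged sketch of the parsed experimental section as the code above would see it (key names taken from the `experimental.get(...)` calls; values illustrative, all defaults are False):

experimental = {
    "msc4151_enabled": False,  # report room API
    "msc4210_enabled": True,   # remove legacy mentions
    "msc4222_enabled": False,  # `state_after` in sync v2
}
msc4210_enabled: bool = experimental.get("msc4210_enabled", False)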

View file

@ -113,7 +113,7 @@ class Authenticator:
):
raise AuthenticationError(
HTTPStatus.UNAUTHORIZED,
"Destination mismatch in auth header",
f"Destination mismatch in auth header, received: {destination!r}",
Codes.UNAUTHORIZED,
)
if (

View file

@ -73,6 +73,8 @@ class AdminHandler:
self._redact_all_events, REDACT_ALL_EVENTS_ACTION_NAME
)
self.hs = hs
async def get_redact_task(self, redact_id: str) -> Optional[ScheduledTask]:
"""Get the current status of an active redaction process
@ -423,8 +425,10 @@ class AdminHandler:
user_id = task.params.get("user_id")
assert user_id is not None
# puppet the user if they're ours, otherwise use admin to redact
requester = create_requester(
user_id, authenticated_entity=admin.user.to_string()
user_id if self.hs.is_mine_id(user_id) else admin.user.to_string(),
authenticated_entity=admin.user.to_string(),
)
reason = task.params.get("reason")

View file

@ -615,7 +615,7 @@ class E2eKeysHandler:
3. Attempt to fetch fallback keys from the database.
Args:
local_query: An iterable of tuples of (user ID, device ID, algorithm).
local_query: An iterable of tuples of (user ID, device ID, algorithm, number of keys).
always_include_fallback_keys: True to always include fallback keys.
Returns:

View file

@ -196,7 +196,9 @@ class MessageHandler:
AuthError (403) if the user doesn't have permission to view
members of this room.
"""
state_filter = state_filter or StateFilter.all()
if state_filter is None:
state_filter = StateFilter.all()
user_id = requester.user.to_string()
if at_token:

View file

@ -1190,6 +1190,26 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
origin_server_ts=origin_server_ts,
)
async def check_for_any_membership_in_room(
self, *, user_id: str, room_id: str
) -> None:
"""
Check if the user has any membership in the room, raising an error if not.
Args:
user_id: The user to check.
room_id: The room to check.
Raises:
AuthError if the user doesn't have any membership in the room.
"""
result = await self.store.get_local_current_membership_for_user_in_room(
user_id=user_id, room_id=room_id
)
if result is None or result == (None, None):
raise AuthError(403, f"User {user_id} has no membership in room {room_id}")
async def _should_perform_remote_join(
self,
user_id: str,

View file

@ -12,9 +12,10 @@
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
import itertools
import logging
from itertools import chain
from typing import TYPE_CHECKING, Dict, List, Mapping, Optional, Set, Tuple
from typing import TYPE_CHECKING, AbstractSet, Dict, List, Mapping, Optional, Set, Tuple
from prometheus_client import Histogram
from typing_extensions import assert_never
@ -79,6 +80,15 @@ sync_processing_time = Histogram(
["initial"],
)
# Limit the number of state_keys we should remember sending down the connection for each
# (room_id, user_id). We don't want to store and pull out too much data in the database.
#
# 100 is an arbitrary but small-ish number. The idea is that we probably won't send down
# too many redundant member state events (that the client already knows about) for a
# given ongoing conversation if we keep 100 around. Most rooms don't have 100 members
# anyway and it takes a while to cycle through 100 members.
MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER = 100
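# Illustrative sketch (an assumption about intent, mirroring the trimming
# logic in `_required_state_changes` below): the cap keeps all requested state
# keys and tops up with an arbitrary subset of previously-sent keys.
# `_cap_state_keys_example` is a hypothetical helper, not part of this change.
def _cap_state_keys_example(
    request_state_keys: Set[str], inheritable_previous_state_keys: Set[str]
) -> Set[str]:
    combined = request_state_keys | inheritable_previous_state_keys
    if len(combined) <= MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER:
        return combined
    # Over the cap: always keep the requested keys, then fill any remaining
    # room with an arbitrary subset of the previous keys.
    return request_state_keys | set(
        itertools.islice(
            inheritable_previous_state_keys,
            max(
                0,
                MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER
                - len(request_state_keys),
            ),
        )
    )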
class SlidingSyncHandler:
def __init__(self, hs: "HomeServer"):
@ -452,13 +462,11 @@ class SlidingSyncHandler:
to_token=to_token,
)
event_map = await self.store.get_events(list(state_ids.values()))
events = await self.store.get_events_as_list(list(state_ids.values()))
state_map = {}
for key, event_id in state_ids.items():
event = event_map.get(event_id)
if event:
state_map[key] = event
for event in events:
state_map[(event.type, event.state_key)] = event
return state_map
@ -522,6 +530,8 @@ class SlidingSyncHandler:
state_reset_out_of_room = True
prev_room_sync_config = previous_connection_state.room_configs.get(room_id)
# Determine whether we should limit the timeline to the token range.
#
# We should return historical messages (before token range) in the
@ -550,7 +560,6 @@ class SlidingSyncHandler:
# or `limited` mean for clients that interpret them correctly. In future this
# behavior is almost certainly going to change.
#
# TODO: Also handle changes to `required_state`
from_bound = None
initial = True
ignore_timeline_bound = False
@ -571,7 +580,6 @@ class SlidingSyncHandler:
log_kv({"sliding_sync.room_status": room_status})
prev_room_sync_config = previous_connection_state.room_configs.get(room_id)
if prev_room_sync_config is not None:
# Check if the timeline limit has increased, if so ignore the
# timeline bound and record the change (see "XXX: Odd behavior"
@ -582,8 +590,6 @@ class SlidingSyncHandler:
):
ignore_timeline_bound = True
# TODO: Check for changes in `required_state``
log_kv(
{
"sliding_sync.from_bound": from_bound,
@ -877,6 +883,14 @@ class SlidingSyncHandler:
#
# Calculate the `StateFilter` based on the `required_state` for the room
required_state_filter = StateFilter.none()
# The requested `required_state_map` with the lazy membership expanded and
# `$ME` replaced with the user's ID. This allows us, on the next request, to
# see what membership we've already sent down to the client.
#
# Make a copy so we can modify it. Still need to be careful to make a copy of
# the state key sets if we want to add/remove from them. We could make a deep
# copy but this saves us some work.
expanded_required_state_map = dict(room_sync_config.required_state_map)
if room_membership_for_user_at_to_token.membership not in (
Membership.INVITE,
Membership.KNOCK,
@ -942,21 +956,48 @@ class SlidingSyncHandler:
):
lazy_load_room_members = True
# Everyone in the timeline is relevant
#
# FIXME: We probably also care about invite, ban, kick, targets, etc
# but the spec only mentions "senders".
timeline_membership: Set[str] = set()
if timeline_events is not None:
for timeline_event in timeline_events:
timeline_membership.add(timeline_event.sender)
# Update the required state filter so we pick up the new
# membership
for user_id in timeline_membership:
required_state_types.append(
(EventTypes.Member, user_id)
)
# FIXME: We probably also care about invite, ban, kick, targets, etc
# but the spec only mentions "senders".
# Add an explicit entry for each user in the timeline
#
# Make a new set or copy of the state key set so we can
# modify it without affecting the original
# `required_state_map`
expanded_required_state_map[EventTypes.Member] = (
expanded_required_state_map.get(
EventTypes.Member, set()
)
| timeline_membership
)
elif state_key == StateValues.ME:
num_others += 1
required_state_types.append((state_type, user.to_string()))
# Replace `$ME` with the user's ID so we can deduplicate
# when someone requests the same state with `$ME` or with
# their user ID.
#
# Make a new set or copy of the state key set so we can
# modify it without affecting the original
# `required_state_map`
expanded_required_state_map[EventTypes.Member] = (
expanded_required_state_map.get(
EventTypes.Member, set()
)
| {user.to_string()}
)
else:
num_others += 1
required_state_types.append((state_type, state_key))
@ -997,6 +1038,10 @@ class SlidingSyncHandler:
include_others=required_state_filter.include_others,
)
# The required state map to store in the room sync config, if it has
# changed.
changed_required_state_map: Optional[Mapping[str, AbstractSet[str]]] = None
# We can return all of the state that was requested if this was the first
# time we've sent the room down this connection.
room_state: StateMap[EventBase] = {}
@ -1010,6 +1055,29 @@ class SlidingSyncHandler:
else:
assert from_bound is not None
if prev_room_sync_config is not None:
# Check if there are any changes to the required state config
# that we need to handle.
changed_required_state_map, added_state_filter = (
_required_state_changes(
user.to_string(),
prev_required_state_map=prev_room_sync_config.required_state_map,
request_required_state_map=expanded_required_state_map,
state_deltas=room_state_delta_id_map,
)
)
if added_state_filter:
# Some state entries got added, so we pull out the current
# state for them. If we don't do this we'd only send down new deltas.
state_ids = await self.get_current_state_ids_at(
room_id=room_id,
room_membership_for_user_at_to_token=room_membership_for_user_at_to_token,
state_filter=added_state_filter,
to_token=to_token,
)
room_state_delta_id_map.update(state_ids)
events = await self.store.get_events(
state_filter.filter_state(room_state_delta_id_map).values()
)
@ -1108,10 +1176,15 @@ class SlidingSyncHandler:
# sensible order again.
bump_stamp = 0
unstable_expanded_timeline = False
prev_room_sync_config = previous_connection_state.room_configs.get(room_id)
room_sync_required_state_map_to_persist: Mapping[str, AbstractSet[str]] = (
expanded_required_state_map
)
if changed_required_state_map:
room_sync_required_state_map_to_persist = changed_required_state_map
# Record the `room_sync_config` if we're `ignore_timeline_bound` (which means
# that the `timeline_limit` has increased)
unstable_expanded_timeline = False
if ignore_timeline_bound:
# FIXME: We signal the fact that we're sending down more events to
# the client by setting `unstable_expanded_timeline` to true (see
@ -1120,7 +1193,7 @@ class SlidingSyncHandler:
new_connection_state.room_configs[room_id] = RoomSyncConfig(
timeline_limit=room_sync_config.timeline_limit,
required_state_map=room_sync_config.required_state_map,
required_state_map=room_sync_required_state_map_to_persist,
)
elif prev_room_sync_config is not None:
# If the result is `limited` then we need to record that the
@ -1149,13 +1222,20 @@ class SlidingSyncHandler:
):
new_connection_state.room_configs[room_id] = RoomSyncConfig(
timeline_limit=room_sync_config.timeline_limit,
required_state_map=room_sync_config.required_state_map,
required_state_map=room_sync_required_state_map_to_persist,
)
# TODO: Record changes in required_state.
elif changed_required_state_map is not None:
new_connection_state.room_configs[room_id] = RoomSyncConfig(
timeline_limit=room_sync_config.timeline_limit,
required_state_map=room_sync_required_state_map_to_persist,
)
else:
new_connection_state.room_configs[room_id] = room_sync_config
new_connection_state.room_configs[room_id] = RoomSyncConfig(
timeline_limit=room_sync_config.timeline_limit,
required_state_map=room_sync_required_state_map_to_persist,
)
set_tag(SynapseTags.RESULT_PREFIX + "initial", initial)
@ -1285,3 +1365,231 @@ class SlidingSyncHandler:
return new_bump_event_pos.stream
return None
def _required_state_changes(
user_id: str,
*,
prev_required_state_map: Mapping[str, AbstractSet[str]],
request_required_state_map: Mapping[str, AbstractSet[str]],
state_deltas: StateMap[str],
) -> Tuple[Optional[Mapping[str, AbstractSet[str]]], StateFilter]:
"""Calculates the changes between the required state room config from the
previous requests compared with the current request.
This does two things. First, it calculates whether we need to update the room
config due to changes to the required state. Second, it works out which state
entries we need to pull from current state and return because they now appear
in the required state when they previously didn't (on top of the state
deltas).
This function takes care to handle the case where a state entry is added,
removed and then added again to the required state. In that case we only want
to re-send that entry down sync if it has changed.
Returns:
A 2-tuple of updated required state config (or None if there is no update)
and the state filter to use to fetch extra current state that we need to
return.
"""
if prev_required_state_map == request_required_state_map:
# There has been no change. Return immediately.
return None, StateFilter.none()
prev_wildcard = prev_required_state_map.get(StateValues.WILDCARD, set())
request_wildcard = request_required_state_map.get(StateValues.WILDCARD, set())
# If we were previously fetching everything ("*", "*"), always update the effective
# room required state config to match the request. And since we were previously
# already fetching everything, we don't have to fetch anything now that they've
# narrowed.
if StateValues.WILDCARD in prev_wildcard:
return request_required_state_map, StateFilter.none()
# If an event type wildcard has been added or removed, we don't try to do
# anything fancy, and instead always update the effective room required
# state config to match the request.
if request_wildcard - prev_wildcard:
# Some keys were added, so we need to fetch everything
return request_required_state_map, StateFilter.all()
if prev_wildcard - request_wildcard:
# Keys were only removed, so we don't have to fetch everything.
return request_required_state_map, StateFilter.none()
# Contains updates to the required state map compared with the previous room
# config. This has the same format as `RoomSyncConfig.required_state`
changes: Dict[str, AbstractSet[str]] = {}
# The set of types/state keys that we need to fetch and return to the
# client. Passed to `StateFilter.from_types(...)`
added: List[Tuple[str, Optional[str]]] = []
# Convert the list of state deltas to map from type to state_keys that have
# changed.
changed_types_to_state_keys: Dict[str, Set[str]] = {}
for event_type, state_key in state_deltas:
changed_types_to_state_keys.setdefault(event_type, set()).add(state_key)
# First we calculate what, if anything, has been *added*.
for event_type in (
prev_required_state_map.keys() | request_required_state_map.keys()
):
old_state_keys = prev_required_state_map.get(event_type, set())
request_state_keys = request_required_state_map.get(event_type, set())
changed_state_keys = changed_types_to_state_keys.get(event_type, set())
if old_state_keys == request_state_keys:
# No change to this type
continue
if not request_state_keys - old_state_keys:
# Nothing *added*, so we skip. Removals happen below.
continue
# We only remove state keys from the effective state if they've been
# removed from the request *and* the state has changed. This ensures
# that if a client removes and then re-adds a state key, we only send
# down the associated current state event if it's changed (rather than
# sending down the same event twice).
invalidated_state_keys = (
old_state_keys - request_state_keys
) & changed_state_keys
# Figure out which state keys we should remember sending down the connection
inheritable_previous_state_keys = (
# Retain the previous state_keys that we've sent down before.
# Wildcard and lazy state keys are not sticky from previous requests.
(old_state_keys - {StateValues.WILDCARD, StateValues.LAZY})
- invalidated_state_keys
)
# Always update changes to include the newly added keys (we've expanded the set
# of state keys), use the new requested set with whatever hasn't been
# invalidated from the previous set.
changes[event_type] = request_state_keys | inheritable_previous_state_keys
# Limit the number of state_keys we should remember sending down the connection
# for each (room_id, user_id). We don't want to store and pull out too much data
# in the database. This is a happy medium between remembering nothing and
# everything. We can avoid sending redundant state down the connection most of
# the time given that most rooms don't have 100 members anyway and it takes a
# while to cycle through 100 members.
#
# Only remember up to `MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER` state keys.
if len(changes[event_type]) > MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER:
# Reset back to only the requested state keys
changes[event_type] = request_state_keys
# Skip if there isn't any room to fill in the rest with previous state keys
if len(request_state_keys) < MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER:
# Fill the rest with previous state_keys. Ideally, we could sort
# these by recency but it's just a set so just pick an arbitrary
# subset (good enough).
changes[event_type] = changes[event_type] | set(
itertools.islice(
inheritable_previous_state_keys,
# Just taking the difference isn't perfect, as there could be
# overlap between the requested and previous keys, but we take
# the easy route for now and avoid additional set operations to
# figure it out.
MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER
- len(request_state_keys),
)
)
if StateValues.WILDCARD in old_state_keys:
# We were previously fetching everything for this type, so we don't need to
# fetch anything new.
continue
# Record the new state keys to fetch for this type.
if StateValues.WILDCARD in request_state_keys:
# If we have added a wildcard then we always just fetch everything.
added.append((event_type, None))
else:
for state_key in request_state_keys - old_state_keys:
if state_key == StateValues.ME:
added.append((event_type, user_id))
elif state_key == StateValues.LAZY:
# We handle lazy loading separately (outside this function),
# so don't need to explicitly add anything here.
#
# LAZY values should also be ignored for event types that are
# not membership.
pass
else:
added.append((event_type, state_key))
added_state_filter = StateFilter.from_types(added)
# Figure out what changes we need to apply to the effective required state
# config.
for event_type, changed_state_keys in changed_types_to_state_keys.items():
old_state_keys = prev_required_state_map.get(event_type, set())
request_state_keys = request_required_state_map.get(event_type, set())
if old_state_keys == request_state_keys:
# No change.
continue
# If we see the `user_id` as a state_key, also add "$ME" to the list of state
# that has changed to account for people requesting `required_state` with `$ME`
# or their user ID.
if user_id in changed_state_keys:
changed_state_keys.add(StateValues.ME)
# We only remove state keys from the effective state if they've been
# removed from the request *and* the state has changed. This ensures
# that if a client removes and then re-adds a state key, we only send
# down the associated current state event if it's changed (rather than
# sending down the same event twice).
invalidated_state_keys = (
old_state_keys - request_state_keys
) & changed_state_keys
# Any expansion of the set of state keys was already handled above, so skip.
if request_state_keys - old_state_keys:
continue
old_state_key_wildcard = StateValues.WILDCARD in old_state_keys
request_state_key_wildcard = StateValues.WILDCARD in request_state_keys
if old_state_key_wildcard != request_state_key_wildcard:
# If a state_key wildcard has been added or removed, we always update the
# effective room required state config to match the request.
changes[event_type] = request_state_keys
continue
if event_type == EventTypes.Member:
old_state_key_lazy = StateValues.LAZY in old_state_keys
request_state_key_lazy = StateValues.LAZY in request_state_keys
if old_state_key_lazy != request_state_key_lazy:
# If a "$LAZY" has been added or removed we always update the effective room
# required state config to match the request.
changes[event_type] = request_state_keys
continue
# At this point there are no wildcards and no additions to the set of
# state keys requested, only deletions.
#
# We only remove state keys from the effective state if they've been
# removed from the request *and* the state has changed. This ensures
# that if a client removes and then re-adds a state key, we only send
# down the associated current state event if it's changed (rather than
# sending down the same event twice).
if invalidated_state_keys:
changes[event_type] = old_state_keys - invalidated_state_keys
if changes:
# Update the required state config based on the changes.
new_required_state_map = dict(prev_required_state_map)
for event_type, state_keys in changes.items():
if state_keys:
new_required_state_map[event_type] = state_keys
else:
# Remove entries with empty state keys.
new_required_state_map.pop(event_type, None)
return new_required_state_map, added_state_filter
else:
return None, added_state_filter
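# Worked example (illustrative, not from the change itself): a client
# previously required membership for "@a:x" and now requires it for "@b:x"
# instead. If the membership of "@a:x" has *not* changed since, we keep it in
# the effective config (so re-adding it later doesn't re-send an unchanged
# event) and only fetch "@b:x":
#
#   changes, added_filter = _required_state_changes(
#       "@me:x",
#       prev_required_state_map={"m.room.member": {"@a:x"}},
#       request_required_state_map={"m.room.member": {"@b:x"}},
#       state_deltas={},
#   )
#   # changes == {"m.room.member": {"@a:x", "@b:x"}}
#   # added_filter fetches only ("m.room.member", "@b:x")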

View file

@ -49,7 +49,10 @@ from synapse.types.handlers.sliding_sync import (
SlidingSyncConfig,
SlidingSyncResult,
)
from synapse.util.async_helpers import concurrently_execute
from synapse.util.async_helpers import (
concurrently_execute,
gather_optional_coroutines,
)
if TYPE_CHECKING:
from synapse.server import HomeServer
@ -97,26 +100,26 @@ class SlidingSyncExtensionHandler:
if sync_config.extensions is None:
return SlidingSyncResult.Extensions()
to_device_response = None
to_device_coro = None
if sync_config.extensions.to_device is not None:
to_device_response = await self.get_to_device_extension_response(
to_device_coro = self.get_to_device_extension_response(
sync_config=sync_config,
to_device_request=sync_config.extensions.to_device,
to_token=to_token,
)
e2ee_response = None
e2ee_coro = None
if sync_config.extensions.e2ee is not None:
e2ee_response = await self.get_e2ee_extension_response(
e2ee_coro = self.get_e2ee_extension_response(
sync_config=sync_config,
e2ee_request=sync_config.extensions.e2ee,
to_token=to_token,
from_token=from_token,
)
account_data_response = None
account_data_coro = None
if sync_config.extensions.account_data is not None:
account_data_response = await self.get_account_data_extension_response(
account_data_coro = self.get_account_data_extension_response(
sync_config=sync_config,
previous_connection_state=previous_connection_state,
new_connection_state=new_connection_state,
@ -127,9 +130,9 @@ class SlidingSyncExtensionHandler:
from_token=from_token,
)
receipts_response = None
receipts_coro = None
if sync_config.extensions.receipts is not None:
receipts_response = await self.get_receipts_extension_response(
receipts_coro = self.get_receipts_extension_response(
sync_config=sync_config,
previous_connection_state=previous_connection_state,
new_connection_state=new_connection_state,
@ -141,9 +144,9 @@ class SlidingSyncExtensionHandler:
from_token=from_token,
)
typing_response = None
typing_coro = None
if sync_config.extensions.typing is not None:
typing_response = await self.get_typing_extension_response(
typing_coro = self.get_typing_extension_response(
sync_config=sync_config,
actual_lists=actual_lists,
actual_room_ids=actual_room_ids,
@ -153,6 +156,20 @@ class SlidingSyncExtensionHandler:
from_token=from_token,
)
(
to_device_response,
e2ee_response,
account_data_response,
receipts_response,
typing_response,
) = await gather_optional_coroutines(
to_device_coro,
e2ee_coro,
account_data_coro,
receipts_coro,
typing_coro,
)
return SlidingSyncResult.Extensions(
to_device=to_device_response,
e2ee=e2ee_response,

View file

@ -143,6 +143,7 @@ class SyncConfig:
filter_collection: FilterCollection
is_guest: bool
device_id: Optional[str]
use_state_after: bool
@attr.s(slots=True, frozen=True, auto_attribs=True)
@ -1141,6 +1142,7 @@ class SyncHandler:
since_token: Optional[StreamToken],
end_token: StreamToken,
full_state: bool,
joined: bool,
) -> MutableStateMap[EventBase]:
"""Works out the difference in state between the end of the previous sync and
the start of the timeline.
@ -1155,6 +1157,7 @@ class SyncHandler:
the point just after their leave event.
full_state: Whether to force returning the full state.
`lazy_load_members` still applies when `full_state` is `True`.
joined: Whether the user is currently joined to the room.
Returns:
The state to return in the sync response for the room.
@ -1230,11 +1233,12 @@ class SyncHandler:
if full_state:
state_ids = await self._compute_state_delta_for_full_sync(
room_id,
sync_config.user,
sync_config,
batch,
end_token,
members_to_fetch,
timeline_state,
joined,
)
else:
# If this is an initial sync then full_state should be set, and
@ -1244,6 +1248,7 @@ class SyncHandler:
state_ids = await self._compute_state_delta_for_incremental_sync(
room_id,
sync_config,
batch,
since_token,
end_token,
@ -1316,20 +1321,24 @@ class SyncHandler:
async def _compute_state_delta_for_full_sync(
self,
room_id: str,
syncing_user: UserID,
sync_config: SyncConfig,
batch: TimelineBatch,
end_token: StreamToken,
members_to_fetch: Optional[Set[str]],
timeline_state: StateMap[str],
joined: bool,
) -> StateMap[str]:
"""Calculate the state events to be included in a full sync response.
As with `_compute_state_delta_for_incremental_sync`, the result will include
the membership events for the senders of each event in `members_to_fetch`.
Note that whether this returns the state at the start or the end of the
batch depends on `sync_config.use_state_after` (c.f. MSC4222).
Args:
room_id: The room we are calculating for.
syncing_user: The user that is calling `/sync`.
sync_config: The sync configuration of the user that is calling `/sync`.
batch: The timeline batch for the room that will be sent to the user.
end_token: Token of the end of the current batch. Normally this will be
the same as the global "now_token", but if the user has left the room,
@ -1338,10 +1347,11 @@ class SyncHandler:
events in the timeline.
timeline_state: The contribution to the room state from state events in
`batch`. Only contains the last event for any given state key.
joined: Whether the user is currently joined to the room.
Returns:
A map from (type, state_key) to event_id, for each event that we believe
should be included in the `state` part of the sync response.
should be included in the `state` or `state_after` part of the sync response.
"""
if members_to_fetch is not None:
# Lazy-loading of membership events is enabled.
@ -1359,7 +1369,7 @@ class SyncHandler:
# is no guarantee that our membership will be in the auth events of
# timeline events when the room is partial stated.
state_filter = StateFilter.from_lazy_load_member_list(
members_to_fetch.union((syncing_user.to_string(),))
members_to_fetch.union((sync_config.user.to_string(),))
)
# We are happy to use partial state to compute the `/sync` response.
@ -1373,6 +1383,61 @@ class SyncHandler:
await_full_state = True
lazy_load_members = False
# Check whether we want to return the state at the start or the end of the
# timeline. If at the end, we can just use the current state.
if sync_config.use_state_after:
# If we're getting the state at the end of the timeline, we can just
# use the current state of the room (and roll back any changes
# between when we fetched the current state and `end_token`).
#
# For rooms we're not joined to, there might be a very large number
# of deltas between `end_token` and "now", and so instead we fetch
# the state at the end of the timeline.
if joined:
state_ids = await self._state_storage_controller.get_current_state_ids(
room_id,
state_filter=state_filter,
await_full_state=await_full_state,
)
# Now roll back the state by looking at the state deltas between
# end_token and now.
deltas = await self.store.get_current_state_deltas_for_room(
room_id,
from_token=end_token.room_key,
to_token=self.store.get_room_max_token(),
)
if deltas:
mutable_state_ids = dict(state_ids)
# We iterate over the deltas backwards so that if there are
# multiple changes of the same type/state_key we'll
# correctly pick the earliest delta.
for delta in reversed(deltas):
if delta.prev_event_id:
mutable_state_ids[(delta.event_type, delta.state_key)] = (
delta.prev_event_id
)
elif (delta.event_type, delta.state_key) in mutable_state_ids:
mutable_state_ids.pop((delta.event_type, delta.state_key))
state_ids = mutable_state_ids
return state_ids
else:
# Just use state groups to get the state at the end of the
# timeline, i.e. the state at the leave/etc event.
state_at_timeline_end = (
await self._state_storage_controller.get_state_ids_at(
room_id,
stream_position=end_token,
state_filter=state_filter,
await_full_state=await_full_state,
)
)
return state_at_timeline_end
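# Worked example of the rollback in the joined branch above (illustrative):
# suppose current state has the room name at event E3, but a delta between
# `end_token` and "now" records that E3 replaced E2 (prev_event_id == E2).
# Iterating the deltas in reverse rewrites ("m.room.name", "") back to E2,
# recovering the state as of `end_token`.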
state_at_timeline_end = await self._state_storage_controller.get_state_ids_at(
room_id,
stream_position=end_token,
@ -1405,6 +1470,7 @@ class SyncHandler:
async def _compute_state_delta_for_incremental_sync(
self,
room_id: str,
sync_config: SyncConfig,
batch: TimelineBatch,
since_token: StreamToken,
end_token: StreamToken,
@ -1419,8 +1485,12 @@ class SyncHandler:
(`compute_state_delta`) is responsible for keeping track of which membership
events we have already sent to the client, and hence ripping them out.
Note that whether this returns the state at the start or the end of the
batch depends on `sync_config.use_state_after` (c.f. MSC4222).
Args:
room_id: The room we are calculating for.
sync_config
batch: The timeline batch for the room that will be sent to the user.
since_token: Token of the end of the previous batch.
end_token: Token of the end of the current batch. Normally this will be
@ -1433,7 +1503,7 @@ class SyncHandler:
Returns:
A map from (type, state_key) to event_id, for each event that we believe
should be included in the `state` part of the sync response.
should be included in the `state` or `state_after` part of the sync response.
"""
if members_to_fetch is not None:
# Lazy-loading is enabled. Only return the state that is needed.
@ -1445,6 +1515,51 @@ class SyncHandler:
await_full_state = True
lazy_load_members = False
# Check whether we want to return the state at the start or the end of the
# timeline. If at the end, we can just use the current state delta stream.
if sync_config.use_state_after:
delta_state_ids: MutableStateMap[str] = {}
if members_to_fetch:
# We're lazy-loading, so the client might need some more member
# events to understand the events in this timeline. So we always
# fish out all the member events corresponding to the timeline
# here. The caller will then dedupe any redundant ones.
member_ids = await self._state_storage_controller.get_current_state_ids(
room_id=room_id,
state_filter=StateFilter.from_types(
(EventTypes.Member, member) for member in members_to_fetch
),
await_full_state=await_full_state,
)
delta_state_ids.update(member_ids)
# We don't do LL filtering for incremental syncs - see
# https://github.com/vector-im/riot-web/issues/7211#issuecomment-419976346
# N.B. this slows down incr syncs as we are now processing way more
# state in the server than if we were LLing.
#
# i.e. we return all state deltas, including membership changes that
# we'd normally exclude due to LL.
deltas = await self.store.get_current_state_deltas_for_room(
room_id=room_id,
from_token=since_token.room_key,
to_token=end_token.room_key,
)
for delta in deltas:
if delta.event_id is None:
# There was a state reset and this state entry is no longer
# present, but we have no way of informing the client about
# this, so we just skip it for now.
continue
# Note that deltas are in stream ordering, so if there are
# multiple deltas for a given type/state_key we'll always pick
# the latest one.
delta_state_ids[(delta.event_type, delta.state_key)] = delta.event_id
return delta_state_ids
# For a non-gappy sync if the events in the timeline are simply a linear
# chain (i.e. no merging/branching of the graph), then we know the state
# delta between the end of the previous sync and start of the new one is
@ -2867,6 +2982,7 @@ class SyncHandler:
since_token,
room_builder.end_token,
full_state=full_state,
joined=room_builder.rtype == "joined",
)
else:
# An out of band room won't have any state changes.

View file

@ -51,25 +51,17 @@ logger = logging.getLogger(__name__)
# "Hop-by-hop" headers (as opposed to "end-to-end" headers) as defined by RFC2616
# section 13.5.1 and referenced in RFC9110 section 7.6.1. These are meant to only be
# consumed by the immediate recipient and not be forwarded on.
HOP_BY_HOP_HEADERS = {
"Connection",
"Keep-Alive",
"Proxy-Authenticate",
"Proxy-Authorization",
"TE",
"Trailers",
"Transfer-Encoding",
"Upgrade",
HOP_BY_HOP_HEADERS_LOWERCASE = {
"connection",
"keep-alive",
"proxy-authenticate",
"proxy-authorization",
"te",
"trailers",
"transfer-encoding",
"upgrade",
}
if hasattr(Headers, "_canonicalNameCaps"):
# Twisted < 24.7.0rc1
_canonicalHeaderName = Headers()._canonicalNameCaps # type: ignore[attr-defined]
else:
# Twisted >= 24.7.0rc1
# But note that `_encodeName` still exists on prior versions,
# it just encodes differently
_canonicalHeaderName = Headers()._encodeName
assert all(header.lower() == header for header in HOP_BY_HOP_HEADERS_LOWERCASE)
def parse_connection_header_value(
@ -92,12 +84,12 @@ def parse_connection_header_value(
Returns:
The set of header names that should not be copied over from the remote response.
The keys are capitalized in canonical capitalization.
The keys are lowercased.
"""
extra_headers_to_remove: Set[str] = set()
if connection_header_value:
extra_headers_to_remove = {
_canonicalHeaderName(connection_option.strip()).decode("ascii")
connection_option.decode("ascii").strip().lower()
for connection_option in connection_header_value.split(b",")
}
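# Minimal usage sketch (illustrative): a remote response carrying
# `Connection: close, X-Foo` tells the proxy to also drop `x-foo` when
# copying headers over (`close` is included too, which is harmless).
#
#   extra = parse_connection_header_value(b"close, X-Foo")
#   # extra == {"close", "x-foo"}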
@ -194,7 +186,7 @@ class ProxyResource(_AsyncResource):
# The `Connection` header also defines which headers should not be copied over.
connection_header = response_headers.getRawHeaders(b"connection")
extra_headers_to_remove = parse_connection_header_value(
extra_headers_to_remove_lowercase = parse_connection_header_value(
connection_header[0] if connection_header else None
)
@ -202,10 +194,10 @@ class ProxyResource(_AsyncResource):
for k, v in response_headers.getAllRawHeaders():
# Do not copy over any hop-by-hop headers. These are meant to only be
# consumed by the immediate recipient and not be forwarded on.
header_key = k.decode("ascii")
header_key_lowercase = k.decode("ascii").lower()
if (
header_key in HOP_BY_HOP_HEADERS
or header_key in extra_headers_to_remove
header_key_lowercase in HOP_BY_HOP_HEADERS_LOWERCASE
or header_key_lowercase in extra_headers_to_remove_lowercase
):
continue

View file

@ -39,7 +39,7 @@ from twisted.internet.endpoints import (
)
from twisted.internet.interfaces import (
IPushProducer,
IReactorTCP,
IReactorTime,
IStreamClientEndpoint,
)
from twisted.internet.protocol import Factory, Protocol
@ -113,7 +113,7 @@ class RemoteHandler(logging.Handler):
port: int,
maximum_buffer: int = 1000,
level: int = logging.NOTSET,
_reactor: Optional[IReactorTCP] = None,
_reactor: Optional[IReactorTime] = None,
):
super().__init__(level=level)
self.host = host

View file

@ -37,6 +37,7 @@ import warnings
from types import TracebackType
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
Optional,
@ -850,6 +851,45 @@ def run_in_background(
return d
def run_coroutine_in_background(
coroutine: typing.Coroutine[Any, Any, R],
) -> "defer.Deferred[R]":
"""Run the coroutine, ensuring that the current context is restored after
return from the function, and that the sentinel context is set once the
deferred returned by the function completes.
Useful for wrapping coroutines that you don't yield or await on (for
instance because you want to pass them to `defer.gatherResults()`).
This is a special case of `run_in_background` where we can accept a
coroutine directly rather than a function. We can do this because a
coroutine does not start running until it is awaited, so creating one by
calling an async function (without awaiting it) cannot change the log
contexts.
"""
current = current_context()
d = defer.ensureDeferred(coroutine)
# The function may have reset the context before returning, so
# we need to restore it now.
ctx = set_current_context(current)
# The original context will be restored when the deferred
# completes, but there is nothing waiting for it, so it will
# get leaked into the reactor or some other function which
# wasn't expecting it. We therefore need to reset the context
# here.
#
# (If this feels asymmetric, consider it this way: we are
# effectively forking a new thread of execution. We are
# probably currently within a ``with LoggingContext()`` block,
# which is supposed to have a single entry and exit point. But
# by spawning off another deferred, we are effectively
# adding a new exit point.)
d.addBoth(_set_context_cb, ctx)
return d
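# Minimal usage sketch (illustrative): fire off a coroutine without awaiting
# it, keeping logcontext handling correct, and collect the result later.
# `_do_work` is a hypothetical coroutine.
#
#   d = run_coroutine_in_background(_do_work())
#   # ... do other things ...
#   result = await make_deferred_yieldable(d)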
T = TypeVar("T")

View file

@ -259,7 +259,7 @@ class MediaRepository:
"""
media = await self.store.get_local_media(media_id)
if media is None:
raise SynapseError(404, "Unknow media ID", errcode=Codes.NOT_FOUND)
raise NotFoundError("Unknown media ID")
if media.user_id != auth_user.to_string():
raise SynapseError(

View file

@ -436,6 +436,7 @@ class BulkPushRuleEvaluator:
self._related_event_match_enabled,
event.room_version.msc3931_push_features,
self.hs.config.experimental.msc1767_enabled, # MSC3931 flag
self.hs.config.experimental.msc4210_enabled,
)
for uid, rules in rules_by_user.items():

View file

@ -43,12 +43,15 @@ class ExperimentalFeature(str, Enum):
MSC3881 = "msc3881"
MSC3575 = "msc3575"
MSC4222 = "msc4222"
def is_globally_enabled(self, config: "HomeServerConfig") -> bool:
if self is ExperimentalFeature.MSC3881:
return config.experimental.msc3881_enabled
if self is ExperimentalFeature.MSC3575:
return config.experimental.msc3575_enabled
if self is ExperimentalFeature.MSC4222:
return config.experimental.msc4222_enabled
assert_never(self)

View file

@ -20,11 +20,13 @@
#
import logging
import re
from http import HTTPStatus
from typing import TYPE_CHECKING, Tuple
from synapse._pydantic_compat import StrictStr
from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
from synapse.api.urls import CLIENT_API_PREFIX
from synapse.http.server import HttpServer
from synapse.http.servlet import (
RestServlet,
@ -105,18 +107,17 @@ class ReportEventRestServlet(RestServlet):
class ReportRoomRestServlet(RestServlet):
"""This endpoint lets clients report a room for abuse.
Whilst MSC4151 is not yet merged, this unstable endpoint is enabled on matrix.org
for content moderation purposes, and therefore backwards compatibility should be
carefully considered when changing anything on this endpoint.
More details on the MSC: https://github.com/matrix-org/matrix-spec-proposals/pull/4151
Introduced by MSC4151: https://github.com/matrix-org/matrix-spec-proposals/pull/4151
"""
PATTERNS = client_patterns(
"/org.matrix.msc4151/rooms/(?P<room_id>[^/]*)/report$",
releases=[],
v1=False,
unstable=True,
# Cast the Iterable to a list so that we can `append` below.
PATTERNS = list(
client_patterns(
"/rooms/(?P<room_id>[^/]*)/report$",
releases=("v3",),
unstable=False,
v1=False,
)
)
def __init__(self, hs: "HomeServer"):
@ -126,6 +127,16 @@ class ReportRoomRestServlet(RestServlet):
self.clock = hs.get_clock()
self.store = hs.get_datastores().main
# TODO: Remove the unstable variant after 2-3 releases
# https://github.com/element-hq/synapse/issues/17373
if hs.config.experimental.msc4151_enabled:
self.PATTERNS.append(
re.compile(
f"^{CLIENT_API_PREFIX}/unstable/org.matrix.msc4151"
"/rooms/(?P<room_id>[^/]*)/report$"
)
)
class PostBody(RequestBodyModel):
reason: StrictStr
@ -153,6 +164,4 @@ class ReportRoomRestServlet(RestServlet):
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
ReportEventRestServlet(hs).register(http_server)
if hs.config.experimental.msc4151_enabled:
ReportRoomRestServlet(hs).register(http_server)
ReportRoomRestServlet(hs).register(http_server)

View file

@ -152,6 +152,14 @@ class SyncRestServlet(RestServlet):
filter_id = parse_string(request, "filter")
full_state = parse_boolean(request, "full_state", default=False)
use_state_after = False
if await self.store.is_feature_enabled(
user.to_string(), ExperimentalFeature.MSC4222
):
use_state_after = parse_boolean(
request, "org.matrix.msc4222.use_state_after", default=False
)
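# Illustrative request (assuming the MSC4222 feature is enabled for the
# user):
#
#   GET /_matrix/client/v3/sync?org.matrix.msc4222.use_state_after=true
#
# Without the flag (or with the feature disabled), the existing `state`
# behaviour is unchanged.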
logger.debug(
"/sync: user=%r, timeout=%r, since=%r, "
"set_presence=%r, filter_id=%r, device_id=%r",
@ -184,6 +192,7 @@ class SyncRestServlet(RestServlet):
full_state,
device_id,
last_ignore_accdata_streampos,
use_state_after,
)
if filter_id is None:
@ -220,6 +229,7 @@ class SyncRestServlet(RestServlet):
filter_collection=filter_collection,
is_guest=requester.is_guest,
device_id=device_id,
use_state_after=use_state_after,
)
since_token = None
@ -258,7 +268,7 @@ class SyncRestServlet(RestServlet):
# We know that the requester has an access token since appservices
# cannot use sync.
response_content = await self.encode_response(
time_now, sync_result, requester, filter_collection
time_now, sync_config, sync_result, requester, filter_collection
)
logger.debug("Event formatting complete")
@ -268,6 +278,7 @@ class SyncRestServlet(RestServlet):
async def encode_response(
self,
time_now: int,
sync_config: SyncConfig,
sync_result: SyncResult,
requester: Requester,
filter: FilterCollection,
@ -292,7 +303,7 @@ class SyncRestServlet(RestServlet):
)
joined = await self.encode_joined(
sync_result.joined, time_now, serialize_options
sync_config, sync_result.joined, time_now, serialize_options
)
invited = await self.encode_invited(
@ -304,7 +315,7 @@ class SyncRestServlet(RestServlet):
)
archived = await self.encode_archived(
sync_result.archived, time_now, serialize_options
sync_config, sync_result.archived, time_now, serialize_options
)
logger.debug("building sync response dict")
@ -372,6 +383,7 @@ class SyncRestServlet(RestServlet):
@trace_with_opname("sync.encode_joined")
async def encode_joined(
self,
sync_config: SyncConfig,
rooms: List[JoinedSyncResult],
time_now: int,
serialize_options: SerializeEventConfig,
@ -380,6 +392,7 @@ class SyncRestServlet(RestServlet):
Encode the joined rooms in a sync result
Args:
sync_config
rooms: list of sync results for rooms this user is joined to
time_now: current time - used as a baseline for age calculations
serialize_options: Event serializer options
@ -389,7 +402,11 @@ class SyncRestServlet(RestServlet):
joined = {}
for room in rooms:
joined[room.room_id] = await self.encode_room(
room, time_now, joined=True, serialize_options=serialize_options
sync_config,
room,
time_now,
joined=True,
serialize_options=serialize_options,
)
return joined
@ -477,6 +494,7 @@ class SyncRestServlet(RestServlet):
@trace_with_opname("sync.encode_archived")
async def encode_archived(
self,
sync_config: SyncConfig,
rooms: List[ArchivedSyncResult],
time_now: int,
serialize_options: SerializeEventConfig,
@ -485,6 +503,7 @@ class SyncRestServlet(RestServlet):
Encode the archived rooms in a sync result
Args:
sync_config
rooms: list of sync results for rooms this user has left
time_now: current time - used as a baseline for age calculations
serialize_options: Event serializer options
@ -494,13 +513,18 @@ class SyncRestServlet(RestServlet):
joined = {}
for room in rooms:
joined[room.room_id] = await self.encode_room(
room, time_now, joined=False, serialize_options=serialize_options
sync_config,
room,
time_now,
joined=False,
serialize_options=serialize_options,
)
return joined
async def encode_room(
self,
sync_config: SyncConfig,
room: Union[JoinedSyncResult, ArchivedSyncResult],
time_now: int,
joined: bool,
@ -508,6 +532,7 @@ class SyncRestServlet(RestServlet):
) -> JsonDict:
"""
Args:
sync_config
room: sync result for a single room
time_now: current time - used as a baseline for age calculations
token_id: ID of the user's auth token - used for namespacing
@ -548,13 +573,20 @@ class SyncRestServlet(RestServlet):
account_data = room.account_data
# We either include a `state` or `state_after` field depending on
# whether the client has opted in to the newer `state_after` behavior.
if sync_config.use_state_after:
state_key_name = "org.matrix.msc4222.state_after"
else:
state_key_name = "state"
result: JsonDict = {
"timeline": {
"events": serialized_timeline,
"prev_batch": await room.timeline.prev_batch.to_string(self.store),
"limited": room.timeline.limited,
},
"state": {"events": serialized_state},
state_key_name: {"events": serialized_state},
"account_data": {"events": account_data},
}
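# Illustrative response shape (key names taken from the code above): when the
# client opts in via `use_state_after`, the per-room payload becomes
#
#   {
#       "timeline": {"events": [...], "prev_batch": "...", "limited": false},
#       "org.matrix.msc4222.state_after": {"events": [...]},
#       "account_data": {"events": [...]},
#   }
#
# instead of carrying a "state" section.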
@ -688,6 +720,7 @@ class SlidingSyncE2eeRestServlet(RestServlet):
filter_collection=self.only_member_events_filter_collection,
is_guest=requester.is_guest,
device_id=device_id,
use_state_after=False, # We don't return any rooms so this flag is a no-op
)
since_token = None

View file

@ -78,6 +78,7 @@ class TagServlet(RestServlet):
super().__init__()
self.auth = hs.get_auth()
self.handler = hs.get_account_data_handler()
self.room_member_handler = hs.get_room_member_handler()
async def on_PUT(
self, request: SynapseRequest, user_id: str, room_id: str, tag: str
@ -85,6 +86,12 @@ class TagServlet(RestServlet):
requester = await self.auth.get_user_by_req(request)
if user_id != requester.user.to_string():
raise AuthError(403, "Cannot add tags for other users.")
# Check if the user has any membership in the room, raising an error if not.
# Although it's not harmful for users to tag random rooms, it's superfluous
# data that we don't need to track or allow.
await self.room_member_handler.check_for_any_membership_in_room(
user_id=user_id, room_id=room_id
)
body = parse_json_object_from_request(request)

View file

@ -94,7 +94,7 @@ class BaseUploadServlet(RestServlet):
# if headers.hasHeader(b"Content-Disposition"):
# disposition = headers.getRawHeaders(b"Content-Disposition")[0]
# TODO(markjh): parse content-dispostion
# TODO(markjh): parse content-disposition
return content_length, upload_name, media_type

View file

@ -249,6 +249,7 @@ class HomeServer(metaclass=abc.ABCMeta):
"""
REQUIRED_ON_BACKGROUND_TASK_STARTUP = [
"admin",
"account_validity",
"auth",
"deactivate_account",

View file

@ -234,8 +234,11 @@ class StateStorageController:
RuntimeError if we don't have a state group for one or more of the events
(ie they are outliers or unknown)
"""
if state_filter is None:
state_filter = StateFilter.all()
await_full_state = True
if state_filter and not state_filter.must_await_full_state(self._is_mine_id):
if not state_filter.must_await_full_state(self._is_mine_id):
await_full_state = False
event_to_groups = await self.get_state_group_for_events(
@ -244,7 +247,7 @@ class StateStorageController:
groups = set(event_to_groups.values())
group_to_state = await self.stores.state._get_state_for_groups(
groups, state_filter or StateFilter.all()
groups, state_filter
)
state_event_map = await self.stores.main.get_events(
@ -292,10 +295,11 @@ class StateStorageController:
RuntimeError if we don't have a state group for one or more of the events
(ie they are outliers or unknown)
"""
if (
await_full_state
and state_filter
and not state_filter.must_await_full_state(self._is_mine_id)
if state_filter is None:
state_filter = StateFilter.all()
if await_full_state and not state_filter.must_await_full_state(
self._is_mine_id
):
# Full state is not required if the state filter is restrictive enough.
await_full_state = False
@ -306,7 +310,7 @@ class StateStorageController:
groups = set(event_to_groups.values())
group_to_state = await self.stores.state._get_state_for_groups(
groups, state_filter or StateFilter.all()
groups, state_filter
)
event_to_state = {
@ -335,9 +339,10 @@ class StateStorageController:
RuntimeError if we don't have a state group for the event (ie it is an
outlier or is unknown)
"""
state_map = await self.get_state_for_events(
[event_id], state_filter or StateFilter.all()
)
if state_filter is None:
state_filter = StateFilter.all()
state_map = await self.get_state_for_events([event_id], state_filter)
return state_map[event_id]
@trace
@ -365,9 +370,12 @@ class StateStorageController:
RuntimeError if we don't have a state group for the event (ie it is an
outlier or is unknown)
"""
if state_filter is None:
state_filter = StateFilter.all()
state_map = await self.get_state_ids_for_events(
[event_id],
state_filter or StateFilter.all(),
state_filter,
await_full_state=await_full_state,
)
return state_map[event_id]
@ -388,9 +396,12 @@ class StateStorageController:
at the event and `state_filter` is not satisfied by partial state.
Defaults to `True`.
"""
if state_filter is None:
state_filter = StateFilter.all()
state_ids = await self.get_state_ids_for_event(
event_id,
state_filter=state_filter or StateFilter.all(),
state_filter=state_filter,
await_full_state=await_full_state,
)
@ -426,6 +437,9 @@ class StateStorageController:
at the last event in the room before `stream_position` and
`state_filter` is not satisfied by partial state. Defaults to `True`.
"""
if state_filter is None:
state_filter = StateFilter.all()
# FIXME: This gets the state at the latest event before the stream ordering,
# which might not be the same as the "current state" of the room at the time
# of the stream token if there were multiple forward extremities at the time.
@ -442,7 +456,7 @@ class StateStorageController:
if last_event_id:
state = await self.get_state_after_event(
last_event_id,
state_filter=state_filter or StateFilter.all(),
state_filter=state_filter,
await_full_state=await_full_state,
)
@ -500,9 +514,10 @@ class StateStorageController:
Returns:
Dict of state group to state map.
"""
return await self.stores.state._get_state_for_groups(
groups, state_filter or StateFilter.all()
)
if state_filter is None:
state_filter = StateFilter.all()
return await self.stores.state._get_state_for_groups(groups, state_filter)
@trace
@tag_args
@ -583,12 +598,13 @@ class StateStorageController:
Returns:
The current state of the room.
"""
if await_full_state and (
not state_filter or state_filter.must_await_full_state(self._is_mine_id)
):
if state_filter is None:
state_filter = StateFilter.all()
if await_full_state and state_filter.must_await_full_state(self._is_mine_id):
await self._partial_state_room_tracker.await_full_state(room_id)
if state_filter and not state_filter.is_full():
if not state_filter.is_full():
return await self.stores.main.get_partial_filtered_current_state_ids(
room_id, state_filter
)

View file

@ -1422,7 +1422,7 @@ class DeviceWorkerStore(RoomMemberWorkerStore, EndToEndKeyWorkerStore):
DELETE FROM device_lists_outbound_last_success
WHERE destination = ? AND user_id = ?
"""
txn.execute_batch(sql, ((row[0], row[1]) for row in rows))
txn.execute_batch(sql, [(row[0], row[1]) for row in rows])
logger.info("Pruned %d device list outbound pokes", count)

View file

@ -99,6 +99,13 @@ class EndToEndKeyBackgroundStore(SQLBaseStore):
unique=True,
)
self.db_pool.updates.register_background_index_update(
update_name="add_otk_ts_added_index",
index_name="e2e_one_time_keys_json_user_id_device_id_algorithm_ts_added_idx",
table="e2e_one_time_keys_json",
columns=("user_id", "device_id", "algorithm", "ts_added_ms"),
)
class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorkerStore):
def __init__(
@ -1122,7 +1129,7 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
"""Take a list of one time keys out of the database.
Args:
query_list: An iterable of tuples of (user ID, device ID, algorithm).
query_list: An iterable of tuples of (user ID, device ID, algorithm, number of keys).
Returns:
A tuple (results, missing) of:
@ -1310,9 +1317,14 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
OTK was found.
"""
# Return the oldest keys from this device (based on `ts_added_ms`).
# Doing so means that keys are issued in the same order they were uploaded,
# which reduces the chances of a client expiring its copy of a (private)
# key while the public key is still on the server, waiting to be issued.
sql = """
SELECT key_id, key_json FROM e2e_one_time_keys_json
WHERE user_id = ? AND device_id = ? AND algorithm = ?
ORDER BY ts_added_ms
LIMIT ?
"""
@ -1354,13 +1366,22 @@ class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorker
A list of tuples (user_id, device_id, algorithm, key_id, key_json)
for each OTK claimed.
"""
# Find, delete, and return the oldest keys from each device (based on
# `ts_added_ms`).
#
# Doing so means that keys are issued in the same order they were uploaded,
# which reduces the chances of a client expiring its copy of a (private)
# key while the public key is still on the server, waiting to be issued.
sql = """
WITH claims(user_id, device_id, algorithm, claim_count) AS (
VALUES ?
), ranked_keys AS (
SELECT
user_id, device_id, algorithm, key_id, claim_count,
ROW_NUMBER() OVER (PARTITION BY (user_id, device_id, algorithm)) AS r
ROW_NUMBER() OVER (
PARTITION BY (user_id, device_id, algorithm)
ORDER BY ts_added_ms
) AS r
FROM e2e_one_time_keys_json
JOIN claims USING (user_id, device_id, algorithm)
)
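# Illustrative effect of adding the ORDER BY above (an assumed example): for
# a device whose keys were uploaded at ts_added_ms = 10, 20, 30, the window
# function now ranks them r = 1, 2, 3, so claiming two keys returns the ts=10
# and ts=20 keys -- i.e. keys are issued oldest-first, in upload order,
# rather than in an unspecified order.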

View file

@ -1692,7 +1692,7 @@ class PersistEventsStore:
"""
txn.execute_batch(
sql,
(
[
(
stream_id,
self._instance_name,
@ -1705,17 +1705,17 @@ class PersistEventsStore:
state_key,
)
for etype, state_key in itertools.chain(to_delete, to_insert)
),
],
)
# Now we actually update the current_state_events table
txn.execute_batch(
"DELETE FROM current_state_events"
" WHERE room_id = ? AND type = ? AND state_key = ?",
(
[
(room_id, etype, state_key)
for etype, state_key in itertools.chain(to_delete, to_insert)
),
],
)
# We include the membership in the current state table, hence we do
@ -1805,11 +1805,11 @@ class PersistEventsStore:
txn.execute_batch(
"DELETE FROM local_current_membership"
" WHERE room_id = ? AND user_id = ?",
(
[
(room_id, state_key)
for etype, state_key in itertools.chain(to_delete, to_insert)
if etype == EventTypes.Member and self.is_mine_id(state_key)
),
],
)
if to_insert:
@ -1869,10 +1869,10 @@ class PersistEventsStore:
txn.execute_batch(
f"""
INSERT INTO sliding_sync_membership_snapshots
(room_id, user_id, sender, membership_event_id, membership, event_stream_ordering, event_instance_name
(room_id, user_id, sender, membership_event_id, membership, forgotten, event_stream_ordering, event_instance_name
{("," + ", ".join(sliding_sync_snapshot_keys)) if sliding_sync_snapshot_keys else ""})
VALUES (
?, ?, ?, ?, ?,
?, ?, ?, ?, ?, ?,
(SELECT stream_ordering FROM events WHERE event_id = ?),
(SELECT COALESCE(instance_name, 'master') FROM events WHERE event_id = ?)
{("," + ", ".join("?" for _ in sliding_sync_snapshot_values)) if sliding_sync_snapshot_values else ""}
@ -1882,6 +1882,7 @@ class PersistEventsStore:
sender = EXCLUDED.sender,
membership_event_id = EXCLUDED.membership_event_id,
membership = EXCLUDED.membership,
forgotten = EXCLUDED.forgotten,
event_stream_ordering = EXCLUDED.event_stream_ordering
{("," + ", ".join(f"{key} = EXCLUDED.{key}" for key in sliding_sync_snapshot_keys)) if sliding_sync_snapshot_keys else ""}
""",
@ -1892,6 +1893,9 @@ class PersistEventsStore:
membership_info.sender,
membership_info.membership_event_id,
membership_info.membership,
# Since this is a new membership, it isn't forgotten anymore (which
# matches how Synapse currently thinks about the forgotten status)
0,
# XXX: We do not use `membership_info.membership_event_stream_ordering` here
# because it is an unreliable value. See XXX note above.
membership_info.membership_event_id,
@ -2914,6 +2918,9 @@ class PersistEventsStore:
"sender": event.sender,
"membership_event_id": event.event_id,
"membership": event.membership,
# Since this is a new membership, it isn't forgotten anymore (which
# matches how Synapse currently thinks about the forgotten status)
"forgotten": 0,
"event_stream_ordering": event.internal_metadata.stream_ordering,
"event_instance_name": event.internal_metadata.instance_name,
}
@ -3214,7 +3221,7 @@ class PersistEventsStore:
if notifiable_events:
txn.execute_batch(
sql,
(
[
(
event.room_id,
event.internal_metadata.stream_ordering,
@ -3222,18 +3229,18 @@ class PersistEventsStore:
event.event_id,
)
for event in notifiable_events
),
],
)
# Now we delete the staging area for *all* events that were being
# persisted.
txn.execute_batch(
"DELETE FROM event_push_actions_staging WHERE event_id = ?",
(
[
(event.event_id,)
for event, _ in all_events_and_contexts
if event.internal_metadata.is_notifiable()
),
],
)
def _remove_push_actions_for_event_id_txn(

View file

@ -304,6 +304,12 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS
_BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE,
self._sliding_sync_membership_snapshots_bg_update,
)
# Add a background update to fix data integrity issue in the
# `sliding_sync_membership_snapshots` -> `forgotten` column
self.db_pool.updates.register_background_update_handler(
_BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_FIX_FORGOTTEN_COLUMN_BG_UPDATE,
self._sliding_sync_membership_snapshots_fix_forgotten_column_bg_update,
)
# We want this to run on the main database at startup before we start processing
# events.
@ -2429,6 +2435,118 @@ class EventsBackgroundUpdatesStore(StreamWorkerStore, StateDeltasStore, SQLBaseS
return len(memberships_to_update_rows)
async def _sliding_sync_membership_snapshots_fix_forgotten_column_bg_update(
self, progress: JsonDict, batch_size: int
) -> int:
"""
Background update to bring the `sliding_sync_membership_snapshots` ->
`forgotten` column back in sync with the `room_memberships` table.
Because of previously flawed code (now fixed), the `forgotten` column fell out
of sync for any room that someone forgot and subsequently re-joined or had any
new membership in. We need to update those rows to match the
`room_memberships` table.
"""
last_event_stream_ordering = progress.get(
"last_event_stream_ordering", -(1 << 31)
)
def _txn(
txn: LoggingTransaction,
) -> int:
"""
Returns:
The number of rows updated.
"""
# To simplify things, we can just recheck any row in
# `sliding_sync_membership_snapshots` with `forgotten=1`
txn.execute(
"""
SELECT
s.room_id,
s.user_id,
s.membership_event_id,
s.event_stream_ordering,
m.forgotten
FROM sliding_sync_membership_snapshots AS s
INNER JOIN room_memberships AS m ON (s.membership_event_id = m.event_id)
WHERE s.event_stream_ordering > ?
AND s.forgotten = 1
ORDER BY s.event_stream_ordering ASC
LIMIT ?
""",
(last_event_stream_ordering, batch_size),
)
memberships_to_update_rows = cast(
List[Tuple[str, str, str, int, int]],
txn.fetchall(),
)
if not memberships_to_update_rows:
return 0
# Assemble the values to update
#
# (room_id, user_id)
key_values: List[Tuple[str, str]] = []
# (forgotten,)
value_values: List[Tuple[int]] = []
for (
room_id,
user_id,
_membership_event_id,
_event_stream_ordering,
forgotten,
) in memberships_to_update_rows:
key_values.append(
(
room_id,
user_id,
)
)
value_values.append((forgotten,))
# Update all of the rows in one go
self.db_pool.simple_update_many_txn(
txn,
table="sliding_sync_membership_snapshots",
key_names=("room_id", "user_id"),
key_values=key_values,
value_names=("forgotten",),
value_values=value_values,
)
# Update the progress
(
_room_id,
_user_id,
_membership_event_id,
event_stream_ordering,
_forgotten,
) = memberships_to_update_rows[-1]
self.db_pool.updates._background_update_progress_txn(
txn,
_BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_FIX_FORGOTTEN_COLUMN_BG_UPDATE,
{
"last_event_stream_ordering": event_stream_ordering,
},
)
return len(memberships_to_update_rows)
num_rows = await self.db_pool.runInteraction(
"_sliding_sync_membership_snapshots_fix_forgotten_column_bg_update",
_txn,
)
if not num_rows:
await self.db_pool.updates._end_background_update(
_BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_FIX_FORGOTTEN_COLUMN_BG_UPDATE
)
return num_rows
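The new background update follows the resumable keyset-pagination shape used by the other updates in this file: select a batch past the last processed stream ordering, fix it up, record the new high-water mark, and finish once a batch comes back empty. A runnable toy of the pattern (sqlite3 stands in for the Synapse database layer; table and column names are illustrative):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE snapshots (ordering INTEGER PRIMARY KEY, forgotten INTEGER)"
    )
    conn.executemany(
        "INSERT INTO snapshots VALUES (?, ?)", [(i, i % 2) for i in range(10)]
    )

    progress = {"last_ordering": -(1 << 31)}  # persisted across restarts in reality
    BATCH = 3

    def run_one_batch() -> int:
        rows = conn.execute(
            "SELECT ordering FROM snapshots"
            " WHERE ordering > ? AND forgotten = 1"
            " ORDER BY ordering LIMIT ?",
            (progress["last_ordering"], BATCH),
        ).fetchall()
        if not rows:
            return 0  # the caller would now end the background update
        # ... re-check each selected row against the authoritative table ...
        progress["last_ordering"] = rows[-1][0]  # resume point for the next batch
        return len(rows)

    while run_one_batch():
        pass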
def _resolve_stale_data_in_sliding_sync_tables(
txn: LoggingTransaction,

View file

@ -61,7 +61,13 @@ from synapse.logging.context import (
current_context,
make_deferred_yieldable,
)
from synapse.logging.opentracing import start_active_span, tag_args, trace
from synapse.logging.opentracing import (
SynapseTags,
set_tag,
start_active_span,
tag_args,
trace,
)
from synapse.metrics.background_process_metrics import (
run_as_background_process,
wrap_as_background_process,
@ -525,6 +531,7 @@ class EventsWorkerStore(SQLBaseStore):
return event
@trace
async def get_events(
self,
event_ids: Collection[str],
@ -556,6 +563,11 @@ class EventsWorkerStore(SQLBaseStore):
Returns:
A mapping from event_id to event.
"""
set_tag(
SynapseTags.FUNC_ARG_PREFIX + "event_ids.length",
str(len(event_ids)),
)
events = await self.get_events_as_list(
event_ids,
redact_behaviour=redact_behaviour,
@ -603,6 +615,10 @@ class EventsWorkerStore(SQLBaseStore):
Note that the returned list may be smaller than the list of event
IDs if not all events could be fetched.
"""
set_tag(
SynapseTags.FUNC_ARG_PREFIX + "event_ids.length",
str(len(event_ids)),
)
if not event_ids:
return []
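The `set_tag(SynapseTags.FUNC_ARG_PREFIX + "event_ids.length", ...)` calls added throughout this file record the size of a collection argument on the active tracing span rather than its contents, which keeps spans cheap while still showing how big each fetch was. The shape of the pattern, as a hypothetical standalone helper:

    from typing import Callable, Collection

    def tag_collection_length(
        set_tag: Callable[[str, str], None],
        prefix: str,
        name: str,
        values: Collection,
    ) -> None:
        # Tag values are strings because tracing backends generally expect strings.
        set_tag(f"{prefix}{name}.length", str(len(values)))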
@ -723,10 +739,11 @@ class EventsWorkerStore(SQLBaseStore):
return events
@trace
@cancellable
async def get_unredacted_events_from_cache_or_db(
self,
event_ids: Iterable[str],
event_ids: Collection[str],
allow_rejected: bool = False,
) -> Dict[str, EventCacheEntry]:
"""Fetch a bunch of events from the cache or the database.
@ -748,6 +765,11 @@ class EventsWorkerStore(SQLBaseStore):
Returns:
map from event id to result
"""
set_tag(
SynapseTags.FUNC_ARG_PREFIX + "event_ids.length",
str(len(event_ids)),
)
# Shortcut: check if we have any events in the *in memory* cache - this function
# may be called repeatedly for the same event so at this point we cannot reach
# out to any external cache for performance reasons. The external cache is
@ -936,7 +958,7 @@ class EventsWorkerStore(SQLBaseStore):
events, update_metrics=update_metrics
)
missing_event_ids = (e for e in events if e not in event_map)
missing_event_ids = [e for e in events if e not in event_map]
event_map.update(
await self._get_events_from_external_cache(
events=missing_event_ids,
@ -946,8 +968,9 @@ class EventsWorkerStore(SQLBaseStore):
return event_map
@trace
async def _get_events_from_external_cache(
self, events: Iterable[str], update_metrics: bool = True
self, events: Collection[str], update_metrics: bool = True
) -> Dict[str, EventCacheEntry]:
"""Fetch events from any configured external cache.
@ -957,6 +980,10 @@ class EventsWorkerStore(SQLBaseStore):
events: list of event_ids to fetch
update_metrics: Whether to update the cache hit ratio metrics
"""
set_tag(
SynapseTags.FUNC_ARG_PREFIX + "events.length",
str(len(events)),
)
event_map = {}
for event_id in events:
@ -1222,6 +1249,7 @@ class EventsWorkerStore(SQLBaseStore):
with PreserveLoggingContext():
self.hs.get_reactor().callFromThread(fire_errback, e)
@trace
async def _get_events_from_db(
self, event_ids: Collection[str]
) -> Dict[str, EventCacheEntry]:
@ -1240,6 +1268,11 @@ class EventsWorkerStore(SQLBaseStore):
map from event id to result. May return extra events which
weren't asked for.
"""
set_tag(
SynapseTags.FUNC_ARG_PREFIX + "event_ids.length",
str(len(event_ids)),
)
fetched_event_ids: Set[str] = set()
fetched_events: Dict[str, _EventRow] = {}

View file

@ -729,10 +729,10 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
txn.execute_batch(
sql,
(
[
(time_ms, media_origin, media_id)
for media_origin, media_id in remote_media
),
],
)
sql = (
@ -740,7 +740,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore):
" WHERE media_id = ?"
)
txn.execute_batch(sql, ((time_ms, media_id) for media_id in local_media))
txn.execute_batch(sql, [(time_ms, media_id) for media_id in local_media])
await self.db_pool.runInteraction(
"update_cached_last_access_time", update_cache_txn

View file

@ -109,6 +109,7 @@ def _load_rules(
msc3664_enabled=experimental_config.msc3664_enabled,
msc3381_polls_enabled=experimental_config.msc3381_polls_enabled,
msc4028_push_encrypted_events=experimental_config.msc4028_push_encrypted_events,
msc4210_enabled=experimental_config.msc4210_enabled,
)
return filtered_rules

View file

@ -1175,7 +1175,7 @@ class RoomWorkerStore(CacheInvalidationWorkerStore):
SET quarantined_by = ?
WHERE media_origin = ? AND media_id = ?
""",
((quarantined_by, origin, media_id) for origin, media_id in remote_mxcs),
[(quarantined_by, origin, media_id) for origin, media_id in remote_mxcs],
)
total_media_quarantined += txn.rowcount if txn.rowcount > 0 else 0
@ -2550,7 +2550,9 @@ class RoomStore(RoomBackgroundUpdateStore, RoomWorkerStore):
still contains events with partial state.
"""
try:
async with self._un_partial_stated_rooms_stream_id_gen.get_next() as un_partial_state_room_stream_id:
async with (
self._un_partial_stated_rooms_stream_id_gen.get_next() as un_partial_state_room_stream_id
):
await self.db_pool.runInteraction(
"clear_partial_state_room",
self._clear_partial_state_room_txn,

View file

@ -1375,6 +1375,7 @@ class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore):
keyvalues={"user_id": user_id, "room_id": room_id},
updatevalues={"forgotten": 1},
)
# Handle updating the `sliding_sync_membership_snapshots` table
self.db_pool.simple_update_txn(
txn,
table="sliding_sync_membership_snapshots",

View file

@ -94,7 +94,7 @@ class SearchWorkerStore(SQLBaseStore):
VALUES (?,?,?,to_tsvector('english', ?),?,?)
"""
args1 = (
args1 = [
(
entry.event_id,
entry.room_id,
@ -104,7 +104,7 @@ class SearchWorkerStore(SQLBaseStore):
entry.origin_server_ts,
)
for entry in entries
)
]
txn.execute_batch(sql, args1)

View file

@ -386,8 +386,8 @@ class SlidingSyncStore(SQLBaseStore):
required_state_map: Dict[int, Dict[str, Set[str]]] = {}
for row in rows:
state = required_state_map[row[0]] = {}
for event_type, state_keys in db_to_json(row[1]):
state[event_type] = set(state_keys)
for event_type, state_key in db_to_json(row[1]):
state.setdefault(event_type, set()).add(state_key)
# Get all the room configs, looking up the required state from the map
# above.
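Judging by the new code, each stored row is a JSON list of `(event_type, state_key)` pairs, so the old loop was doubly wrong: unpacking the second element as `state_keys` and wrapping it in `set(...)` split the string into characters, and assigning `state[event_type] = ...` clobbered earlier keys for a repeated event type. A toy illustration under that assumption:

    rows = [("m.room.member", "@alice:test"), ("m.room.member", "@bob:test")]

    broken: dict = {}
    for event_type, state_keys in rows:
        broken[event_type] = set(state_keys)  # splits the string, keeps only the last row

    fixed: dict = {}
    for event_type, state_key in rows:
        fixed.setdefault(event_type, set()).add(state_key)
    assert fixed == {"m.room.member": {"@alice:test", "@bob:test"}}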

View file

@ -572,10 +572,10 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
Returns:
Map from type/state_key to event ID.
"""
if state_filter is None:
state_filter = StateFilter.all()
where_clause, where_args = (
state_filter or StateFilter.all()
).make_sql_filter_clause()
where_clause, where_args = (state_filter).make_sql_filter_clause()
if not where_clause:
# We delegate to the cached version
@ -584,7 +584,7 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
def _get_filtered_current_state_ids_txn(
txn: LoggingTransaction,
) -> StateMap[str]:
results = StateMapWrapper(state_filter=state_filter or StateFilter.all())
results = StateMapWrapper(state_filter=state_filter)
sql = """
SELECT type, state_key, event_id FROM current_state_events
@ -681,7 +681,9 @@ class StateGroupWorkerStore(EventsWorkerStore, SQLBaseStore):
context: EventContext,
) -> None:
"""Update the state group for a partial state event"""
async with self._un_partial_stated_events_stream_id_gen.get_next() as un_partial_state_event_stream_id:
async with (
self._un_partial_stated_events_stream_id_gen.get_next() as un_partial_state_event_stream_id
):
await self.db_pool.runInteraction(
"update_state_for_partial_state_event",
self._update_state_for_partial_state_event_txn,

View file

@ -20,18 +20,26 @@
#
import logging
from typing import List, Optional, Tuple
from typing import TYPE_CHECKING, List, Optional, Tuple
import attr
from synapse.logging.opentracing import trace
from synapse.storage._base import SQLBaseStore
from synapse.storage.database import LoggingTransaction, make_in_list_sql_clause
from synapse.storage.database import (
DatabasePool,
LoggingDatabaseConnection,
LoggingTransaction,
make_in_list_sql_clause,
)
from synapse.storage.databases.main.stream import _filter_results_by_stream
from synapse.types import RoomStreamToken, StrCollection
from synapse.util.caches.stream_change_cache import StreamChangeCache
from synapse.util.iterutils import batch_iter
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
@ -54,6 +62,21 @@ class StateDeltasStore(SQLBaseStore):
# attribute. TODO: can we get static analysis to enforce this?
_curr_state_delta_stream_cache: StreamChangeCache
def __init__(
self,
database: DatabasePool,
db_conn: LoggingDatabaseConnection,
hs: "HomeServer",
):
super().__init__(database, db_conn, hs)
self.db_pool.updates.register_background_index_update(
update_name="current_state_delta_stream_room_index",
index_name="current_state_delta_stream_room_idx",
table="current_state_delta_stream",
columns=("room_id", "stream_id"),
)
async def get_partial_current_state_deltas(
self, prev_stream_id: int, max_stream_id: int
) -> Tuple[int, List[StateDelta]]:

View file

@ -112,8 +112,8 @@ class StateGroupBackgroundUpdateStore(SQLBaseStore):
Returns:
Map from state_group to a StateMap at that point.
"""
state_filter = state_filter or StateFilter.all()
if state_filter is None:
state_filter = StateFilter.all()
results: Dict[int, MutableStateMap[str]] = {group: {} for group in groups}

View file

@ -284,7 +284,8 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
Returns:
Dict of state group to state map.
"""
state_filter = state_filter or StateFilter.all()
if state_filter is None:
state_filter = StateFilter.all()
member_filter, non_member_filter = state_filter.get_member_split()
@ -804,11 +805,11 @@ class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
logger.info("[purge] removing redundant state groups")
txn.execute_batch(
"DELETE FROM state_groups_state WHERE state_group = ?",
((sg,) for sg in state_groups_to_delete),
[(sg,) for sg in state_groups_to_delete],
)
txn.execute_batch(
"DELETE FROM state_groups WHERE id = ?",
((sg,) for sg in state_groups_to_delete),
[(sg,) for sg in state_groups_to_delete],
)
@trace

View file

@ -153,6 +153,8 @@ Changes in SCHEMA_VERSION = 87
Changes in SCHEMA_VERSION = 88
- MSC4140: Add `delayed_events` table that keeps track of events that are to
be posted in response to a resettable timeout or an on-demand action.
- Add background update to fix a data integrity issue in the
`sliding_sync_membership_snapshots` -> `forgotten` column
"""

View file

@ -0,0 +1,21 @@
--
-- This file is licensed under the Affero General Public License (AGPL) version 3.
--
-- Copyright (C) 2024 New Vector, Ltd
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU Affero General Public License as
-- published by the Free Software Foundation, either version 3 of the
-- License, or (at your option) any later version.
--
-- See the GNU Affero General Public License for more details:
-- <https://www.gnu.org/licenses/agpl-3.0.html>.
-- Add a background update to update the `sliding_sync_membership_snapshots` ->
-- `forgotten` column to be in sync with the `room_memberships` table.
--
-- For any room that someone has forgotten and subsequently re-joined or had any new
-- membership in, we need to update the column to match the `room_memberships`
-- table, as it has fallen out of sync.
INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
(8802, 'sliding_sync_membership_snapshots_fix_forgotten_column_bg_update', '{}');

View file

@ -0,0 +1,18 @@
--
-- This file is licensed under the Affero General Public License (AGPL) version 3.
--
-- Copyright (C) 2024 New Vector, Ltd
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU Affero General Public License as
-- published by the Free Software Foundation, either version 3 of the
-- License, or (at your option) any later version.
--
-- See the GNU Affero General Public License for more details:
-- <https://www.gnu.org/licenses/agpl-3.0.html>.
-- Add an index on (user_id, device_id, algorithm, ts_added_ms) on e2e_one_time_keys_json, so that OTKs can
-- efficiently be issued in the same order they were uploaded.
INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
(8803, 'add_otk_ts_added_index', '{}');

View file

@ -0,0 +1,18 @@
--
-- This file is licensed under the Affero General Public License (AGPL) version 3.
--
-- Copyright (C) 2024 New Vector, Ltd
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU Affero General Public License as
-- published by the Free Software Foundation, either version 3 of the
-- License, or (at your option) any later version.
--
-- See the GNU Affero General Public License for more details:
-- <https://www.gnu.org/licenses/agpl-3.0.html>.
-- Add an index on (room_id, stream_id) to `current_state_delta_stream`, so that
-- state deltas for a given room can be fetched efficiently.
INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
(8804, 'current_state_delta_stream_room_index', '{}');

View file

@ -48,6 +48,7 @@ class FilteredPushRules:
msc3381_polls_enabled: bool,
msc3664_enabled: bool,
msc4028_push_encrypted_events: bool,
msc4210_enabled: bool,
): ...
def rules(self) -> Collection[Tuple[PushRule, bool]]: ...
@ -65,6 +66,7 @@ class PushRuleEvaluator:
related_event_match_enabled: bool,
room_version_feature_flags: Tuple[str, ...],
msc3931_enabled: bool,
msc4210_enabled: bool,
): ...
def run(
self,

View file

@ -68,15 +68,23 @@ class StateFilter:
include_others: bool = False
def __attrs_post_init__(self) -> None:
# If `include_others` is set we canonicalise the filter by removing
# wildcards from the types dictionary
if self.include_others:
# If `include_others` is set we canonicalise the filter by removing
# wildcards from the types dictionary
# this is needed to work around the fact that StateFilter is frozen
object.__setattr__(
self,
"types",
immutabledict({k: v for k, v in self.types.items() if v is not None}),
)
else:
# Otherwise we remove entries where the value is the empty set.
object.__setattr__(
self,
"types",
immutabledict({k: v for k, v in self.types.items() if v is None or v}),
)
@staticmethod
def all() -> "StateFilter":
@ -616,6 +624,13 @@ class StateFilter:
return False
def __bool__(self) -> bool:
"""Returns true if this state filter will match any state, or false if
this is the empty filter"""
if self.include_others:
return True
return bool(self.types)
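Together these changes make truthiness meaningful: canonicalisation strips entries whose state-key set is empty (when `include_others` is unset), so a filter that cannot match anything ends up with empty `types` and is falsy. A small sketch of the resulting behaviour (module paths assumed):

    from immutabledict import immutabledict
    from synapse.types.state import StateFilter

    empty = StateFilter(
        types=immutabledict({"m.room.member": frozenset()}), include_others=False
    )
    assert not empty            # matches no state at all
    assert StateFilter.all()    # include_others=True is always truthy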
_ALL_STATE_FILTER = StateFilter(types=immutabledict(), include_others=True)
_ALL_NON_MEMBER_STATE_FILTER = StateFilter(

View file

@ -45,3 +45,6 @@ class _BackgroundUpdates:
SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE = (
"sliding_sync_membership_snapshots_bg_update"
)
SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_FIX_FORGOTTEN_COLUMN_BG_UPDATE = (
"sliding_sync_membership_snapshots_fix_forgotten_column_bg_update"
)

View file

@ -51,7 +51,7 @@ from typing import (
)
import attr
from typing_extensions import Concatenate, Literal, ParamSpec
from typing_extensions import Concatenate, Literal, ParamSpec, Unpack
from twisted.internet import defer
from twisted.internet.defer import CancelledError
@ -61,6 +61,7 @@ from twisted.python.failure import Failure
from synapse.logging.context import (
PreserveLoggingContext,
make_deferred_yieldable,
run_coroutine_in_background,
run_in_background,
)
from synapse.util import Clock
@ -344,6 +345,7 @@ T1 = TypeVar("T1")
T2 = TypeVar("T2")
T3 = TypeVar("T3")
T4 = TypeVar("T4")
T5 = TypeVar("T5")
@overload
@ -402,6 +404,112 @@ def gather_results( # type: ignore[misc]
return deferred.addCallback(tuple)
@overload
async def gather_optional_coroutines(
*coroutines: Unpack[Tuple[Optional[Coroutine[Any, Any, T1]]]],
) -> Tuple[Optional[T1]]: ...
@overload
async def gather_optional_coroutines(
*coroutines: Unpack[
Tuple[
Optional[Coroutine[Any, Any, T1]],
Optional[Coroutine[Any, Any, T2]],
]
],
) -> Tuple[Optional[T1], Optional[T2]]: ...
@overload
async def gather_optional_coroutines(
*coroutines: Unpack[
Tuple[
Optional[Coroutine[Any, Any, T1]],
Optional[Coroutine[Any, Any, T2]],
Optional[Coroutine[Any, Any, T3]],
]
],
) -> Tuple[Optional[T1], Optional[T2], Optional[T3]]: ...
@overload
async def gather_optional_coroutines(
*coroutines: Unpack[
Tuple[
Optional[Coroutine[Any, Any, T1]],
Optional[Coroutine[Any, Any, T2]],
Optional[Coroutine[Any, Any, T3]],
Optional[Coroutine[Any, Any, T4]],
]
],
) -> Tuple[Optional[T1], Optional[T2], Optional[T3], Optional[T4]]: ...
@overload
async def gather_optional_coroutines(
*coroutines: Unpack[
Tuple[
Optional[Coroutine[Any, Any, T1]],
Optional[Coroutine[Any, Any, T2]],
Optional[Coroutine[Any, Any, T3]],
Optional[Coroutine[Any, Any, T4]],
Optional[Coroutine[Any, Any, T5]],
]
],
) -> Tuple[Optional[T1], Optional[T2], Optional[T3], Optional[T4], Optional[T5]]: ...
async def gather_optional_coroutines(
*coroutines: Unpack[Tuple[Optional[Coroutine[Any, Any, T1]], ...]],
) -> Tuple[Optional[T1], ...]:
"""Helper function that allows waiting on multiple coroutines at once.
The return value is a tuple of the return values of the coroutines in order.
If a `None` is passed instead of a coroutine, it will be ignored and a None
is returned in the tuple.
Note: For typechecking we need to have an explicit overload for each
distinct number of coroutines passed in. If you see type problems, it's
likely because you're using many arguments and you need to add a new
overload above.
"""
try:
results = await make_deferred_yieldable(
defer.gatherResults(
[
run_coroutine_in_background(coroutine)
for coroutine in coroutines
if coroutine is not None
],
consumeErrors=True,
)
)
results_iter = iter(results)
return tuple(
next(results_iter) if coroutine is not None else None
for coroutine in coroutines
)
except defer.FirstError as dfe:
# unwrap the error from defer.gatherResults.
# The raised exception's traceback only includes func() etc if
# the 'await' happens before the exception is thrown - ie if the failure
# happens *asynchronously* - otherwise Twisted throws away the traceback as it
# could be large.
#
# We could maybe reconstruct a fake traceback from Failure.frames. Or maybe
# we could throw Twisted into the fires of Mordor.
# suppress exception chaining, because the FirstError doesn't tell us anything
# very interesting.
assert isinstance(dfe.subFailure.value, BaseException)
raise dfe.subFailure.value from None
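A minimal usage sketch (coroutine bodies hypothetical): each position in the result tuple corresponds to the argument in the same position, with `None` passed straight through.

    async def fetch_a() -> int:
        return 1

    async def fetch_b() -> str:
        return "two"

    async def demo(include_b: bool) -> None:
        a, b = await gather_optional_coroutines(
            fetch_a(),
            fetch_b() if include_b else None,
        )
        # a == 1 always; b == "two" when include_b is True, otherwise None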
@attr.s(slots=True, auto_attribs=True)
class _LinearizerEntry:
# The number of things executing.

View file

@ -19,9 +19,12 @@
#
#
import json
import os
import sys
import urllib.parse
from hashlib import blake2b
from importlib.metadata import Distribution, PackageNotFoundError
from typing import Optional
import synapse
from synapse.synapse_rust import get_rust_file_digest
@ -32,22 +35,17 @@ def check_rust_lib_up_to_date() -> None:
be rebuilt.
"""
if not _dist_is_editable():
return
synapse_dir = os.path.dirname(synapse.__file__)
synapse_root = os.path.abspath(os.path.join(synapse_dir, ".."))
# Double check we've not gone into site-packages...
if os.path.basename(synapse_root) == "site-packages":
return
# ... and it looks like the root of a python project.
if not os.path.exists("pyproject.toml"):
return
# Get the location of the editable install.
synapse_root = get_synapse_source_directory()
if synapse_root is None:
return None
# Get the hash of all Rust source files
hash = _hash_rust_files_in_directory(os.path.join(synapse_root, "rust", "src"))
rust_path = os.path.join(synapse_root, "rust", "src")
if not os.path.exists(rust_path):
return None
hash = _hash_rust_files_in_directory(rust_path)
if hash != get_rust_file_digest():
raise Exception("Rust module outdated. Please rebuild using `poetry install`")
@ -82,10 +80,55 @@ def _hash_rust_files_in_directory(directory: str) -> str:
return hasher.hexdigest()
def _dist_is_editable() -> bool:
"""Is distribution an editable install?"""
for path_item in sys.path:
egg_link = os.path.join(path_item, "matrix-synapse.egg-link")
if os.path.isfile(egg_link):
return True
return False
def get_synapse_source_directory() -> Optional[str]:
"""Try and find the source directory of synapse for editable installs (like
those used in development).
Returns None if not an editable install (or otherwise can't find the source
directory).
"""
# Try and find the installed matrix-synapse package.
try:
package = Distribution.from_name("matrix-synapse")
except PackageNotFoundError:
# The package is not found, so it's not installed and must be running
# from a local checkout (usually the current directory).
synapse_dir = os.path.dirname(synapse.__file__)
synapse_root = os.path.abspath(os.path.join(synapse_dir, ".."))
# Double check we've not gone into site-packages...
if os.path.basename(synapse_root) == "site-packages":
return None
# ... and it looks like the root of a python project.
if not os.path.exists("pyproject.toml"):
return None
return synapse_root
# Read the `direct_url.json` metadata for the package. This won't exist for
# packages installed via a repository/etc.
# c.f. https://packaging.python.org/en/latest/specifications/direct-url/
direct_url_json = package.read_text("direct_url.json")
if direct_url_json is None:
return None
# c.f. https://packaging.python.org/en/latest/specifications/direct-url/ for
# the format
direct_url_dict: dict = json.loads(direct_url_json)
# `url` must exist as a key, and point to where we fetched the repo from.
project_url = urllib.parse.urlparse(direct_url_dict["url"])
# If it's not a local file then we must have built the rust libs either a)
# after we downloaded the package, or b) when the downloaded wheel was built.
if project_url.scheme != "file":
return None
# And finally, if it's not an editable install then the files can't have
# changed since we installed the package.
if not direct_url_dict.get("dir_info", {}).get("editable", False):
return None
return project_url.path
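Per the Direct URL specification linked above, the metadata for an editable install looks roughly like the following (illustrative values); the function only returns a path when the URL scheme is `file` and `dir_info.editable` is true:

    example_direct_url = {
        "url": "file:///home/dev/matrix-synapse",
        "dir_info": {"editable": True},
    }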

View file

@ -47,7 +47,6 @@ class WheelTimer(Generic[T]):
"""
self.bucket_size: int = bucket_size
self.entries: List[_Entry[T]] = []
self.current_tick: int = 0
def insert(self, now: int, obj: T, then: int) -> None:
"""Inserts object into timer.
@ -78,11 +77,10 @@ class WheelTimer(Generic[T]):
self.entries[max(min_key, then_key) - min_key].elements.add(obj)
return
next_key = now_key + 1
if self.entries:
last_key = self.entries[-1].end_key
last_key = self.entries[-1].end_key + 1
else:
last_key = next_key
last_key = now_key + 1
# Handle the case when `then` is in the past and `entries` is empty.
then_key = max(last_key, then_key)

View file

@ -90,6 +90,10 @@ if __name__ == "__main__":
if runner.args.worker:
if runner.args.log:
# sys.__stdout__ can technically be None; just exit if that's the case
if not sys.__stdout__:
exit(1)
globalLogBeginner.beginLoggingTo(
[textFileLogObserver(sys.__stdout__)], redirectStandardIO=False
)

View file

@ -151,18 +151,30 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
def test_claim_one_time_key(self) -> None:
local_user = "@boris:" + self.hs.hostname
device_id = "xyz"
keys = {"alg1:k1": "key1"}
res = self.get_success(
self.handler.upload_keys_for_user(
local_user, device_id, {"one_time_keys": keys}
local_user, device_id, {"one_time_keys": {"alg1:k1": "key1"}}
)
)
self.assertDictEqual(
res, {"one_time_key_counts": {"alg1": 1, "signed_curve25519": 0}}
)
res2 = self.get_success(
# Keys should be returned in the order they were uploaded. To test, advance time
# a little, then upload a second key with an earlier key ID; it should get
# returned second.
self.reactor.advance(1)
res = self.get_success(
self.handler.upload_keys_for_user(
local_user, device_id, {"one_time_keys": {"alg1:k0": "key0"}}
)
)
self.assertDictEqual(
res, {"one_time_key_counts": {"alg1": 2, "signed_curve25519": 0}}
)
# now claim both keys back. They should be in the same order
res = self.get_success(
self.handler.claim_one_time_keys(
{local_user: {device_id: {"alg1": 1}}},
self.requester,
@ -171,12 +183,27 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
)
)
self.assertEqual(
res2,
res,
{
"failures": {},
"one_time_keys": {local_user: {device_id: {"alg1:k1": "key1"}}},
},
)
res = self.get_success(
self.handler.claim_one_time_keys(
{local_user: {device_id: {"alg1": 1}}},
self.requester,
timeout=None,
always_include_fallback_keys=False,
)
)
self.assertEqual(
res,
{
"failures": {},
"one_time_keys": {local_user: {device_id: {"alg1:k0": "key0"}}},
},
)
def test_claim_one_time_key_bulk(self) -> None:
"""Like test_claim_one_time_key but claims multiple keys in one handler call."""
@ -336,6 +363,47 @@ class E2eKeysHandlerTestCase(unittest.HomeserverTestCase):
counts_by_alg, expected_counts_by_alg, f"{user_id}:{device_id}"
)
def test_claim_one_time_key_bulk_ordering(self) -> None:
"""Keys returned by the bulk claim call should be returned in the correct order"""
# Alice has lots of keys, uploaded in a specific order
alice = f"@alice:{self.hs.hostname}"
alice_dev = "alice_dev_1"
self.get_success(
self.handler.upload_keys_for_user(
alice,
alice_dev,
{"one_time_keys": {"alg1:k20": 20, "alg1:k21": 21, "alg1:k22": 22}},
)
)
# Advance time by 1s, to ensure that there is a difference in upload time.
self.reactor.advance(1)
self.get_success(
self.handler.upload_keys_for_user(
alice,
alice_dev,
{"one_time_keys": {"alg1:k10": 10, "alg1:k11": 11, "alg1:k12": 12}},
)
)
# Now claim some, and check we get the right ones.
claim_res = self.get_success(
self.handler.claim_one_time_keys(
{alice: {alice_dev: {"alg1": 2}}},
self.requester,
timeout=None,
always_include_fallback_keys=False,
)
)
# We should get the first-uploaded keys, even though they have later key
# ids: a random two of k20, k21, and k22.
self.assertEqual(claim_res["failures"], {})
claimed_keys = claim_res["one_time_keys"]["@alice:test"]["alice_dev_1"]
self.assertEqual(len(claimed_keys), 2)
for key_id in claimed_keys.keys():
self.assertIn(key_id, ["alg1:k20", "alg1:k21", "alg1:k22"])
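These ordering tests line up with the `add_otk_ts_added_index` migration earlier in this commit: with an index covering `(user_id, device_id, algorithm, ts_added_ms)`, claims can be served oldest-upload-first cheaply. Illustrative only, not Synapse's actual query:

    CLAIM_OLDEST_FIRST_SQL = """
        SELECT key_id, key_json
        FROM e2e_one_time_keys_json
        WHERE user_id = ? AND device_id = ? AND algorithm = ?
        ORDER BY ts_added_ms ASC
        LIMIT ?
    """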
def test_fallback_key(self) -> None:
local_user = "@boris:" + self.hs.hostname
device_id = "xyz"

View file

@ -661,9 +661,12 @@ class PartialJoinTestCase(unittest.FederatingHomeserverTestCase):
)
)
with patch.object(
fed_client, "make_membership_event", mock_make_membership_event
), patch.object(fed_client, "send_join", mock_send_join):
with (
patch.object(
fed_client, "make_membership_event", mock_make_membership_event
),
patch.object(fed_client, "send_join", mock_send_join),
):
# Join and check that our join event is rejected
# (The join event is rejected because it doesn't have any signatures)
join_exc = self.get_failure(
@ -708,9 +711,12 @@ class PartialJoinTestCase(unittest.FederatingHomeserverTestCase):
fed_handler = self.hs.get_federation_handler()
store = self.hs.get_datastores().main
with patch.object(
fed_handler, "_sync_partial_state_room", mock_sync_partial_state_room
), patch.object(store, "is_partial_state_room", mock_is_partial_state_room):
with (
patch.object(
fed_handler, "_sync_partial_state_room", mock_sync_partial_state_room
),
patch.object(store, "is_partial_state_room", mock_is_partial_state_room),
):
# Start the partial state sync.
fed_handler._start_partial_state_room_sync("hs1", {"hs2"}, "room_id")
self.assertEqual(mock_sync_partial_state_room.call_count, 1)
@ -760,9 +766,12 @@ class PartialJoinTestCase(unittest.FederatingHomeserverTestCase):
fed_handler = self.hs.get_federation_handler()
store = self.hs.get_datastores().main
with patch.object(
fed_handler, "_sync_partial_state_room", mock_sync_partial_state_room
), patch.object(store, "is_partial_state_room", mock_is_partial_state_room):
with (
patch.object(
fed_handler, "_sync_partial_state_room", mock_sync_partial_state_room
),
patch.object(store, "is_partial_state_room", mock_is_partial_state_room),
):
# Start the partial state sync.
fed_handler._start_partial_state_room_sync("hs1", {"hs2"}, "room_id")
self.assertEqual(mock_sync_partial_state_room.call_count, 1)
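The rewritten `with` blocks in this file (and the `async with` blocks in the room and state stores above) use parenthesised context managers, which are equivalent to the old comma-joined form but allow one manager per line; the syntax is accepted by CPython 3.9's PEG parser and officially documented in Python 3.10. A self-contained example:

    from contextlib import nullcontext

    with (
        nullcontext("a") as a,
        nullcontext("b") as b,
    ):
        assert (a, b) == ("a", "b")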

View file

@ -172,20 +172,25 @@ class TestJoinsLimitedByPerRoomRateLimiter(FederatingHomeserverTestCase):
)
)
with patch.object(
self.handler.federation_handler.federation_client,
"make_membership_event",
mock_make_membership_event,
), patch.object(
self.handler.federation_handler.federation_client,
"send_join",
mock_send_join,
), patch(
"synapse.event_auth._is_membership_change_allowed",
return_value=None,
), patch(
"synapse.handlers.federation_event.check_state_dependent_auth_rules",
return_value=None,
with (
patch.object(
self.handler.federation_handler.federation_client,
"make_membership_event",
mock_make_membership_event,
),
patch.object(
self.handler.federation_handler.federation_client,
"send_join",
mock_send_join,
),
patch(
"synapse.event_auth._is_membership_change_allowed",
return_value=None,
),
patch(
"synapse.handlers.federation_event.check_state_dependent_auth_rules",
return_value=None,
),
):
self.get_success(
self.handler.update_membership(

File diff suppressed because it is too large

View file

@ -20,7 +20,7 @@
from typing import Collection, ContextManager, List, Optional
from unittest.mock import AsyncMock, Mock, patch
from parameterized import parameterized
from parameterized import parameterized, parameterized_class
from twisted.internet import defer
from twisted.test.proto_helpers import MemoryReactor
@ -32,7 +32,13 @@ from synapse.api.room_versions import RoomVersion, RoomVersions
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.federation.federation_base import event_from_pdu_json
from synapse.handlers.sync import SyncConfig, SyncRequestKey, SyncResult, SyncVersion
from synapse.handlers.sync import (
SyncConfig,
SyncRequestKey,
SyncResult,
SyncVersion,
TimelineBatch,
)
from synapse.rest import admin
from synapse.rest.client import knock, login, room
from synapse.server import HomeServer
@ -58,9 +64,21 @@ def generate_request_key() -> SyncRequestKey:
return ("request_key", _request_key)
@parameterized_class(
("use_state_after",),
[
(True,),
(False,),
],
class_name_func=lambda cls,
num,
params_dict: f"{cls.__name__}_{'state_after' if params_dict['use_state_after'] else 'state'}",
)
class SyncTestCase(tests.unittest.HomeserverTestCase):
"""Tests Sync Handler."""
use_state_after: bool
servlets = [
admin.register_servlets,
knock.register_servlets,
@ -79,7 +97,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
def test_wait_for_sync_for_user_auth_blocking(self) -> None:
user_id1 = "@user1:test"
user_id2 = "@user2:test"
sync_config = generate_sync_config(user_id1)
sync_config = generate_sync_config(
user_id1, use_state_after=self.use_state_after
)
requester = create_requester(user_id1)
self.reactor.advance(100) # So we get not 0 time
@ -112,7 +132,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
self.auth_blocking._hs_disabled = False
sync_config = generate_sync_config(user_id2)
sync_config = generate_sync_config(
user_id2, use_state_after=self.use_state_after
)
requester = create_requester(user_id2)
e = self.get_failure(
@ -141,7 +163,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
initial_result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
requester,
sync_config=generate_sync_config(user, device_id="dev"),
sync_config=generate_sync_config(
user, device_id="dev", use_state_after=self.use_state_after
),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
)
@ -175,7 +199,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
requester,
sync_config=generate_sync_config(user),
sync_config=generate_sync_config(
user, use_state_after=self.use_state_after
),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
)
@ -188,7 +214,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
requester,
sync_config=generate_sync_config(user, device_id="dev"),
sync_config=generate_sync_config(
user, device_id="dev", use_state_after=self.use_state_after
),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
since_token=initial_result.next_batch,
@ -220,7 +248,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
requester,
sync_config=generate_sync_config(user),
sync_config=generate_sync_config(
user, use_state_after=self.use_state_after
),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
)
@ -233,7 +263,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
requester,
sync_config=generate_sync_config(user, device_id="dev"),
sync_config=generate_sync_config(
user, device_id="dev", use_state_after=self.use_state_after
),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
since_token=initial_result.next_batch,
@ -276,7 +308,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
alice_sync_result: SyncResult = self.get_success(
self.sync_handler.wait_for_sync_for_user(
create_requester(owner),
generate_sync_config(owner),
generate_sync_config(owner, use_state_after=self.use_state_after),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
)
@ -296,7 +328,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
# Eve syncs.
eve_requester = create_requester(eve)
eve_sync_config = generate_sync_config(eve)
eve_sync_config = generate_sync_config(
eve, use_state_after=self.use_state_after
)
eve_sync_after_ban: SyncResult = self.get_success(
self.sync_handler.wait_for_sync_for_user(
eve_requester,
@ -367,7 +401,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
initial_sync_result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
alice_requester,
generate_sync_config(alice),
generate_sync_config(alice, use_state_after=self.use_state_after),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
)
@ -396,6 +430,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
filter_collection=FilterCollection(
self.hs, {"room": {"timeline": {"limit": 2}}}
),
use_state_after=self.use_state_after,
),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
@ -442,7 +477,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
initial_sync_result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
alice_requester,
generate_sync_config(alice),
generate_sync_config(alice, use_state_after=self.use_state_after),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
)
@ -481,6 +516,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
}
},
),
use_state_after=self.use_state_after,
),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
@ -518,6 +554,8 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
... and a filter that means we only return 1 event, represented by the dashed
horizontal lines: `S2` must be included in the `state` section on the second sync.
When `use_state_after` is enabled, we expect to see `s2` in the first sync.
"""
alice = self.register_user("alice", "password")
alice_tok = self.login(alice, "password")
@ -528,7 +566,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
initial_sync_result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
alice_requester,
generate_sync_config(alice),
generate_sync_config(alice, use_state_after=self.use_state_after),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
)
@ -554,6 +592,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
filter_collection=FilterCollection(
self.hs, {"room": {"timeline": {"limit": 1}}}
),
use_state_after=self.use_state_after,
),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
@ -567,10 +606,18 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
[e.event_id for e in room_sync.timeline.events],
[e3_event],
)
self.assertEqual(
[e.event_id for e in room_sync.state.values()],
[],
)
if self.use_state_after:
# When using `state_after` we get told about s2 immediately
self.assertEqual(
[e.event_id for e in room_sync.state.values()],
[s2_event],
)
else:
self.assertEqual(
[e.event_id for e in room_sync.state.values()],
[],
)
# Now send another event that points to S2, but not E3.
with self._patch_get_latest_events([s2_event]):
@ -585,6 +632,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
filter_collection=FilterCollection(
self.hs, {"room": {"timeline": {"limit": 1}}}
),
use_state_after=self.use_state_after,
),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
@ -598,10 +646,19 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
[e.event_id for e in room_sync.timeline.events],
[e4_event],
)
self.assertEqual(
[e.event_id for e in room_sync.state.values()],
[s2_event],
)
if self.use_state_after:
# When using `state_after` we got told about s2 previously, so we
# are not told about it again.
self.assertEqual(
[e.event_id for e in room_sync.state.values()],
[],
)
else:
self.assertEqual(
[e.event_id for e in room_sync.state.values()],
[s2_event],
)
def test_state_includes_changes_on_ungappy_syncs(self) -> None:
"""Test `state` where the sync is not gappy.
@ -638,6 +695,8 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
This is the last chance for us to tell the client about S2, so it *must* be
included in the response.
When `use_state_after` is enabled, we expect to see `s2` in the first sync.
"""
alice = self.register_user("alice", "password")
alice_tok = self.login(alice, "password")
@ -648,7 +707,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
initial_sync_result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
alice_requester,
generate_sync_config(alice),
generate_sync_config(alice, use_state_after=self.use_state_after),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
)
@ -673,6 +732,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
filter_collection=FilterCollection(
self.hs, {"room": {"timeline": {"limit": 1}}}
),
use_state_after=self.use_state_after,
),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
@ -684,7 +744,11 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
[e.event_id for e in room_sync.timeline.events],
[e3_event],
)
self.assertNotIn(s2_event, [e.event_id for e in room_sync.state.values()])
if self.use_state_after:
# When using `state_after` we get told about s2 immediately
self.assertIn(s2_event, [e.event_id for e in room_sync.state.values()])
else:
self.assertNotIn(s2_event, [e.event_id for e in room_sync.state.values()])
# More events, E4 and E5
with self._patch_get_latest_events([e3_event]):
@ -695,7 +759,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
incremental_sync = self.get_success(
self.sync_handler.wait_for_sync_for_user(
alice_requester,
generate_sync_config(alice),
generate_sync_config(alice, use_state_after=self.use_state_after),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
since_token=initial_sync_result.next_batch,
@ -710,10 +774,19 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
[e.event_id for e in room_sync.timeline.events],
[e4_event, e5_event],
)
self.assertEqual(
[e.event_id for e in room_sync.state.values()],
[s2_event],
)
if self.use_state_after:
# When using `state_after` we got told about s2 previously, so we
# are not told about it again.
self.assertEqual(
[e.event_id for e in room_sync.state.values()],
[],
)
else:
self.assertEqual(
[e.event_id for e in room_sync.state.values()],
[s2_event],
)
@parameterized.expand(
[
@ -721,7 +794,8 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
(True, False),
(False, True),
(True, True),
]
],
name_func=lambda func, num, p: f"{func.__name__}_{p.args[0]}_{p.args[1]}",
)
def test_archived_rooms_do_not_include_state_after_leave(
self, initial_sync: bool, empty_timeline: bool
@ -749,7 +823,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
initial_sync_result = self.get_success(
self.sync_handler.wait_for_sync_for_user(
bob_requester,
generate_sync_config(bob),
generate_sync_config(bob, use_state_after=self.use_state_after),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
)
@ -780,7 +854,9 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
self.sync_handler.wait_for_sync_for_user(
bob_requester,
generate_sync_config(
bob, filter_collection=FilterCollection(self.hs, filter_dict)
bob,
filter_collection=FilterCollection(self.hs, filter_dict),
use_state_after=self.use_state_after,
),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
@ -791,7 +867,15 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
if empty_timeline:
# The timeline should be empty
self.assertEqual(sync_room_result.timeline.events, [])
else:
# The last three events in the timeline should be those leading up to the
# leave
self.assertEqual(
[e.event_id for e in sync_room_result.timeline.events[-3:]],
[before_message_event, before_state_event, leave_event],
)
if empty_timeline or self.use_state_after:
# And the state should include the leave event...
self.assertEqual(
sync_room_result.state[("m.room.member", bob)].event_id, leave_event
@ -801,12 +885,6 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
sync_room_result.state[("test_state", "")].event_id, before_state_event
)
else:
# The last three events in the timeline should be those leading up to the
# leave
self.assertEqual(
[e.event_id for e in sync_room_result.timeline.events[-3:]],
[before_message_event, before_state_event, leave_event],
)
# ... And the state should be empty
self.assertEqual(sync_room_result.state, {})
@ -879,7 +957,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
sync_result: SyncResult = self.get_success(
self.sync_handler.wait_for_sync_for_user(
create_requester(user),
generate_sync_config(user),
generate_sync_config(user, use_state_after=self.use_state_after),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
)
@ -928,7 +1006,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
private_sync_result: SyncResult = self.get_success(
self.sync_handler.wait_for_sync_for_user(
create_requester(user2),
generate_sync_config(user2),
generate_sync_config(user2, use_state_after=self.use_state_after),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
)
@ -954,7 +1032,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
sync_result: SyncResult = self.get_success(
self.sync_handler.wait_for_sync_for_user(
create_requester(user),
generate_sync_config(user),
generate_sync_config(user, use_state_after=self.use_state_after),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
)
@ -991,7 +1069,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
sync_d = defer.ensureDeferred(
self.sync_handler.wait_for_sync_for_user(
create_requester(user),
generate_sync_config(user),
generate_sync_config(user, use_state_after=self.use_state_after),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
since_token=since_token,
@ -1046,7 +1124,7 @@ class SyncTestCase(tests.unittest.HomeserverTestCase):
sync_d = defer.ensureDeferred(
self.sync_handler.wait_for_sync_for_user(
create_requester(user),
generate_sync_config(user),
generate_sync_config(user, use_state_after=self.use_state_after),
sync_version=SyncVersion.SYNC_V2,
request_key=generate_request_key(),
since_token=since_token,
@ -1062,6 +1140,7 @@ def generate_sync_config(
user_id: str,
device_id: Optional[str] = "device_id",
filter_collection: Optional[FilterCollection] = None,
use_state_after: bool = False,
) -> SyncConfig:
"""Generate a sync config (with a unique request key).
@ -1069,7 +1148,8 @@ def generate_sync_config(
user_id: user who is syncing.
device_id: device that is syncing. Defaults to "device_id".
filter_collection: filter to apply. Defaults to the default filter (ie,
return everything, with a default limit)
return everything, with a default limit)
use_state_after: whether the `use_state_after` flag was set.
"""
if filter_collection is None:
filter_collection = Filtering(Mock()).DEFAULT_FILTER_COLLECTION
@ -1079,4 +1159,138 @@ def generate_sync_config(
filter_collection=filter_collection,
is_guest=False,
device_id=device_id,
use_state_after=use_state_after,
)
class SyncStateAfterTestCase(tests.unittest.HomeserverTestCase):
"""Tests Sync Handler state behavior when using `use_state_after."""
servlets = [
admin.register_servlets,
knock.register_servlets,
login.register_servlets,
room.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.sync_handler = self.hs.get_sync_handler()
self.store = self.hs.get_datastores().main
# AuthBlocking reads from the hs' config on initialization. We need to
# modify its config instead of the hs'
self.auth_blocking = self.hs.get_auth_blocking()
def test_initial_sync_multiple_deltas(self) -> None:
"""Test that if multiple state deltas have happened during processing of
a full state sync we return the correct state"""
user = self.register_user("user", "password")
tok = self.login("user", "password")
# Create a room as the user and set some custom state.
joined_room = self.helper.create_room_as(user, tok=tok)
first_state = self.helper.send_state(
joined_room, event_type="m.test_event", body={"num": 1}, tok=tok
)
# Take a snapshot of the stream token, to simulate doing an initial sync
# at this point.
end_stream_token = self.hs.get_event_sources().get_current_token()
# Send some state *after* the stream token
self.helper.send_state(
joined_room, event_type="m.test_event", body={"num": 2}, tok=tok
)
# Calculating the full state will return the first state, and not the
# second.
state = self.get_success(
self.sync_handler._compute_state_delta_for_full_sync(
room_id=joined_room,
sync_config=generate_sync_config(user, use_state_after=True),
batch=TimelineBatch(
prev_batch=end_stream_token, events=[], limited=True
),
end_token=end_stream_token,
members_to_fetch=None,
timeline_state={},
joined=True,
)
)
self.assertEqual(state[("m.test_event", "")], first_state["event_id"])
def test_incremental_sync_multiple_deltas(self) -> None:
"""Test that if multiple state deltas have happened since an incremental
state sync we return the correct state"""
user = self.register_user("user", "password")
tok = self.login("user", "password")
# Create a room as the user and set some custom state.
joined_room = self.helper.create_room_as(user, tok=tok)
# Take a snapshot of the stream token, to simulate doing an incremental sync
# from this point.
since_token = self.hs.get_event_sources().get_current_token()
self.helper.send_state(
joined_room, event_type="m.test_event", body={"num": 1}, tok=tok
)
# Send some state *after* the stream token
second_state = self.helper.send_state(
joined_room, event_type="m.test_event", body={"num": 2}, tok=tok
)
end_stream_token = self.hs.get_event_sources().get_current_token()
# Calculating the incremental state will return the second state, and not the
# first.
state = self.get_success(
self.sync_handler._compute_state_delta_for_incremental_sync(
room_id=joined_room,
sync_config=generate_sync_config(user, use_state_after=True),
batch=TimelineBatch(
prev_batch=end_stream_token, events=[], limited=True
),
since_token=since_token,
end_token=end_stream_token,
members_to_fetch=None,
timeline_state={},
)
)
self.assertEqual(state[("m.test_event", "")], second_state["event_id"])
def test_incremental_sync_lazy_loaded_no_timeline(self) -> None:
"""Test that lazy-loading with an empty timeline doesn't return the full
state.
There was a bug where an empty state filter would cause the DB to return
the full state, rather than an empty set.
"""
user = self.register_user("user", "password")
tok = self.login("user", "password")
# Create a room as the user and set some custom state.
joined_room = self.helper.create_room_as(user, tok=tok)
since_token = self.hs.get_event_sources().get_current_token()
end_stream_token = self.hs.get_event_sources().get_current_token()
state = self.get_success(
self.sync_handler._compute_state_delta_for_incremental_sync(
room_id=joined_room,
sync_config=generate_sync_config(user, use_state_after=True),
batch=TimelineBatch(
prev_batch=end_stream_token, events=[], limited=True
),
since_token=since_token,
end_token=end_stream_token,
members_to_fetch=set(),
timeline_state={},
)
)
self.assertEqual(state, {})

View file

@ -27,6 +27,7 @@ from typing import (
Callable,
ContextManager,
Dict,
Generator,
List,
Optional,
Set,
@ -49,7 +50,10 @@ from synapse.http.server import (
respond_with_json,
)
from synapse.http.site import SynapseRequest
from synapse.logging.context import LoggingContext, make_deferred_yieldable
from synapse.logging.context import (
LoggingContext,
make_deferred_yieldable,
)
from synapse.types import JsonDict
from tests.server import FakeChannel, make_request
@ -199,7 +203,7 @@ def make_request_with_cancellation_test(
#
# We would like to trigger a cancellation at the first `await`, re-run the
# request and cancel at the second `await`, and so on. By patching
# `Deferred.__next__`, we can intercept `await`s, track which ones we have or
# `Deferred.__await__`, we can intercept `await`s, track which ones we have or
# have not seen, and force them to block when they wouldn't have.
# The set of previously seen `await`s.
@ -211,7 +215,7 @@ def make_request_with_cancellation_test(
)
for request_number in itertools.count(1):
deferred_patch = Deferred__next__Patch(seen_awaits, request_number)
deferred_patch = Deferred__await__Patch(seen_awaits, request_number)
try:
with mock.patch(
@ -250,6 +254,8 @@ def make_request_with_cancellation_test(
)
if respond_mock.called:
_log_for_request(request_number, "--- response finished ---")
# The request ran to completion and we are done with testing it.
# `respond_with_json` writes the response asynchronously, so we
@ -311,8 +317,8 @@ def make_request_with_cancellation_test(
assert False, "unreachable" # noqa: B011
class Deferred__next__Patch:
"""A `Deferred.__next__` patch that will intercept `await`s and force them
class Deferred__await__Patch:
"""A `Deferred.__await__` patch that will intercept `await`s and force them
to block once it sees a new `await`.
When done with the patch, `unblock_awaits()` must be called to clean up after any
@ -322,7 +328,7 @@ class Deferred__next__Patch:
Usage:
seen_awaits = set()
deferred_patch = Deferred__next__Patch(seen_awaits, 1)
deferred_patch = Deferred__await__Patch(seen_awaits, 1)
try:
with deferred_patch.patch():
# do things
@ -335,14 +341,14 @@ class Deferred__next__Patch:
"""
Args:
seen_awaits: The set of stack traces of `await`s that have been previously
seen. When the `Deferred.__next__` patch sees a new `await`, it will add
seen. When the `Deferred.__await__` patch sees a new `await`, it will add
it to the set.
request_number: The request number to log against.
"""
self._request_number = request_number
self._seen_awaits = seen_awaits
self._original_Deferred___next__ = Deferred.__next__ # type: ignore[misc,unused-ignore]
self._original_Deferred__await__ = Deferred.__await__ # type: ignore[misc,unused-ignore]
# The number of `await`s on `Deferred`s we have seen so far.
self.awaits_seen = 0
@ -350,8 +356,13 @@ class Deferred__next__Patch:
# Whether we have seen a new `await` not in `seen_awaits`.
self.new_await_seen = False
# Whether to block new await points we see. This gets set to False once
# we have cancelled the request to allow things to run after
# cancellation.
self._block_new_awaits = True
# To force `await`s on resolved `Deferred`s to block, we make up a new
# unresolved `Deferred` and return it out of `Deferred.__next__` /
# unresolved `Deferred` and return it out of `Deferred.__await__` /
# `coroutine.send()`. We have to resolve it later, in case the `await`ing
# coroutine is part of some shared processing, such as `@cached`.
self._to_unblock: Dict[Deferred, Union[object, Failure]] = {}
@ -360,15 +371,15 @@ class Deferred__next__Patch:
self._previous_stack: List[inspect.FrameInfo] = []
def patch(self) -> ContextManager[Mock]:
"""Returns a context manager which patches `Deferred.__next__`."""
"""Returns a context manager which patches `Deferred.__await__`."""
def Deferred___next__(
deferred: "Deferred[T]", value: object = None
) -> "Deferred[T]":
"""Intercepts `await`s on `Deferred`s and rigs them to block once we have
seen enough of them.
def Deferred___await__(
deferred: "Deferred[T]",
) -> Generator["Deferred[T]", None, T]:
"""Intercepts calls to `__await__`, which returns a generator
yielding deferreds that we await on.
`Deferred.__next__` will normally:
The generator for `__await__` will normally:
* return `self` if the `Deferred` is unresolved, in which case
`coroutine.send()` will return the `Deferred`, and
`_defer.inlineCallbacks` will stop running the coroutine until the
@ -376,9 +387,43 @@ class Deferred__next__Patch:
* raise a `StopIteration(result)`, containing the result of the `await`.
* raise another exception, which will come out of the `await`.
"""
# Get the original generator.
gen = self._original_Deferred__await__(deferred)
# Run the generator, handling each iteration to see if we need to
# block.
try:
while True:
# We've hit a new await point (or the deferred has
# completed), handle it.
handle_next_iteration(deferred)
# Continue on.
yield gen.send(None)
except StopIteration as e:
# We need to convert `StopIteration` into a normal return.
return e.value
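The same delegation trick can be shown standalone (assuming nothing beyond Twisted itself): wrap the original `__await__` generator and observe every point at which a coroutine suspends on a `Deferred`.

    from twisted.internet.defer import Deferred

    _orig_await = Deferred.__await__
    suspensions = 0

    def counting_await(self):  # type: ignore[no-untyped-def]
        global suspensions
        gen = _orig_await(self)
        try:
            while True:
                suspensions += 1  # advanced once per (potential) suspension point
                yield gen.send(None)
        except StopIteration as e:
            return e.value

    # Installed the same way as the patch above:
    #     mock.patch.object(Deferred, "__await__", new=counting_await)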
def handle_next_iteration(
deferred: "Deferred[T]",
) -> None:
"""Intercepts `await`s on `Deferred`s and rigs them to block once we have
seen enough of them.
Args:
deferred: The deferred that we've captured and are intercepting
`await` calls within.
"""
if not self._block_new_awaits:
# We're no longer blocking await points
return
self.awaits_seen += 1
stack = _get_stack(skip_frames=1)
stack = _get_stack(
skip_frames=2 # Ignore this function and `Deferred___await__` in stack trace
)
stack_hash = _hash_stack(stack)
if stack_hash not in self._seen_awaits:
@ -389,20 +434,29 @@ class Deferred__next__Patch:
if not self.new_await_seen:
# This `await` isn't interesting. Let it proceed normally.
_log_await_stack(
stack,
self._previous_stack,
self._request_number,
"already seen",
)
# Don't log the stack. It's been seen before in a previous run.
self._previous_stack = stack
return self._original_Deferred___next__(deferred, value)
return
# We want to block at the current `await`.
if deferred.called and not deferred.paused:
# This `Deferred` already has a result.
# We return a new, unresolved, `Deferred` for `_inlineCallbacks` to wait
# on. This blocks the coroutine that did this `await`.
# This `Deferred` already has a result. We chain a new,
# unresolved, `Deferred` to the end of this Deferred that it
# will wait on. This blocks the coroutine that did this `await`.
# We queue it up for unblocking later.
new_deferred: "Deferred[T]" = Deferred()
self._to_unblock[new_deferred] = deferred.result
deferred.addBoth(lambda _: make_deferred_yieldable(new_deferred))
_log_await_stack(
stack,
self._previous_stack,
@ -411,7 +465,9 @@ class Deferred__next__Patch:
)
self._previous_stack = stack
return make_deferred_yieldable(new_deferred)
# Continue iterating on the deferred now that we've blocked it
# again.
return
# This `Deferred` does not have a result yet.
# The `await` will block normally, so we don't have to do anything.
@ -423,9 +479,9 @@ class Deferred__next__Patch:
)
self._previous_stack = stack
return self._original_Deferred___next__(deferred, value)
return
return mock.patch.object(Deferred, "__next__", new=Deferred___next__)
return mock.patch.object(Deferred, "__await__", new=Deferred___await__)
def unblock_awaits(self) -> None:
"""Unblocks any shared processing that we forced to block.
@ -433,6 +489,9 @@ class Deferred__next__Patch:
Must be called when done, otherwise processing shared between multiple requests,
such as database queries started by `@cached`, will become permanently stuck.
"""
# Also disable blocking at future await points
self._block_new_awaits = False
to_unblock = self._to_unblock
self._to_unblock = {}
for deferred, result in to_unblock.items():
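For context, here is a minimal standalone sketch of the same patching technique used above: wrapping `Deferred.__await__` via `mock.patch.object` so every await point can be observed before delegating to Twisted's real generator. This is an illustrative toy, not Synapse's helper; `counting_await` and `awaits_seen` are made-up names.

from typing import Any, Generator
from unittest import mock

from twisted.internet.defer import Deferred, ensureDeferred

awaits_seen = 0
_original_await = Deferred.__await__

def counting_await(deferred: "Deferred[Any]") -> Generator[Any, None, Any]:
    # Delegate to the real `__await__` generator, observing each iteration.
    gen = _original_await(deferred)
    global awaits_seen
    try:
        while True:
            awaits_seen += 1  # we've hit an await point (or a completion)
            yield gen.send(None)
    except StopIteration as e:
        # Convert `StopIteration` back into a normal return value.
        return e.value

async def main() -> str:
    d: "Deferred[str]" = Deferred()
    d.callback("done")
    return await d

with mock.patch.object(Deferred, "__await__", new=counting_await):
    ensureDeferred(main())
assert awaits_seen >= 1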


@ -903,12 +903,19 @@ class FederationClientProxyTests(BaseMultiWorkerStreamTestCase):
headers=Headers(
{
"Content-Type": ["application/json"],
"Connection": ["close, X-Foo, X-Bar"],
"X-Test": ["test"],
# Define some hop-by-hop headers (try with varying casing to
# make sure we still match up the headers)
"Connection": ["close, X-fOo, X-Bar, X-baz"],
# Should be removed because it's defined in the `Connection` header
"X-Foo": ["foo"],
"X-Bar": ["bar"],
# (not in canonical case)
"x-baZ": ["baz"],
# Should be removed because it's a hop-by-hop header
"Proxy-Authorization": "abcdef",
# Should be removed because it's a hop-by-hop header (not in canonical case)
"transfer-EnCoDiNg": "abcdef",
}
),
)
@ -938,9 +945,17 @@ class FederationClientProxyTests(BaseMultiWorkerStreamTestCase):
header_names = set(headers.keys())
# Make sure the response does not include the hop-by-hop headers
self.assertNotIn(b"X-Foo", header_names)
self.assertNotIn(b"X-Bar", header_names)
self.assertNotIn(b"Proxy-Authorization", header_names)
self.assertIncludes(
header_names,
{
b"Content-Type",
b"X-Test",
# Default headers from Twisted
b"Date",
b"Server",
},
exact=True,
)
# Make sure the response is as expected back on the main worker
self.assertEqual(res, {"foo": "bar"})
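For illustration, a standalone sketch of the case-insensitive filtering behaviour these assertions exercise. `strip_hop_by_hop` and `HOP_BY_HOP` are illustrative names (covering only a subset of the real hop-by-hop list), not Synapse's implementation:

from typing import Dict, Set

HOP_BY_HOP: Set[str] = {"connection", "proxy-authorization", "transfer-encoding"}

def strip_hop_by_hop(headers: Dict[str, str]) -> Dict[str, str]:
    # Collect the extra header names listed in the Connection header itself
    # (a real implementation would look that key up case-insensitively too).
    extra = {
        token.strip().lower()
        for token in headers.get("Connection", "").split(",")
        if token.strip()
    }
    to_remove = HOP_BY_HOP | extra
    # Compare lowercased names so "transfer-EnCoDiNg" etc. are still caught.
    return {k: v for k, v in headers.items() if k.lower() not in to_remove}

assert strip_hop_by_hop(
    {"Connection": "close, X-fOo", "x-foo": "1", "X-Test": "t", "transfer-EnCoDiNg": "x"}
) == {"X-Test": "t"}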


@ -22,27 +22,42 @@ from typing import Set
from parameterized import parameterized
from synapse.http.proxy import (
HOP_BY_HOP_HEADERS_LOWERCASE,
parse_connection_header_value,
)
from tests.unittest import TestCase
def mix_case(s: str) -> str:
"""
Mix up the case of each character in the string (upper or lower case)
"""
return "".join(c.upper() if i % 2 == 0 else c.lower() for i, c in enumerate(s))
class ProxyTests(TestCase):
@parameterized.expand(
[
[b"close, X-Foo, X-Bar", {"Close", "X-Foo", "X-Bar"}],
[b"close, X-Foo, X-Bar", {"close", "x-foo", "x-bar"}],
# No whitespace
[b"close,X-Foo,X-Bar", {"Close", "X-Foo", "X-Bar"}],
[b"close,X-Foo,X-Bar", {"close", "x-foo", "x-bar"}],
# More whitespace
[b"close, X-Foo, X-Bar", {"Close", "X-Foo", "X-Bar"}],
[b"close, X-Foo, X-Bar", {"close", "x-foo", "x-bar"}],
# "close" directive in not the first position
[b"X-Foo, X-Bar, close", {"X-Foo", "X-Bar", "Close"}],
[b"X-Foo, X-Bar, close", {"x-foo", "x-bar", "close"}],
# Normalizes header capitalization
[b"keep-alive, x-fOo, x-bAr", {"Keep-Alive", "X-Foo", "X-Bar"}],
[b"keep-alive, x-fOo, x-bAr", {"keep-alive", "x-foo", "x-bar"}],
# Handles header names with whitespace
[
b"keep-alive, x foo, x bar",
{"Keep-Alive", "X foo", "X bar"},
{"keep-alive", "x foo", "x bar"},
],
# Make sure we handle all of the hop-by-hop headers
[
mix_case(", ".join(HOP_BY_HOP_HEADERS_LOWERCASE)).encode("ascii"),
HOP_BY_HOP_HEADERS_LOWERCASE,
],
]
)
@ -54,7 +69,8 @@ class ProxyTests(TestCase):
"""
Tests that the connection header value is parsed correctly
"""
self.assertIncludes(
expected_extra_headers_to_remove,
parse_connection_header_value(connection_header_value),
exact=True,
)
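As a rough standalone sketch of the lowercase-normalizing parse these cases expect (`parse_connection_header` below is an illustrative stand-in, not the real `parse_connection_header_value`):

from typing import Set

def parse_connection_header(value: bytes) -> Set[str]:
    # Split on commas, strip whitespace, and lowercase each token so that
    # later removal can match header names case-insensitively.
    return {
        token.strip().lower()
        for token in value.decode("ascii").split(",")
        if token.strip()
    }

assert parse_connection_header(b"close,  X-fOo, X-Bar") == {"close", "x-foo", "x-bar"}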


@ -60,7 +60,7 @@ from synapse.util import Clock
from tests import unittest
from tests.server import FakeChannel
from tests.test_utils import SMALL_CMYK_JPEG, SMALL_PNG
from tests.unittest import override_config
from tests.utils import default_config
@ -187,6 +187,68 @@ small_png_with_transparency = TestImage(
# different versions of Pillow.
)
small_cmyk_jpeg = TestImage(
SMALL_CMYK_JPEG,
b"image/jpeg",
b".jpeg",
# These values were sourced simply by seeing what the tests produced at
# the time of writing. If this changes, the tests will fail.
unhexlify(
b"ffd8ffe000104a46494600010100000100010000ffdb00430006"
b"040506050406060506070706080a100a0a09090a140e0f0c1017"
b"141818171416161a1d251f1a1b231c1616202c20232627292a29"
b"191f2d302d283025282928ffdb0043010707070a080a130a0a13"
b"281a161a28282828282828282828282828282828282828282828"
b"2828282828282828282828282828282828282828282828282828"
b"2828ffc00011080020002003012200021101031101ffc4001f00"
b"0001050101010101010000000000000000010203040506070809"
b"0a0bffc400b5100002010303020403050504040000017d010203"
b"00041105122131410613516107227114328191a1082342b1c115"
b"52d1f02433627282090a161718191a25262728292a3435363738"
b"393a434445464748494a535455565758595a636465666768696a"
b"737475767778797a838485868788898a92939495969798999aa2"
b"a3a4a5a6a7a8a9aab2b3b4b5b6b7b8b9bac2c3c4c5c6c7c8c9ca"
b"d2d3d4d5d6d7d8d9dae1e2e3e4e5e6e7e8e9eaf1f2f3f4f5f6f7"
b"f8f9faffc4001f01000301010101010101010100000000000001"
b"02030405060708090a0bffc400b5110002010204040304070504"
b"0400010277000102031104052131061241510761711322328108"
b"144291a1b1c109233352f0156272d10a162434e125f11718191a"
b"262728292a35363738393a434445464748494a53545556575859"
b"5a636465666768696a737475767778797a82838485868788898a"
b"92939495969798999aa2a3a4a5a6a7a8a9aab2b3b4b5b6b7b8b9"
b"bac2c3c4c5c6c7c8c9cad2d3d4d5d6d7d8d9dae2e3e4e5e6e7e8"
b"e9eaf2f3f4f5f6f7f8f9faffda000c03010002110311003f00fa"
b"a68a28a0028a28a0028a28a0028a28a00fffd9"
),
unhexlify(
b"ffd8ffe000104a46494600010100000100010000ffdb00430006"
b"040506050406060506070706080a100a0a09090a140e0f0c1017"
b"141818171416161a1d251f1a1b231c1616202c20232627292a29"
b"191f2d302d283025282928ffdb0043010707070a080a130a0a13"
b"281a161a28282828282828282828282828282828282828282828"
b"2828282828282828282828282828282828282828282828282828"
b"2828ffc00011080001000103012200021101031101ffc4001f00"
b"0001050101010101010000000000000000010203040506070809"
b"0a0bffc400b5100002010303020403050504040000017d010203"
b"00041105122131410613516107227114328191a1082342b1c115"
b"52d1f02433627282090a161718191a25262728292a3435363738"
b"393a434445464748494a535455565758595a636465666768696a"
b"737475767778797a838485868788898a92939495969798999aa2"
b"a3a4a5a6a7a8a9aab2b3b4b5b6b7b8b9bac2c3c4c5c6c7c8c9ca"
b"d2d3d4d5d6d7d8d9dae1e2e3e4e5e6e7e8e9eaf1f2f3f4f5f6f7"
b"f8f9faffc4001f01000301010101010101010100000000000001"
b"02030405060708090a0bffc400b5110002010204040304070504"
b"0400010277000102031104052131061241510761711322328108"
b"144291a1b1c109233352f0156272d10a162434e125f11718191a"
b"262728292a35363738393a434445464748494a53545556575859"
b"5a636465666768696a737475767778797a82838485868788898a"
b"92939495969798999aa2a3a4a5a6a7a8a9aab2b3b4b5b6b7b8b9"
b"bac2c3c4c5c6c7c8c9cad2d3d4d5d6d7d8d9dae2e3e4e5e6e7e8"
b"e9eaf2f3f4f5f6f7f8f9faffda000c03010002110311003f00fa"
b"a68a28a00fffd9"
),
)
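As the comment above notes, these fixture bytes were captured from test output. A hedged sketch of regenerating a comparable CMYK JPEG with Pillow (the exact bytes depend on the Pillow version, so the output will not necessarily match the fixtures):

import io
from binascii import hexlify

from PIL import Image

# 32x32 CMYK, matching the dimensions of the first expected thumbnail above
# (its ffc0 segment encodes a 0x20 x 0x20 image).
img = Image.new("CMYK", (32, 32))
buf = io.BytesIO()
img.save(buf, format="JPEG")
print(hexlify(buf.getvalue()))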
small_lossless_webp = TestImage(
unhexlify(
b"524946461a000000574542505650384c0d0000002f0000001007" b"1011118888fe0700"


@ -20,6 +20,7 @@
#
import json
from typing import Any
from parameterized import parameterized
@ -52,6 +53,7 @@ class OEmbedTests(HomeserverTestCase):
def test_version(self) -> None:
"""Accept versions that are similar to 1.0 as a string or int (or missing)."""
version: Any
for version in ("1.0", 1.0, 1):
result = self.parse_response({"version": version})
# An empty Open Graph response is an error, ensure the URL is included.
@ -69,6 +71,7 @@ class OEmbedTests(HomeserverTestCase):
def test_cache_age(self) -> None:
"""Ensure a cache-age is parsed properly."""
cache_age: Any
# Correct-ish cache ages are allowed.
for cache_age in ("1", 1.0, 1):
result = self.parse_response({"cache_age": cache_age})


@ -120,9 +120,11 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase):
#
# We have seen stringy and null values for "room" in the wild, so presumably
# some of this validation was missing in the past.
with patch("synapse.events.validator.validate_canonicaljson"), patch(
"synapse.events.validator.jsonschema.validate"
), patch("synapse.handlers.event_auth.check_state_dependent_auth_rules"):
with (
patch("synapse.events.validator.validate_canonicaljson"),
patch("synapse.events.validator.jsonschema.validate"),
patch("synapse.handlers.event_auth.check_state_dependent_auth_rules"),
):
pl_event_id = self.helper.send_state(
self.room_id,
"m.room.power_levels",


@ -149,6 +149,7 @@ class PushRuleEvaluatorTestCase(unittest.TestCase):
content: JsonMapping,
*,
related_events: Optional[JsonDict] = None,
msc4210: bool = False,
) -> PushRuleEvaluator:
event = FrozenEvent(
{
@ -174,6 +175,7 @@ class PushRuleEvaluatorTestCase(unittest.TestCase):
related_event_match_enabled=True,
room_version_feature_flags=event.room_version.msc3931_push_features,
msc3931_enabled=True,
msc4210_enabled=msc4210,
)
def test_display_name(self) -> None:
@ -452,6 +454,7 @@ class PushRuleEvaluatorTestCase(unittest.TestCase):
{"value": False},
"incorrect values should not match",
)
value: Any
for value in ("foobaz", 1, 1.1, None, [], {}):
self._assert_not_matches(
condition,
@ -492,6 +495,7 @@ class PushRuleEvaluatorTestCase(unittest.TestCase):
{"value": None},
"exact value should match",
)
value: Any
for value in ("foobaz", True, False, 1, 1.1, [], {}):
self._assert_not_matches(
condition,


@ -96,7 +96,7 @@ class FederationTestCase(unittest.HomeserverTestCase):
self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# unknown order_by
channel = self.make_request(
"GET",
self.url + "?order_by=bar",


@ -82,7 +82,7 @@ class UserMediaStatisticsTestCase(unittest.HomeserverTestCase):
"""
If parameters are invalid, an error is returned.
"""
# unknown order_by
channel = self.make_request(
"GET",
self.url + "?order_by=bar",


@ -23,6 +23,7 @@ import hashlib
import hmac
import json
import os
import time
import urllib.parse
from binascii import unhexlify
from http import HTTPStatus
@ -56,6 +57,7 @@ from synapse.types import JsonDict, UserID, create_requester
from synapse.util import Clock
from tests import unittest
from tests.replication._base import BaseMultiWorkerStreamTestCase
from tests.test_utils import SMALL_PNG
from tests.unittest import override_config
@ -717,7 +719,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# unknown order_by
channel = self.make_request(
"GET",
self.url + "?order_by=bar",
@ -3694,7 +3696,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase):
@parameterized.expand(["GET", "DELETE"])
def test_invalid_parameter(self, method: str) -> None:
"""If parameters are invalid, an error is returned."""
# unknown order_by
channel = self.make_request(
method,
self.url + "?order_by=bar",
@ -5127,7 +5129,6 @@ class UserRedactionTestCase(unittest.HomeserverTestCase):
"""
Test that a request to redact events in all rooms the user is a member of is successful
"""
# join rooms, send some messages
originals = []
for rm in [self.rm1, self.rm2, self.rm3]:
@ -5404,3 +5405,98 @@ class UserRedactionTestCase(unittest.HomeserverTestCase):
matches.append((event_id, event))
# we redacted 6 messages
self.assertEqual(len(matches), 6)
class UserRedactionBackgroundTaskTestCase(BaseMultiWorkerStreamTestCase):
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
admin.register_servlets,
room.register_servlets,
sync.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.admin = self.register_user("thomas", "pass", True)
self.admin_tok = self.login("thomas", "pass")
self.bad_user = self.register_user("teresa", "pass")
self.bad_user_tok = self.login("teresa", "pass")
# create rooms - room versions 11+ store the `redacts` key in content while
# earlier ones don't, so we use a mix of room versions
self.rm1 = self.helper.create_room_as(
self.admin, tok=self.admin_tok, room_version="7"
)
self.rm2 = self.helper.create_room_as(self.admin, tok=self.admin_tok)
self.rm3 = self.helper.create_room_as(
self.admin, tok=self.admin_tok, room_version="11"
)
@override_config({"run_background_tasks_on": "worker1"})
def test_redact_messages_all_rooms(self) -> None:
"""
Test that the redact task successfully runs when `run_background_tasks_on` is specified
"""
self.make_worker_hs(
"synapse.app.generic_worker",
extra_config={
"worker_name": "worker1",
"run_background_tasks_on": "worker1",
"redis": {"enabled": True},
},
)
# join rooms, send some messages
original_event_ids = set()
for rm in [self.rm1, self.rm2, self.rm3]:
join = self.helper.join(rm, self.bad_user, tok=self.bad_user_tok)
original_event_ids.add(join["event_id"])
for i in range(15):
event = {"body": f"hello{i}", "msgtype": "m.text"}
res = self.helper.send_event(
rm, "m.room.message", event, tok=self.bad_user_tok, expect_code=200
)
original_event_ids.add(res["event_id"])
# redact all events in all rooms
channel = self.make_request(
"POST",
f"/_synapse/admin/v1/user/{self.bad_user}/redact",
content={"rooms": []},
access_token=self.admin_tok,
)
self.assertEqual(channel.code, 200)
id = channel.json_body.get("redact_id")
timeout_s = 10
start_time = time.time()
redact_result = ""
while redact_result != "complete":
if start_time + timeout_s < time.time():
self.fail("Timed out waiting for redactions.")
channel2 = self.make_request(
"GET",
f"/_synapse/admin/v1/user/redact_status/{id}",
access_token=self.admin_tok,
)
redact_result = channel2.json_body["status"]
if redact_result == "failed":
self.fail("Redaction task failed.")
redaction_ids = set()
for rm in [self.rm1, self.rm2, self.rm3]:
filter = json.dumps({"types": [EventTypes.Redaction]})
channel = self.make_request(
"GET",
f"rooms/{rm}/messages?filter={filter}&limit=50",
access_token=self.admin_tok,
)
self.assertEqual(channel.code, 200)
for event in channel.json_body["chunk"]:
if event["type"] == "m.room.redaction":
redaction_ids.add(event["redacts"])
self.assertIncludes(redaction_ids, original_event_ids, exact=True)
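Outside the test harness, the same flow looks roughly like this from a client's point of view. A sketch using `requests`; the base URL and token are placeholders, while the endpoint paths and the `redact_id`/`status` fields mirror the test above:

import time

import requests

BASE = "https://homeserver.example"  # placeholder homeserver
HEADERS = {"Authorization": "Bearer <admin access token>"}  # placeholder token

# Kick off the redaction; an empty `rooms` list means all of the user's rooms.
resp = requests.post(
    f"{BASE}/_synapse/admin/v1/user/@teresa:example.com/redact",
    json={"rooms": []},
    headers=HEADERS,
)
redact_id = resp.json()["redact_id"]

# Poll the status endpoint until the task completes (or we give up).
deadline = time.time() + 10
status = ""
while status != "complete":
    if time.time() > deadline:
        raise TimeoutError("Timed out waiting for redactions.")
    status = requests.get(
        f"{BASE}/_synapse/admin/v1/user/redact_status/{redact_id}",
        headers=HEADERS,
    ).json()["status"]
    if status == "failed":
        raise RuntimeError("Redaction task failed.")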


@ -381,10 +381,10 @@ class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase):
)
self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
def test_rooms_required_state_lazy_loading_room_members_initial_sync(self) -> None:
"""
On initial sync, test `rooms.required_state` returns people relevant to the
timeline when lazy-loading room members, `["m.room.member","$LAZY"]`.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
@ -432,6 +432,255 @@ class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase):
)
self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
def test_rooms_required_state_lazy_loading_room_members_incremental_sync(
self,
) -> None:
"""
On incremental sync, test `rooms.required_state` returns people relevant to the
timeline when lazy-loading room members, `["m.room.member","$LAZY"]`.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
user3_id = self.register_user("user3", "pass")
user3_tok = self.login(user3_id, "pass")
user4_id = self.register_user("user4", "pass")
user4_tok = self.login(user4_id, "pass")
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id1, user1_id, tok=user1_tok)
self.helper.join(room_id1, user3_id, tok=user3_tok)
self.helper.join(room_id1, user4_id, tok=user4_tok)
self.helper.send(room_id1, "1", tok=user2_tok)
self.helper.send(room_id1, "2", tok=user2_tok)
self.helper.send(room_id1, "3", tok=user2_tok)
# Make the Sliding Sync request with lazy loading for the room members
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [
[EventTypes.Create, ""],
[EventTypes.Member, StateValues.LAZY],
],
"timeline_limit": 3,
}
}
}
response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
# Send more timeline events into the room
self.helper.send(room_id1, "4", tok=user2_tok)
self.helper.send(room_id1, "5", tok=user4_tok)
self.helper.send(room_id1, "6", tok=user4_tok)
# Make an incremental Sliding Sync request
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
# Only user2 and user4 sent events in the last 3 events we see in the `timeline`
# but since we've seen user2 in the last sync (and their membership hasn't
# changed), we should only see user4 here.
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Member, user4_id)],
},
exact=True,
)
self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
def test_rooms_required_state_expand_lazy_loading_room_members_incremental_sync(
self,
) -> None:
"""
Test that when we expand the `required_state` to include lazy-loading room
members, it returns people relevant to the timeline.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
user3_id = self.register_user("user3", "pass")
user3_tok = self.login(user3_id, "pass")
user4_id = self.register_user("user4", "pass")
user4_tok = self.login(user4_id, "pass")
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id1, user1_id, tok=user1_tok)
self.helper.join(room_id1, user3_id, tok=user3_tok)
self.helper.join(room_id1, user4_id, tok=user4_tok)
self.helper.send(room_id1, "1", tok=user2_tok)
self.helper.send(room_id1, "2", tok=user2_tok)
self.helper.send(room_id1, "3", tok=user2_tok)
# Make the Sliding Sync request *without* lazy loading for the room members
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [
[EventTypes.Create, ""],
],
"timeline_limit": 3,
}
}
}
response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
# Send more timeline events into the room
self.helper.send(room_id1, "4", tok=user2_tok)
self.helper.send(room_id1, "5", tok=user4_tok)
self.helper.send(room_id1, "6", tok=user4_tok)
# Expand `required_state` and make an incremental Sliding Sync request *with*
# lazy-loading room members
sync_body["lists"]["foo-list"]["required_state"] = [
[EventTypes.Create, ""],
[EventTypes.Member, StateValues.LAZY],
]
response_body, from_token = self.do_sync(
sync_body, since=from_token, tok=user1_tok
)
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
# Only user2 and user4 sent events in the last 3 events we see in the `timeline`
# and we haven't seen any membership before this sync so we should see both
# users.
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Member, user2_id)],
state_map[(EventTypes.Member, user4_id)],
},
exact=True,
)
self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
# Send a message so the room comes down sync.
self.helper.send(room_id1, "7", tok=user2_tok)
self.helper.send(room_id1, "8", tok=user4_tok)
self.helper.send(room_id1, "9", tok=user4_tok)
# Make another incremental Sliding Sync request
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
# Only user2 and user4 sent events in the last 3 events we see in the `timeline`
# but since we've seen both memberships in the last sync, they shouldn't appear
# again.
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1].get("required_state", []),
set(),
exact=True,
)
self.assertIsNone(response_body["rooms"][room_id1].get("invite_state"))
def test_rooms_required_state_expand_retract_expand_lazy_loading_room_members_incremental_sync(
self,
) -> None:
"""
Test that when we expand `required_state` to include lazy-loading room
members, then retract it and expand it again, it returns people relevant
to the timeline.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
user3_id = self.register_user("user3", "pass")
user3_tok = self.login(user3_id, "pass")
user4_id = self.register_user("user4", "pass")
user4_tok = self.login(user4_id, "pass")
room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
self.helper.join(room_id1, user1_id, tok=user1_tok)
self.helper.join(room_id1, user3_id, tok=user3_tok)
self.helper.join(room_id1, user4_id, tok=user4_tok)
self.helper.send(room_id1, "1", tok=user2_tok)
self.helper.send(room_id1, "2", tok=user2_tok)
self.helper.send(room_id1, "3", tok=user2_tok)
# Make the Sliding Sync request *without* lazy loading for the room members
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [
[EventTypes.Create, ""],
],
"timeline_limit": 3,
}
}
}
response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
# Send more timeline events into the room
self.helper.send(room_id1, "4", tok=user2_tok)
self.helper.send(room_id1, "5", tok=user4_tok)
self.helper.send(room_id1, "6", tok=user4_tok)
# Expand `required_state` and make an incremental Sliding Sync request *with*
# lazy-loading room members
sync_body["lists"]["foo-list"]["required_state"] = [
[EventTypes.Create, ""],
[EventTypes.Member, StateValues.LAZY],
]
response_body, from_token = self.do_sync(
sync_body, since=from_token, tok=user1_tok
)
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
# Only user2 and user4 sent events in the last 3 events we see in the `timeline`
# and we haven't seen any membership before this sync so we should see both
# users because we're lazy-loading the room members.
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Member, user2_id)],
state_map[(EventTypes.Member, user4_id)],
},
exact=True,
)
# Send a message so the room comes down sync.
self.helper.send(room_id1, "msg", tok=user4_tok)
# Retract `required_state` and make an incremental Sliding Sync request
# requesting a few memberships
sync_body["lists"]["foo-list"]["required_state"] = [
[EventTypes.Create, ""],
[EventTypes.Member, StateValues.ME],
[EventTypes.Member, user2_id],
]
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
# We've seen user2's membership in the last sync so we shouldn't see it here
# even though it's requested. We should only see user1's membership.
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Member, user1_id)],
},
exact=True,
)
def test_rooms_required_state_me(self) -> None:
"""
Test `rooms.required_state` correctly handles $ME.
@ -561,7 +810,7 @@ class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase):
)
self.helper.leave(room_id1, user3_id, tok=user3_tok)
# Make an incremental Sliding Sync request
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
# Only user2 and user3 sent events in the 3 events we see in the `timeline`
@ -862,3 +1111,264 @@ class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase):
exact=True,
message=f"Expected only fully-stated rooms to show up for test_key={list_key}.",
)
def test_rooms_required_state_expand(self) -> None:
"""Test that when we expand the required state argument we get the
expanded state, and not just the changes to the newly expanded state."""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
# Create a room with a room name.
room_id1 = self.helper.create_room_as(
user1_id, tok=user1_tok, extra_content={"name": "Foo"}
)
# Only request the state event to begin with
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [
[EventTypes.Create, ""],
],
"timeline_limit": 1,
}
}
}
response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Create, "")],
},
exact=True,
)
# Send a message so the room comes down sync.
self.helper.send(room_id1, "msg", tok=user1_tok)
# Update the sliding sync requests to include the room name
sync_body["lists"]["foo-list"]["required_state"] = [
[EventTypes.Create, ""],
[EventTypes.Name, ""],
]
response_body, from_token = self.do_sync(
sync_body, since=from_token, tok=user1_tok
)
# We should see the room name, even though there haven't been any
# changes.
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Name, "")],
},
exact=True,
)
# Send a message so the room comes down sync.
self.helper.send(room_id1, "msg", tok=user1_tok)
# We should not see any state changes.
response_body, from_token = self.do_sync(
sync_body, since=from_token, tok=user1_tok
)
self.assertIsNone(response_body["rooms"][room_id1].get("required_state"))
def test_rooms_required_state_expand_retract_expand(self) -> None:
"""Test that when expanding, retracting and then expanding the required
state, we get the changes that happened."""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
# Create a room with a room name.
room_id1 = self.helper.create_room_as(
user1_id, tok=user1_tok, extra_content={"name": "Foo"}
)
# Only request the state event to begin with
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [
[EventTypes.Create, ""],
],
"timeline_limit": 1,
}
}
}
response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Create, "")],
},
exact=True,
)
# Send a message so the room comes down sync.
self.helper.send(room_id1, "msg", tok=user1_tok)
# Update the sliding sync requests to include the room name
sync_body["lists"]["foo-list"]["required_state"] = [
[EventTypes.Create, ""],
[EventTypes.Name, ""],
]
response_body, from_token = self.do_sync(
sync_body, since=from_token, tok=user1_tok
)
# We should see the room name, even though there haven't been any
# changes.
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Name, "")],
},
exact=True,
)
# Update the room name
self.helper.send_state(
room_id1, "m.room.name", {"name": "Bar"}, state_key="", tok=user1_tok
)
# Update the sliding sync requests to exclude the room name again
sync_body["lists"]["foo-list"]["required_state"] = [
[EventTypes.Create, ""],
]
response_body, from_token = self.do_sync(
sync_body, since=from_token, tok=user1_tok
)
# We should not see the updated room name in state (though it will be in
# the timeline).
self.assertIsNone(response_body["rooms"][room_id1].get("required_state"))
# Send a message so the room comes down sync.
self.helper.send(room_id1, "msg", tok=user1_tok)
# Update the sliding sync requests to include the room name again
sync_body["lists"]["foo-list"]["required_state"] = [
[EventTypes.Create, ""],
[EventTypes.Name, ""],
]
response_body, from_token = self.do_sync(
sync_body, since=from_token, tok=user1_tok
)
# We should see the *new* room name, even though there haven't been any
# changes.
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Name, "")],
},
exact=True,
)
def test_rooms_required_state_expand_deduplicate(self) -> None:
"""Test that when expanding, retracting and then expanding the required
state, we don't get the state down again if it hasn't changed."""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
# Create a room with a room name.
room_id1 = self.helper.create_room_as(
user1_id, tok=user1_tok, extra_content={"name": "Foo"}
)
# Only request the state event to begin with
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 1]],
"required_state": [
[EventTypes.Create, ""],
],
"timeline_limit": 1,
}
}
}
response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
state_map = self.get_success(
self.storage_controllers.state.get_current_state(room_id1)
)
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Create, "")],
},
exact=True,
)
# Send a message so the room comes down sync.
self.helper.send(room_id1, "msg", tok=user1_tok)
# Update the sliding sync requests to include the room name
sync_body["lists"]["foo-list"]["required_state"] = [
[EventTypes.Create, ""],
[EventTypes.Name, ""],
]
response_body, from_token = self.do_sync(
sync_body, since=from_token, tok=user1_tok
)
# We should see the room name, even though there haven't been any
# changes.
self._assertRequiredStateIncludes(
response_body["rooms"][room_id1]["required_state"],
{
state_map[(EventTypes.Name, "")],
},
exact=True,
)
# Send a message so the room comes down sync.
self.helper.send(room_id1, "msg", tok=user1_tok)
# Update the sliding sync requests to exclude the room name again
sync_body["lists"]["foo-list"]["required_state"] = [
[EventTypes.Create, ""],
]
response_body, from_token = self.do_sync(
sync_body, since=from_token, tok=user1_tok
)
# We should not see any state updates
self.assertIsNone(response_body["rooms"][room_id1].get("required_state"))
# Send a message so the room comes down sync.
self.helper.send(room_id1, "msg", tok=user1_tok)
# Update the sliding sync requests to include the room name again
sync_body["lists"]["foo-list"]["required_state"] = [
[EventTypes.Create, ""],
[EventTypes.Name, ""],
]
response_body, from_token = self.do_sync(
sync_body, since=from_token, tok=user1_tok
)
# We should not see the room name again, as we have already sent that
# down.
self.assertIsNone(response_body["rooms"][room_id1].get("required_state"))
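To summarize the request shapes these expand/retract tests drive (plain dicts in the same `sync_body` format; `m.room.create` and `m.room.name` are the literal values behind `EventTypes.Create` and `EventTypes.Name`):

initial = {
    "lists": {
        "foo-list": {
            "ranges": [[0, 1]],
            "required_state": [["m.room.create", ""]],
            "timeline_limit": 1,
        }
    }
}

# On a later incremental request the client expands the list; the server is
# expected to send the full current state for the newly requested type once,
# then deduplicate it on subsequent requests.
expanded = {
    "lists": {
        "foo-list": {
            "ranges": [[0, 1]],
            "required_state": [["m.room.create", ""], ["m.room.name", ""]],
            "timeline_limit": 1,
        }
    }
}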


@ -240,6 +240,7 @@ class SlidingSyncBase(unittest.HomeserverTestCase):
self,
invitee_user_id: str,
unsigned_invite_room_state: Optional[List[StrippedStateEvent]],
invite_room_id: Optional[str] = None,
) -> str:
"""
Create a fake invite for a remote room and persist it.
@ -252,19 +253,23 @@ class SlidingSyncBase(unittest.HomeserverTestCase):
invitee_user_id: The person being invited
unsigned_invite_room_state: List of stripped state events to assist the
receiver in identifying the room.
invite_room_id: Optional remote room ID to be invited to. When unset, we
will generate one.
Returns:
The room ID of the remote invite room
"""
store = self.hs.get_datastores().main
invite_room_id = f"!test_room{self._remote_invite_count}:remote_server"
if invite_room_id is None:
invite_room_id = f"!test_room{self._remote_invite_count}:remote_server"
invite_event_dict = {
"room_id": invite_room_id,
"sender": "@inviter:remote_server",
"state_key": invitee_user_id,
"depth": 1,
# Just keep advancing the depth
"depth": self._remote_invite_count,
"origin_server_ts": 1,
"type": EventTypes.Member,
"content": {"membership": Membership.INVITE},
@ -679,6 +684,112 @@ class SlidingSyncTestCase(SlidingSyncBase):
exact=True,
)
def test_rejoin_forgotten_room(self) -> None:
"""
Make sure we can see a forgotten room again if we rejoin (or receive any new
membership, like an invite), since the room is then no longer forgotten.
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
user2_id = self.register_user("user2", "pass")
user2_tok = self.login(user2_id, "pass")
room_id = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
# User1 joins the room
self.helper.join(room_id, user1_id, tok=user1_tok)
# Make the Sliding Sync request
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 99]],
"required_state": [],
"timeline_limit": 0,
}
}
}
response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
# We should see the room (like normal)
self.assertIncludes(
set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
{room_id},
exact=True,
)
# Leave and forget the room
self.helper.leave(room_id, user1_id, tok=user1_tok)
# User1 forgets the room
channel = self.make_request(
"POST",
f"/_matrix/client/r0/rooms/{room_id}/forget",
content={},
access_token=user1_tok,
)
self.assertEqual(channel.code, 200, channel.result)
# Re-join the room
self.helper.join(room_id, user1_id, tok=user1_tok)
# We should see the room again after re-joining
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
self.assertIncludes(
set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
{room_id},
exact=True,
)
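From a client's perspective, the same leave/forget/rejoin flow looks roughly like this (a `requests`-based sketch; the base URL, token, and room ID are placeholders, while the endpoint paths match the Client-Server API calls the test makes):

import requests

BASE = "https://homeserver.example"  # placeholder homeserver
AUTH = {"Authorization": "Bearer <user access token>"}  # placeholder token
room_id = "!room:example.com"  # placeholder room ID

for action in ("leave", "forget", "join"):
    resp = requests.post(
        f"{BASE}/_matrix/client/r0/rooms/{room_id}/{action}",
        json={},
        headers=AUTH,
    )
    resp.raise_for_status()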
def test_invited_to_forgotten_remote_room(self) -> None:
"""
Make sure we can see a forgotten room again if we are invited again
(remote/federated out-of-band memberships)
"""
user1_id = self.register_user("user1", "pass")
user1_tok = self.login(user1_id, "pass")
# Create a remote room invite (out-of-band membership)
room_id = self._create_remote_invite_room_for_user(user1_id, None)
# Make the Sliding Sync request
sync_body = {
"lists": {
"foo-list": {
"ranges": [[0, 99]],
"required_state": [],
"timeline_limit": 0,
}
}
}
response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
# We should see the room (like normal)
self.assertIncludes(
set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
{room_id},
exact=True,
)
# Leave and forget the room
self.helper.leave(room_id, user1_id, tok=user1_tok)
# User1 forgets the room
channel = self.make_request(
"POST",
f"/_matrix/client/r0/rooms/{room_id}/forget",
content={},
access_token=user1_tok,
)
self.assertEqual(channel.code, 200, channel.result)
# Get invited to the room again
self._create_remote_invite_room_for_user(user1_id, None, invite_room_id=room_id)
# We should see the room again after being re-invited
response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
self.assertIncludes(
set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
{room_id},
exact=True,
)
def test_ignored_user_invites_initial_sync(self) -> None:
"""
Make sure we ignore invites if they are from one of the `m.ignored_user_list` on
