Mirror of https://github.com/element-hq/synapse.git (synced 2024-12-20 19:10:45 +03:00)

Merge remote-tracking branch 'origin/release-v1.93' into matrix-org-hotfixes

Commit 7ab1b46029

105 changed files with 1240 additions and 953 deletions
@@ -64,7 +64,7 @@ if not IS_PR:
 {
     "python-version": "3.11",
     "database": "postgres",
-    "postgres-version": "15",
+    "postgres-version": "16",
     "extras": "all",
 }
 )
8 .github/workflows/docker.yml (vendored)
@@ -18,7 +18,7 @@ jobs:
     steps:
       - name: Set up QEMU
         id: qemu
-        uses: docker/setup-qemu-action@v2
+        uses: docker/setup-qemu-action@v3
         with:
           platforms: arm64
@@ -40,13 +40,13 @@ jobs:
           echo "SYNAPSE_VERSION=$(grep "^version" pyproject.toml | sed -E 's/version\s*=\s*["]([^"]*)["]/\1/')" >> $GITHUB_ENV

       - name: Log in to DockerHub
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}

       - name: Log in to GHCR
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
         with:
           registry: ghcr.io
           username: ${{ github.repository_owner }}
@@ -68,7 +68,7 @@ jobs:
             type=pep440,pattern={{raw}}

       - name: Build and push all platforms
-        uses: docker/build-push-action@v4
+        uses: docker/build-push-action@v5
         with:
           push: true
           labels: |
4 .github/workflows/push_complement_image.yml (vendored)
@@ -48,14 +48,14 @@ jobs:
         with:
           ref: master
       - name: Login to registry
-        uses: docker/login-action@v2
+        uses: docker/login-action@v3
         with:
           registry: ghcr.io
           username: ${{ github.actor }}
           password: ${{ secrets.GITHUB_TOKEN }}
       - name: Work out labels for complement image
         id: meta
-        uses: docker/metadata-action@v4
+        uses: docker/metadata-action@v5
         with:
           images: ghcr.io/${{ github.repository }}/complement-synapse
           tags: |
2 .github/workflows/release-artifacts.yml (vendored)
@@ -134,7 +134,7 @@ jobs:

       - name: Set up QEMU to emulate aarch64
         if: matrix.arch == 'aarch64'
-        uses: docker/setup-qemu-action@v2
+        uses: docker/setup-qemu-action@v3
         with:
           platforms: arm64
106 CHANGES.md
@@ -1,3 +1,109 @@
# Synapse 1.93.0rc1 (2023-09-19)

### Features

- Add automatic purge after all users have forgotten a room. Also restores purge/shutdown of rooms after a Synapse restart. ([\#15488](https://github.com/matrix-org/synapse/issues/15488))
- Support resolving homeservers using `matrix-fed` DNS SRV records from [MSC4040](https://github.com/matrix-org/matrix-spec-proposals/pull/4040); an example record is sketched after this list. ([\#16137](https://github.com/matrix-org/synapse/issues/16137))
- Add the ability to use `G` (GiB) and `T` (TiB) suffixes in configuration options that refer to numbers of bytes; see the sketch after this list. ([\#16219](https://github.com/matrix-org/synapse/issues/16219))
- Add span information to requests sent to appservices. Contributed by MTRNord. ([\#16227](https://github.com/matrix-org/synapse/issues/16227))
- Add the ability to enable/disable registrations when using CAS. Contributed by Aurélien Grimpard. ([\#16262](https://github.com/matrix-org/synapse/issues/16262))
- Allow `/notifications` endpoint to be routed to workers. ([\#16265](https://github.com/matrix-org/synapse/issues/16265))
- Enable users to easily unsubscribe from notification emails via the `List-Unsubscribe` header. ([\#16274](https://github.com/matrix-org/synapse/issues/16274))
- Report whether a user is `locked` in the [List Accounts admin API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html#list-accounts), and exclude locked users by default. ([\#16328](https://github.com/matrix-org/synapse/issues/16328))

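For the MSC4040 entry above, a hedged sketch of what such a record could look like in a DNS zone. Hostnames, TTL, priority and weight are placeholders; only the `_matrix-fed._tcp` service label comes from MSC4040:

```
; Hypothetical zone entry delegating federation traffic for example.com
; to synapse.example.com on port 8448, per MSC4040's matrix-fed SRV scheme.
_matrix-fed._tcp.example.com. 3600 IN SRV 10 5 8448 synapse.example.com.
```

And for the new byte-size suffixes, a minimal sketch assuming `max_upload_size` (an existing option that takes a number of bytes):

```yaml
# Sketch only: K, M, G and T suffixes are now all accepted
# for options that are measured in bytes.
max_upload_size: 1G
```
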
### Bugfixes

- Fix a long-standing bug where multi-device accounts could cause high load due to presence. ([\#16066](https://github.com/matrix-org/synapse/issues/16066), [\#16170](https://github.com/matrix-org/synapse/issues/16170), [\#16171](https://github.com/matrix-org/synapse/issues/16171), [\#16172](https://github.com/matrix-org/synapse/issues/16172), [\#16174](https://github.com/matrix-org/synapse/issues/16174))
- Fix a long-standing bug where appservices using [MSC2409](https://github.com/matrix-org/matrix-spec-proposals/pull/2409) to receive `to_device` messages would only get messages for one user. ([\#16251](https://github.com/matrix-org/synapse/issues/16251))
- Fix bug when using workers where Synapse could end up re-requesting the same remote device repeatedly. ([\#16252](https://github.com/matrix-org/synapse/issues/16252))
- Fix long-standing bug where we kept re-requesting a remote server's key repeatedly, potentially causing delays in receiving events over federation. ([\#16257](https://github.com/matrix-org/synapse/issues/16257))
- Avoid temporary storage of sensitive information. ([\#16272](https://github.com/matrix-org/synapse/issues/16272))
- Fix bug introduced in Synapse 1.49.0 when using dehydrated devices ([MSC2697](https://github.com/matrix-org/matrix-spec-proposals/pull/2697)) and refresh tokens. Contributed by Hanadi. ([\#16288](https://github.com/matrix-org/synapse/issues/16288))
- Fix a long-standing bug where invalid receipts would be accepted. ([\#16327](https://github.com/matrix-org/synapse/issues/16327))
- Use standard name for UTF-8 charset in emails. ([\#16329](https://github.com/matrix-org/synapse/issues/16329))

### Improved Documentation

- Fix typos in the documentation. ([\#16282](https://github.com/matrix-org/synapse/issues/16282))
- Link to the Alpine Linux community package for Synapse. ([\#16304](https://github.com/matrix-org/synapse/issues/16304))
- Use strings for `federation_client_minimum_tls_version` documentation examples. Contributed by @jcgruenhage. ([\#16353](https://github.com/matrix-org/synapse/issues/16353))

### Internal Changes

- Allow modules to delete rooms. ([\#15997](https://github.com/matrix-org/synapse/issues/15997))
- Add GCC and GNU Make to the Nix flake development environment so that `ruff` can be compiled. ([\#16090](https://github.com/matrix-org/synapse/issues/16090), [\#16263](https://github.com/matrix-org/synapse/issues/16263))
- Fix type checking when using the new version of Twisted. ([\#16235](https://github.com/matrix-org/synapse/issues/16235))
- Delete device messages asynchronously and in staged batches using the task scheduler. ([\#16240](https://github.com/matrix-org/synapse/issues/16240), [\#16311](https://github.com/matrix-org/synapse/issues/16311), [\#16312](https://github.com/matrix-org/synapse/issues/16312), [\#16313](https://github.com/matrix-org/synapse/issues/16313))
- Bump minimum supported Rust version to 1.61.0. ([\#16248](https://github.com/matrix-org/synapse/issues/16248))
- Update rust to version 1.71.1 in the nix development environment. ([\#16260](https://github.com/matrix-org/synapse/issues/16260))
- Simplify server key storage. ([\#16261](https://github.com/matrix-org/synapse/issues/16261))
- Reduce CPU overhead of change password endpoint. ([\#16264](https://github.com/matrix-org/synapse/issues/16264))
- Stop purging from tables slated for removal. ([\#16273](https://github.com/matrix-org/synapse/issues/16273))
- Improve type hints. ([\#16276](https://github.com/matrix-org/synapse/issues/16276), [\#16301](https://github.com/matrix-org/synapse/issues/16301), [\#16325](https://github.com/matrix-org/synapse/issues/16325), [\#16326](https://github.com/matrix-org/synapse/issues/16326))
- Raise `setuptools_rust` version cap to 1.7.0. ([\#16277](https://github.com/matrix-org/synapse/issues/16277))
- Fix the new task scheduler using lots of CPU. ([\#16278](https://github.com/matrix-org/synapse/issues/16278))
- Upgrade CI run of Python 3.12 from rc1 to rc2. ([\#16280](https://github.com/matrix-org/synapse/issues/16280))
- Include values in SQL debug when using `execute_values` with Postgres. ([\#16281](https://github.com/matrix-org/synapse/issues/16281))
- Enable additional linting checks. ([\#16283](https://github.com/matrix-org/synapse/issues/16283))
- Don't try refetching device lists for users on remote hosts that are marked as "down". ([\#16298](https://github.com/matrix-org/synapse/issues/16298))
- Refactor `receipts_graph` Postgres transactions to stop error messages. ([\#16299](https://github.com/matrix-org/synapse/issues/16299))
- Small improvements to logging in replication code. ([\#16309](https://github.com/matrix-org/synapse/issues/16309))
- Remove a reference cycle in background processes. ([\#16314](https://github.com/matrix-org/synapse/issues/16314))
- Only use literal strings for background process names. ([\#16315](https://github.com/matrix-org/synapse/issues/16315))
- Refactor `get_user_by_id`. ([\#16316](https://github.com/matrix-org/synapse/issues/16316))
- Speed up task to delete to-device messages. ([\#16318](https://github.com/matrix-org/synapse/issues/16318))
- Avoid patching code in tests. ([\#16349](https://github.com/matrix-org/synapse/issues/16349))
- Test against PostgreSQL 16. ([\#16351](https://github.com/matrix-org/synapse/issues/16351))

### Updates to locked dependencies

* Bump mypy from 1.4.1 to 1.5.1. ([\#16300](https://github.com/matrix-org/synapse/issues/16300))
* Bump black from 23.7.0 to 23.9.1. ([\#16295](https://github.com/matrix-org/synapse/issues/16295))
* Bump docker/build-push-action from 4 to 5. ([\#16336](https://github.com/matrix-org/synapse/issues/16336))
* Bump docker/login-action from 2 to 3. ([\#16339](https://github.com/matrix-org/synapse/issues/16339))
* Bump docker/metadata-action from 4 to 5. ([\#16337](https://github.com/matrix-org/synapse/issues/16337))
* Bump docker/setup-qemu-action from 2 to 3. ([\#16338](https://github.com/matrix-org/synapse/issues/16338))
* Bump furo from 2023.8.19 to 2023.9.10. ([\#16340](https://github.com/matrix-org/synapse/issues/16340))
* Bump gitpython from 3.1.32 to 3.1.35. ([\#16267](https://github.com/matrix-org/synapse/issues/16267), [\#16279](https://github.com/matrix-org/synapse/issues/16279))
* Bump mypy-zope from 1.0.0 to 1.0.1. ([\#16291](https://github.com/matrix-org/synapse/issues/16291))
* Bump pillow from 10.0.0 to 10.0.1. ([\#16344](https://github.com/matrix-org/synapse/issues/16344))
* Bump regex from 1.9.4 to 1.9.5. ([\#16233](https://github.com/matrix-org/synapse/issues/16233))
* Bump ruff from 0.0.286 to 0.0.290. ([\#16342](https://github.com/matrix-org/synapse/issues/16342))
* Bump serde_json from 1.0.105 to 1.0.107. ([\#16296](https://github.com/matrix-org/synapse/issues/16296), [\#16345](https://github.com/matrix-org/synapse/issues/16345))
* Bump twisted from 22.10.0 to 23.8.0. ([\#16235](https://github.com/matrix-org/synapse/issues/16235))
* Bump types-pillow from 10.0.0.2 to 10.0.0.3. ([\#16293](https://github.com/matrix-org/synapse/issues/16293))
* Bump types-setuptools from 68.0.0.3 to 68.2.0.0. ([\#16292](https://github.com/matrix-org/synapse/issues/16292))
* Bump typing-extensions from 4.7.1 to 4.8.0. ([\#16341](https://github.com/matrix-org/synapse/issues/16341))

# Synapse 1.92.3 (2023-09-18)

This is again a security update targeted at mitigating [CVE-2023-4863](https://cve.org/CVERecord?id=CVE-2023-4863).
It turns out that libwebp is bundled statically in Pillow wheels, so we need to update this dependency instead of the
libwebp package at the OS level.

Unlike what was advertised in the 1.92.2 changelog, this release also impacts PyPI wheels and Debian packages from matrix.org.

We encourage admins to upgrade as soon as possible; a quick version check is sketched below.
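Since the fix ships inside the Pillow wheel rather than an OS package, one way to confirm it landed is to query the Pillow version from the Python environment that runs Synapse. A minimal sketch; the interpreter invocation is whatever your deployment uses:

```
# Print the Pillow version seen by Synapse's Python environment;
# it should be 10.0.1 or newer once the update is applied.
python -c "import PIL; print(PIL.__version__)"
```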

### Internal Changes

- Pillow 10.0.1 is now mandatory because of libwebp CVE-2023-4863, since Pillow provides libwebp in its wheels. ([\#16347](https://github.com/matrix-org/synapse/issues/16347))

### Updates to locked dependencies

* Bump pillow from 10.0.0 to 10.0.1. ([\#16344](https://github.com/matrix-org/synapse/issues/16344))

# Synapse 1.92.2 (2023-09-15)

This is a Docker-only update to mitigate [CVE-2023-4863](https://cve.org/CVERecord?id=CVE-2023-4863), a critical vulnerability in `libwebp`. Server admins not using Docker should ensure that their `libwebp` is up to date (if installed); a quick check is sketched below. We encourage admins to upgrade as soon as possible.
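For non-Docker deployments on Debian-based hosts, a hedged way to see which libwebp runtime is installed (package names are distribution-specific; this diff itself moves from bullseye's `libwebp6` to bookworm's `libwebp7`):

```
# List installed libwebp runtime packages and their versions.
dpkg -l | grep libwebp
```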

### Updates to the Docker image

- Update docker image to use Debian bookworm as the base. ([\#16324](https://github.com/matrix-org/synapse/issues/16324))

# Synapse 1.92.1 (2023-09-12)

This minor release was needed only because of CI-related trouble on [v1.92.0](https://github.com/matrix-org/synapse/releases/tag/v1.92.0), which was never released.
4 Cargo.lock (generated)
@@ -352,9 +352,9 @@ dependencies = [

 [[package]]
 name = "serde_json"
-version = "1.0.106"
+version = "1.0.107"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2cc66a619ed80bf7a0f6b17dd063a84b88f6dea1813737cf469aef1d081142c2"
+checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65"
 dependencies = [
  "itoa",
  "ryu",
@@ -1 +0,0 @@
-Allow modules to delete rooms.

@@ -1 +0,0 @@
-Fix a long-standing bug where multi-device accounts could cause high load due to presence.

@@ -1 +0,0 @@
-Add GCC and GNU Make to the Nix flake development environment so that `ruff` can be compiled.

@@ -1 +0,0 @@
-Support resolving homeservers using `matrix-fed` DNS SRV records from [MSC4040](https://github.com/matrix-org/matrix-spec-proposals/pull/4040).

@@ -1 +0,0 @@
-Fix a long-standing bug where multi-device accounts could cause high load due to presence.

@@ -1 +0,0 @@
-Fix a long-standing bug where multi-device accounts could cause high load due to presence.

@@ -1 +0,0 @@
-Fix a long-standing bug where multi-device accounts could cause high load due to presence.

@@ -1 +0,0 @@
-Fix a long-standing bug where multi-device accounts could cause high load due to presence.

@@ -1 +0,0 @@
-Add the ability to use `G` (GiB) and `T` (TiB) suffixes in configuration options that refer to numbers of bytes.

@@ -1 +0,0 @@
-Add span information to requests sent to appservices. Contributed by MTRNord.

@@ -1 +0,0 @@
-Fix type checking when using the new version of Twisted.

@@ -1 +0,0 @@
-Delete device messages asynchronously and in staged batches using the task scheduler.

@@ -1 +0,0 @@
-Bump minimum supported Rust version to 1.61.0.

@@ -1 +0,0 @@
-Fix a long-standing bug where appservices using MSC2409 to receive to_device messages would only get messages for one user.

@@ -1 +0,0 @@
-Fix bug when using workers where Synapse could end up re-requesting the same remote device repeatedly.

@@ -1 +0,0 @@
-Fix long-standing bug where we kept re-requesting a remote server's key repeatedly, potentially causing delays in receiving events over federation.

@@ -1 +0,0 @@
-Update rust to version 1.71.1 in the nix development environment.

@@ -1 +0,0 @@
-Simplify server key storage.

@@ -1 +0,0 @@
-Add the ability to enable/disable registrations when in the CAS flow. Contributed by Aurélien Grimpard.

@@ -1 +0,0 @@
-Add GCC and GNU Make to the Nix flake development environment so that `ruff` can be compiled.

@@ -1 +0,0 @@
-Reduce CPU overhead of change password endpoint.

@@ -1 +0,0 @@
-Allow `/notifications` endpoint to be routed to workers.

@@ -1 +0,0 @@
-Avoid temporary storage of sensitive information.

@@ -1 +0,0 @@
-Stop purging from tables slated for removal.

@@ -1 +0,0 @@
-Enable users to easily unsubscribe from notification emails via the `List-Unsubscribe` header.

@@ -1 +0,0 @@
-Improve type hints.

@@ -1 +0,0 @@
-Raise setuptools_rust version cap to 1.7.0.

@@ -1 +0,0 @@
-Fix the new task scheduler using lots of CPU.

@@ -1 +0,0 @@
-Upgrade CI run of Python 3.12 from rc1 to rc2.

@@ -1 +0,0 @@
-Include values in SQL debug when using `execute_values` with Postgres.

@@ -1 +0,0 @@
-Fix typos in the documentation.

@@ -1 +0,0 @@
-Enable additional linting checks.

@@ -1 +0,0 @@
-Fix bug introduced in Synapse 1.49.0 when using dehydrated devices ([MSC2697](https://github.com/matrix-org/matrix-spec-proposals/pull/2697)) and refresh tokens. Contributed by Hanadi.

@@ -1 +0,0 @@
-Don't try refetching device lists for users on remote hosts that are marked as "down".

@@ -1 +0,0 @@
-Bump mypy from 1.4.1 to 1.5.1.

@@ -1 +0,0 @@
-Improve type hints.

@@ -1 +0,0 @@
-Link to the Alpine Linux community package for Synapse.

@@ -1 +0,0 @@
-Small improvements to logging in replication code.

@@ -1 +0,0 @@
-Delete device messages asynchronously and in staged batches using the task scheduler.

@@ -1 +0,0 @@
-Delete device messages asynchronously and in staged batches using the task scheduler.

@@ -1 +0,0 @@
-Delete device messages asynchronously and in staged batches using the task scheduler.

@@ -1 +0,0 @@
-Remove a reference cycle in background processes.

@@ -1 +0,0 @@
-Refactor `get_user_by_id`.

@@ -1 +0,0 @@
-Speed up task to delete to-device messages.
18 debian/changelog (vendored)
@@ -1,3 +1,21 @@
matrix-synapse-py3 (1.93.0~rc1) stable; urgency=medium

  * New synapse release 1.93.0rc1.

 -- Synapse Packaging team <packages@matrix.org>  Tue, 19 Sep 2023 11:55:00 +0000

matrix-synapse-py3 (1.92.3) stable; urgency=medium

  * New Synapse release 1.92.3.

 -- Synapse Packaging team <packages@matrix.org>  Mon, 18 Sep 2023 15:05:04 +0200

matrix-synapse-py3 (1.92.2) stable; urgency=medium

  * New Synapse release 1.92.2.

 -- Synapse Packaging team <packages@matrix.org>  Fri, 15 Sep 2023 13:17:41 +0100

matrix-synapse-py3 (1.92.1) stable; urgency=medium

  * New Synapse release 1.92.1.
@@ -25,9 +25,9 @@ ARG PYTHON_VERSION=3.11
 ###
 ### Stage 0: generate requirements.txt
 ###
-# We hardcode the use of Debian bullseye here because this could change upstream
-# and other Dockerfiles used for testing are expecting bullseye.
-FROM docker.io/library/python:${PYTHON_VERSION}-slim-bullseye as requirements
+# We hardcode the use of Debian bookworm here because this could change upstream
+# and other Dockerfiles used for testing are expecting bookworm.
+FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm as requirements

 # RUN --mount is specific to buildkit and is documented at
 # https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount.
@@ -87,7 +87,7 @@ RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \
 ###
 ### Stage 1: builder
 ###
-FROM docker.io/library/python:${PYTHON_VERSION}-slim-bullseye as builder
+FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm as builder

 # install the OS build deps
 RUN \
@@ -158,7 +158,7 @@ RUN --mount=type=cache,target=/synapse/target,sharing=locked \
 ### Stage 2: runtime
 ###

-FROM docker.io/library/python:${PYTHON_VERSION}-slim-bullseye
+FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm

 LABEL org.opencontainers.image.url='https://matrix.org/docs/projects/server/synapse'
 LABEL org.opencontainers.image.documentation='https://github.com/matrix-org/synapse/blob/master/docker/README.md'
@@ -173,10 +173,10 @@ RUN \
       gosu \
       libjpeg62-turbo \
       libpq5 \
-      libwebp6 \
+      libwebp7 \
       xmlsec1 \
       libjemalloc2 \
-      libicu67 \
+      libicu72 \
       libssl-dev \
       openssl \
       && rm -rf /var/lib/apt/lists/*
@@ -7,7 +7,7 @@ ARG FROM=matrixdotorg/synapse:$SYNAPSE_VERSION
 # target image. For repeated rebuilds, this is much faster than apt installing
 # each time.

-FROM docker.io/library/debian:bullseye-slim AS deps_base
+FROM docker.io/library/debian:bookworm-slim AS deps_base
 RUN \
    --mount=type=cache,target=/var/cache/apt,sharing=locked \
    --mount=type=cache,target=/var/lib/apt,sharing=locked \
@@ -21,7 +21,7 @@ FROM docker.io/library/debian:bullseye-slim AS deps_base
 # which makes it much easier to copy (but we need to make sure we use an image
 # based on the same debian version as the synapse image, to make sure we get
 # the expected version of libc.
-FROM docker.io/library/redis:7-bullseye AS redis_base
+FROM docker.io/library/redis:7-bookworm AS redis_base

 # now build the final image, based on the regular Synapse docker image
 FROM $FROM
@@ -20,8 +20,8 @@ FROM $FROM
 # the same debian version as Synapse's docker image (so the versions of the
 # shared libraries match).
 RUN adduser --system --uid 999 postgres --home /var/lib/postgresql
-COPY --from=docker.io/library/postgres:13-bullseye /usr/lib/postgresql /usr/lib/postgresql
-COPY --from=docker.io/library/postgres:13-bullseye /usr/share/postgresql /usr/share/postgresql
+COPY --from=docker.io/library/postgres:13-bookworm /usr/lib/postgresql /usr/lib/postgresql
+COPY --from=docker.io/library/postgres:13-bookworm /usr/share/postgresql /usr/share/postgresql
 RUN mkdir /var/run/postgresql && chown postgres /var/run/postgresql
 ENV PATH="${PATH}:/usr/lib/postgresql/13/bin"
 ENV PGDATA=/var/lib/postgresql/data
@@ -8,9 +8,9 @@ ARG PYTHON_VERSION=3.9
 ###
 ### Stage 0: generate requirements.txt
 ###
-# We hardcode the use of Debian bullseye here because this could change upstream
-# and other Dockerfiles used for testing are expecting bullseye.
-FROM docker.io/library/python:${PYTHON_VERSION}-slim-bullseye
+# We hardcode the use of Debian bookworm here because this could change upstream
+# and other Dockerfiles used for testing are expecting bookworm.
+FROM docker.io/library/python:${PYTHON_VERSION}-slim-bookworm

 # Install Rust and other dependencies (stolen from normal Dockerfile)
 # install the OS build deps
@@ -33,7 +33,7 @@ RUN \
       gosu \
       libjpeg62-turbo \
       libpq5 \
-      libwebp6 \
+      libwebp7 \
       xmlsec1 \
       libjemalloc2 \
       && rm -rf /var/lib/apt/lists/*
@@ -54,7 +54,8 @@ It returns a JSON body like the following:
             "external_id": "<user_id_provider_2>"
         }
     ],
-    "user_type": null
+    "user_type": null,
+    "locked": false
 }
 ```
@@ -103,7 +104,8 @@ with a body of:
     ],
     "admin": false,
     "deactivated": false,
-    "user_type": null
+    "user_type": null,
+    "locked": false
 }
 ```
@@ -184,7 +186,8 @@ A response body like the following is returned:
         "shadow_banned": 0,
         "displayname": "<User One>",
         "avatar_url": null,
-        "creation_ts": 1560432668000
+        "creation_ts": 1560432668000,
+        "locked": false
     }, {
         "name": "<user_id2>",
         "is_guest": 0,
@@ -195,7 +198,8 @@ A response body like the following is returned:
         "shadow_banned": 0,
         "displayname": "<User Two>",
         "avatar_url": "<avatar_url>",
-        "creation_ts": 1561550621000
+        "creation_ts": 1561550621000,
+        "locked": false
     }
     ],
     "next_token": "100",
@@ -249,6 +253,8 @@ The following parameters should be set in the URL:
 - `not_user_type` - Exclude certain user types, such as bot users, from the request.
   Can be provided multiple times. Possible values are `bot`, `support` or "empty string".
   "empty string" here means to exclude users without a type.
+- `locked` - string representing a bool - Is optional and if `true` will **include** locked users.
+  Defaults to `false` to exclude locked users. Note: Introduced in v1.93.

 Caution. The database only has indexes on the columns `name` and `creation_ts`.
 This means that if a different sort order is used (`is_guest`, `admin`,
@@ -274,10 +280,11 @@ The following fields are returned in the JSON response body:
 - `avatar_url` - string - The user's avatar URL if they have set one.
 - `creation_ts` - integer - The user's creation timestamp in ms.
 - `last_seen_ts` - integer - The user's last activity timestamp in ms.
+- `locked` - bool - Whether the user has been marked as locked. Note: Introduced in v1.93.
 - `next_token`: string representing a positive integer - Indication for pagination. See above.
 - `total` - integer - Total number of media.

+*Added in Synapse 1.93:* the `locked` query parameter and response field.
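To illustrate the new parameter, a hedged call to the List Accounts admin endpoint; base URL and token are placeholders:

```
# Include locked users in the listing; omitting locked=true keeps the
# default behaviour of excluding them.
curl --header "Authorization: Bearer <admin_access_token>" \
    "http://localhost:8008/_synapse/admin/v2/users?locked=true&from=0&limit=10"
```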

 ## Query current sessions for a user
@@ -936,6 +936,17 @@ Example configuration:
 redaction_retention_period: 28d
 ```
 ---
+### `forgotten_room_retention_period`
+
+How long to keep locally forgotten rooms before purging them from the DB.
+
+Defaults to `null`, meaning it's disabled.
+
+Example configuration:
+```yaml
+forgotten_room_retention_period: 28d
+```
+---
 ### `user_ips_max_age`

 How long to track users' last seen time and IPs in the database.
@@ -1122,14 +1133,14 @@ federation_verify_certificates: false

 The minimum TLS version that will be used for outbound federation requests.

-Defaults to `1`. Configurable to `1`, `1.1`, `1.2`, or `1.3`. Note
-that setting this value higher than `1.2` will prevent federation to most
-of the public Matrix network: only configure it to `1.3` if you have an
+Defaults to `"1"`. Configurable to `"1"`, `"1.1"`, `"1.2"`, or `"1.3"`. Note
+that setting this value higher than `"1.2"` will prevent federation to most
+of the public Matrix network: only configure it to `"1.3"` if you have an
 entirely private federation setup and you can ensure TLS 1.3 support.

 Example configuration:
 ```yaml
-federation_client_minimum_tls_version: 1.2
+federation_client_minimum_tls_version: "1.2"
 ```
 ---
 ### `federation_certificate_verification_whitelist`
176 poetry.lock (generated)
@@ -555,13 +555,13 @@ dev = ["Sphinx", "coverage", "flake8", "lxml", "lxml-stubs", "memory-profiler",

 [[package]]
 name = "furo"
-version = "2023.8.19"
+version = "2023.9.10"
 description = "A clean customisable Sphinx documentation theme."
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "furo-2023.8.19-py3-none-any.whl", hash = "sha256:12f99f87a1873b6746228cfde18f77244e6c1ffb85d7fed95e638aae70d80590"},
-    {file = "furo-2023.8.19.tar.gz", hash = "sha256:e671ee638ab3f1b472f4033b0167f502ab407830e0db0f843b1c1028119c9cd1"},
+    {file = "furo-2023.9.10-py3-none-any.whl", hash = "sha256:513092538537dc5c596691da06e3c370714ec99bc438680edc1debffb73e5bfc"},
+    {file = "furo-2023.9.10.tar.gz", hash = "sha256:5707530a476d2a63b8cad83b4f961f3739a69f4b058bcf38a03a39fa537195b2"},
 ]

 [package.dependencies]
@@ -1618,67 +1618,65 @@ files = [

 [[package]]
 name = "pillow"
-version = "10.0.0"
+version = "10.0.1"
 description = "Python Imaging Library (Fork)"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "Pillow-10.0.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1f62406a884ae75fb2f818694469519fb685cc7eaff05d3451a9ebe55c646891"},
-    {file = "Pillow-10.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d5db32e2a6ccbb3d34d87c87b432959e0db29755727afb37290e10f6e8e62614"},
-    {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edf4392b77bdc81f36e92d3a07a5cd072f90253197f4a52a55a8cec48a12483b"},
-    {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:520f2a520dc040512699f20fa1c363eed506e94248d71f85412b625026f6142c"},
-    {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:8c11160913e3dd06c8ffdb5f233a4f254cb449f4dfc0f8f4549eda9e542c93d1"},
-    {file = "Pillow-10.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a74ba0c356aaa3bb8e3eb79606a87669e7ec6444be352870623025d75a14a2bf"},
-    {file = "Pillow-10.0.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d5d0dae4cfd56969d23d94dc8e89fb6a217be461c69090768227beb8ed28c0a3"},
-    {file = "Pillow-10.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22c10cc517668d44b211717fd9775799ccec4124b9a7f7b3635fc5386e584992"},
-    {file = "Pillow-10.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:dffe31a7f47b603318c609f378ebcd57f1554a3a6a8effbc59c3c69f804296de"},
-    {file = "Pillow-10.0.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:9fb218c8a12e51d7ead2a7c9e101a04982237d4855716af2e9499306728fb485"},
-    {file = "Pillow-10.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d35e3c8d9b1268cbf5d3670285feb3528f6680420eafe35cccc686b73c1e330f"},
-    {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ed64f9ca2f0a95411e88a4efbd7a29e5ce2cea36072c53dd9d26d9c76f753b3"},
-    {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b6eb5502f45a60a3f411c63187db83a3d3107887ad0d036c13ce836f8a36f1d"},
-    {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:c1fbe7621c167ecaa38ad29643d77a9ce7311583761abf7836e1510c580bf3dd"},
-    {file = "Pillow-10.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:cd25d2a9d2b36fcb318882481367956d2cf91329f6892fe5d385c346c0649629"},
-    {file = "Pillow-10.0.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3b08d4cc24f471b2c8ca24ec060abf4bebc6b144cb89cba638c720546b1cf538"},
-    {file = "Pillow-10.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d737a602fbd82afd892ca746392401b634e278cb65d55c4b7a8f48e9ef8d008d"},
-    {file = "Pillow-10.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:3a82c40d706d9aa9734289740ce26460a11aeec2d9c79b7af87bb35f0073c12f"},
-    {file = "Pillow-10.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:bc2ec7c7b5d66b8ec9ce9f720dbb5fa4bace0f545acd34870eff4a369b44bf37"},
-    {file = "Pillow-10.0.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:d80cf684b541685fccdd84c485b31ce73fc5c9b5d7523bf1394ce134a60c6883"},
-    {file = "Pillow-10.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76de421f9c326da8f43d690110f0e79fe3ad1e54be811545d7d91898b4c8493e"},
-    {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81ff539a12457809666fef6624684c008e00ff6bf455b4b89fd00a140eecd640"},
-    {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce543ed15570eedbb85df19b0a1a7314a9c8141a36ce089c0a894adbfccb4568"},
-    {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:685ac03cc4ed5ebc15ad5c23bc555d68a87777586d970c2c3e216619a5476223"},
-    {file = "Pillow-10.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:d72e2ecc68a942e8cf9739619b7f408cc7b272b279b56b2c83c6123fcfa5cdff"},
-    {file = "Pillow-10.0.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d50b6aec14bc737742ca96e85d6d0a5f9bfbded018264b3b70ff9d8c33485551"},
-    {file = "Pillow-10.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:00e65f5e822decd501e374b0650146063fbb30a7264b4d2744bdd7b913e0cab5"},
-    {file = "Pillow-10.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:f31f9fdbfecb042d046f9d91270a0ba28368a723302786c0009ee9b9f1f60199"},
-    {file = "Pillow-10.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:1ce91b6ec08d866b14413d3f0bbdea7e24dfdc8e59f562bb77bc3fe60b6144ca"},
-    {file = "Pillow-10.0.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:349930d6e9c685c089284b013478d6f76e3a534e36ddfa912cde493f235372f3"},
-    {file = "Pillow-10.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3a684105f7c32488f7153905a4e3015a3b6c7182e106fe3c37fbb5ef3e6994c3"},
-    {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4f69b3700201b80bb82c3a97d5e9254084f6dd5fb5b16fc1a7b974260f89f43"},
-    {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3f07ea8d2f827d7d2a49ecf1639ec02d75ffd1b88dcc5b3a61bbb37a8759ad8d"},
-    {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:040586f7d37b34547153fa383f7f9aed68b738992380ac911447bb78f2abe530"},
-    {file = "Pillow-10.0.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:f88a0b92277de8e3ca715a0d79d68dc82807457dae3ab8699c758f07c20b3c51"},
-    {file = "Pillow-10.0.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c7cf14a27b0d6adfaebb3ae4153f1e516df54e47e42dcc073d7b3d76111a8d86"},
-    {file = "Pillow-10.0.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3400aae60685b06bb96f99a21e1ada7bc7a413d5f49bce739828ecd9391bb8f7"},
-    {file = "Pillow-10.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:dbc02381779d412145331789b40cc7b11fdf449e5d94f6bc0b080db0a56ea3f0"},
-    {file = "Pillow-10.0.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:9211e7ad69d7c9401cfc0e23d49b69ca65ddd898976d660a2fa5904e3d7a9baa"},
-    {file = "Pillow-10.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:faaf07ea35355b01a35cb442dd950d8f1bb5b040a7787791a535de13db15ed90"},
-    {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9f72a021fbb792ce98306ffb0c348b3c9cb967dce0f12a49aa4c3d3fdefa967"},
-    {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f7c16705f44e0504a3a2a14197c1f0b32a95731d251777dcb060aa83022cb2d"},
-    {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:76edb0a1fa2b4745fb0c99fb9fb98f8b180a1bbceb8be49b087e0b21867e77d3"},
-    {file = "Pillow-10.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:368ab3dfb5f49e312231b6f27b8820c823652b7cd29cfbd34090565a015e99ba"},
-    {file = "Pillow-10.0.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:608bfdee0d57cf297d32bcbb3c728dc1da0907519d1784962c5f0c68bb93e5a3"},
-    {file = "Pillow-10.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5c6e3df6bdd396749bafd45314871b3d0af81ff935b2d188385e970052091017"},
-    {file = "Pillow-10.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:7be600823e4c8631b74e4a0d38384c73f680e6105a7d3c6824fcf226c178c7e6"},
-    {file = "Pillow-10.0.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:92be919bbc9f7d09f7ae343c38f5bb21c973d2576c1d45600fce4b74bafa7ac0"},
-    {file = "Pillow-10.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8182b523b2289f7c415f589118228d30ac8c355baa2f3194ced084dac2dbba"},
-    {file = "Pillow-10.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:38250a349b6b390ee6047a62c086d3817ac69022c127f8a5dc058c31ccef17f3"},
-    {file = "Pillow-10.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:88af2003543cc40c80f6fca01411892ec52b11021b3dc22ec3bc9d5afd1c5334"},
-    {file = "Pillow-10.0.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:c189af0545965fa8d3b9613cfdb0cd37f9d71349e0f7750e1fd704648d475ed2"},
-    {file = "Pillow-10.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce7b031a6fc11365970e6a5686d7ba8c63e4c1cf1ea143811acbb524295eabed"},
-    {file = "Pillow-10.0.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:db24668940f82321e746773a4bc617bfac06ec831e5c88b643f91f122a785684"},
-    {file = "Pillow-10.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:efe8c0681042536e0d06c11f48cebe759707c9e9abf880ee213541c5b46c5bf3"},
-    {file = "Pillow-10.0.0.tar.gz", hash = "sha256:9c82b5b3e043c7af0d95792d0d20ccf68f61a1fec6b3530e718b688422727396"},
+    {file = "Pillow-10.0.1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:8f06be50669087250f319b706decf69ca71fdecd829091a37cc89398ca4dc17a"},
+    {file = "Pillow-10.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:50bd5f1ebafe9362ad622072a1d2f5850ecfa44303531ff14353a4059113b12d"},
+    {file = "Pillow-10.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e6a90167bcca1216606223a05e2cf991bb25b14695c518bc65639463d7db722d"},
+    {file = "Pillow-10.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f11c9102c56ffb9ca87134bd025a43d2aba3f1155f508eff88f694b33a9c6d19"},
+    {file = "Pillow-10.0.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:186f7e04248103482ea6354af6d5bcedb62941ee08f7f788a1c7707bc720c66f"},
+    {file = "Pillow-10.0.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:0462b1496505a3462d0f35dc1c4d7b54069747d65d00ef48e736acda2c8cbdff"},
+    {file = "Pillow-10.0.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d889b53ae2f030f756e61a7bff13684dcd77e9af8b10c6048fb2c559d6ed6eaf"},
+    {file = "Pillow-10.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:552912dbca585b74d75279a7570dd29fa43b6d93594abb494ebb31ac19ace6bd"},
+    {file = "Pillow-10.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:787bb0169d2385a798888e1122c980c6eff26bf941a8ea79747d35d8f9210ca0"},
+    {file = "Pillow-10.0.1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:fd2a5403a75b54661182b75ec6132437a181209b901446ee5724b589af8edef1"},
+    {file = "Pillow-10.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2d7e91b4379f7a76b31c2dda84ab9e20c6220488e50f7822e59dac36b0cd92b1"},
+    {file = "Pillow-10.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19e9adb3f22d4c416e7cd79b01375b17159d6990003633ff1d8377e21b7f1b21"},
+    {file = "Pillow-10.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93139acd8109edcdeffd85e3af8ae7d88b258b3a1e13a038f542b79b6d255c54"},
+    {file = "Pillow-10.0.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:92a23b0431941a33242b1f0ce6c88a952e09feeea9af4e8be48236a68ffe2205"},
+    {file = "Pillow-10.0.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:cbe68deb8580462ca0d9eb56a81912f59eb4542e1ef8f987405e35a0179f4ea2"},
+    {file = "Pillow-10.0.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:522ff4ac3aaf839242c6f4e5b406634bfea002469656ae8358644fc6c4856a3b"},
+    {file = "Pillow-10.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:84efb46e8d881bb06b35d1d541aa87f574b58e87f781cbba8d200daa835b42e1"},
+    {file = "Pillow-10.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:898f1d306298ff40dc1b9ca24824f0488f6f039bc0e25cfb549d3195ffa17088"},
+    {file = "Pillow-10.0.1-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:bcf1207e2f2385a576832af02702de104be71301c2696d0012b1b93fe34aaa5b"},
+    {file = "Pillow-10.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5d6c9049c6274c1bb565021367431ad04481ebb54872edecfcd6088d27edd6ed"},
+    {file = "Pillow-10.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28444cb6ad49726127d6b340217f0627abc8732f1194fd5352dec5e6a0105635"},
+    {file = "Pillow-10.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de596695a75496deb3b499c8c4f8e60376e0516e1a774e7bc046f0f48cd620ad"},
+    {file = "Pillow-10.0.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:2872f2d7846cf39b3dbff64bc1104cc48c76145854256451d33c5faa55c04d1a"},
+    {file = "Pillow-10.0.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:4ce90f8a24e1c15465048959f1e94309dfef93af272633e8f37361b824532e91"},
+    {file = "Pillow-10.0.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ee7810cf7c83fa227ba9125de6084e5e8b08c59038a7b2c9045ef4dde61663b4"},
+    {file = "Pillow-10.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:b1be1c872b9b5fcc229adeadbeb51422a9633abd847c0ff87dc4ef9bb184ae08"},
+    {file = "Pillow-10.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:98533fd7fa764e5f85eebe56c8e4094db912ccbe6fbf3a58778d543cadd0db08"},
+    {file = "Pillow-10.0.1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:764d2c0daf9c4d40ad12fbc0abd5da3af7f8aa11daf87e4fa1b834000f4b6b0a"},
+    {file = "Pillow-10.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:fcb59711009b0168d6ee0bd8fb5eb259c4ab1717b2f538bbf36bacf207ef7a68"},
+    {file = "Pillow-10.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:697a06bdcedd473b35e50a7e7506b1d8ceb832dc238a336bd6f4f5aa91a4b500"},
+    {file = "Pillow-10.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f665d1e6474af9f9da5e86c2a3a2d2d6204e04d5af9c06b9d42afa6ebde3f21"},
+    {file = "Pillow-10.0.1-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:2fa6dd2661838c66f1a5473f3b49ab610c98a128fc08afbe81b91a1f0bf8c51d"},
+    {file = "Pillow-10.0.1-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:3a04359f308ebee571a3127fdb1bd01f88ba6f6fb6d087f8dd2e0d9bff43f2a7"},
+    {file = "Pillow-10.0.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:723bd25051454cea9990203405fa6b74e043ea76d4968166dfd2569b0210886a"},
+    {file = "Pillow-10.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:71671503e3015da1b50bd18951e2f9daf5b6ffe36d16f1eb2c45711a301521a7"},
+    {file = "Pillow-10.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:44e7e4587392953e5e251190a964675f61e4dae88d1e6edbe9f36d6243547ff3"},
+    {file = "Pillow-10.0.1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:3855447d98cced8670aaa63683808df905e956f00348732448b5a6df67ee5849"},
+    {file = "Pillow-10.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ed2d9c0704f2dc4fa980b99d565c0c9a543fe5101c25b3d60488b8ba80f0cce1"},
+    {file = "Pillow-10.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f5bb289bb835f9fe1a1e9300d011eef4d69661bb9b34d5e196e5e82c4cb09b37"},
+    {file = "Pillow-10.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a0d3e54ab1df9df51b914b2233cf779a5a10dfd1ce339d0421748232cea9876"},
+    {file = "Pillow-10.0.1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:2cc6b86ece42a11f16f55fe8903595eff2b25e0358dec635d0a701ac9586588f"},
+    {file = "Pillow-10.0.1-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:ca26ba5767888c84bf5a0c1a32f069e8204ce8c21d00a49c90dabeba00ce0145"},
+    {file = "Pillow-10.0.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f0b4b06da13275bc02adfeb82643c4a6385bd08d26f03068c2796f60d125f6f2"},
+    {file = "Pillow-10.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bc2e3069569ea9dbe88d6b8ea38f439a6aad8f6e7a6283a38edf61ddefb3a9bf"},
+    {file = "Pillow-10.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:8b451d6ead6e3500b6ce5c7916a43d8d8d25ad74b9102a629baccc0808c54971"},
+    {file = "Pillow-10.0.1-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:32bec7423cdf25c9038fef614a853c9d25c07590e1a870ed471f47fb80b244db"},
+    {file = "Pillow-10.0.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b7cf63d2c6928b51d35dfdbda6f2c1fddbe51a6bc4a9d4ee6ea0e11670dd981e"},
+    {file = "Pillow-10.0.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f6d3d4c905e26354e8f9d82548475c46d8e0889538cb0657aa9c6f0872a37aa4"},
+    {file = "Pillow-10.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:847e8d1017c741c735d3cd1883fa7b03ded4f825a6e5fcb9378fd813edee995f"},
+    {file = "Pillow-10.0.1-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:7f771e7219ff04b79e231d099c0a28ed83aa82af91fd5fa9fdb28f5b8d5addaf"},
+    {file = "Pillow-10.0.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:459307cacdd4138edee3875bbe22a2492519e060660eaf378ba3b405d1c66317"},
+    {file = "Pillow-10.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b059ac2c4c7a97daafa7dc850b43b2d3667def858a4f112d1aa082e5c3d6cf7d"},
+    {file = "Pillow-10.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:d6caf3cd38449ec3cd8a68b375e0c6fe4b6fd04edb6c9766b55ef84a6e8ddf2d"},
+    {file = "Pillow-10.0.1.tar.gz", hash = "sha256:d72967b06be9300fed5cfbc8b5bafceec48bf7cdc7dab66b1d2549035287191d"},
 ]

 [package.extras]
@@ -2077,6 +2075,7 @@ files = [
     {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"},
     {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"},
     {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"},
+    {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"},
     {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"},
     {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"},
     {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"},
@@ -2084,8 +2083,15 @@ files = [
     {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"},
     {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"},
     {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"},
+    {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"},
     {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"},
     {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
+    {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
+    {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
+    {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
+    {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
+    {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
+    {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"},
     {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"},
     {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"},
     {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"},
@@ -2102,6 +2108,7 @@ files = [
     {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"},
     {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"},
     {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"},
+    {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"},
     {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"},
     {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"},
     {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"},
@@ -2109,6 +2116,7 @@ files = [
     {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"},
     {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"},
     {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"},
+    {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"},
     {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"},
     {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"},
     {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"},
@@ -2324,28 +2332,28 @@ files = [

 [[package]]
 name = "ruff"
-version = "0.0.286"
+version = "0.0.290"
 description = "An extremely fast Python linter, written in Rust."
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "ruff-0.0.286-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:8e22cb557e7395893490e7f9cfea1073d19a5b1dd337f44fd81359b2767da4e9"},
-    {file = "ruff-0.0.286-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:68ed8c99c883ae79a9133cb1a86d7130feee0397fdf5ba385abf2d53e178d3fa"},
-    {file = "ruff-0.0.286-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8301f0bb4ec1a5b29cfaf15b83565136c47abefb771603241af9d6038f8981e8"},
-    {file = "ruff-0.0.286-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:acc4598f810bbc465ce0ed84417ac687e392c993a84c7eaf3abf97638701c1ec"},
-    {file = "ruff-0.0.286-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88c8e358b445eb66d47164fa38541cfcc267847d1e7a92dd186dddb1a0a9a17f"},
-    {file = "ruff-0.0.286-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:0433683d0c5dbcf6162a4beb2356e820a593243f1fa714072fec15e2e4f4c939"},
-    {file = "ruff-0.0.286-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddb61a0c4454cbe4623f4a07fef03c5ae921fe04fede8d15c6e36703c0a73b07"},
-    {file = "ruff-0.0.286-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:47549c7c0be24c8ae9f2bce6f1c49fbafea83bca80142d118306f08ec7414041"},
-    {file = "ruff-0.0.286-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:559aa793149ac23dc4310f94f2c83209eedb16908a0343663be19bec42233d25"},
-    {file = "ruff-0.0.286-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:d73cfb1c3352e7aa0ce6fb2321f36fa1d4a2c48d2ceac694cb03611ddf0e4db6"},
-    {file = "ruff-0.0.286-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:3dad93b1f973c6d1db4b6a5da8690c5625a3fa32bdf38e543a6936e634b83dc3"},
-    {file = "ruff-0.0.286-py3-none-musllinux_1_2_i686.whl", hash = "sha256:26afc0851f4fc3738afcf30f5f8b8612a31ac3455cb76e611deea80f5c0bf3ce"},
-    {file = "ruff-0.0.286-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:9b6b116d1c4000de1b9bf027131dbc3b8a70507788f794c6b09509d28952c512"},
-    {file = "ruff-0.0.286-py3-none-win32.whl", hash = "sha256:556e965ac07c1e8c1c2d759ac512e526ecff62c00fde1a046acb088d3cbc1a6c"},
-    {file = "ruff-0.0.286-py3-none-win_amd64.whl", hash = "sha256:5d295c758961376c84aaa92d16e643d110be32add7465e197bfdaec5a431a107"},
-    {file = "ruff-0.0.286-py3-none-win_arm64.whl", hash = "sha256:1d6142d53ab7f164204b3133d053c4958d4d11ec3a39abf23a40b13b0784e3f0"},
-    {file = "ruff-0.0.286.tar.gz", hash = "sha256:f1e9d169cce81a384a26ee5bb8c919fe9ae88255f39a1a69fd1ebab233a85ed2"},
+    {file = "ruff-0.0.290-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:0e2b09ac4213b11a3520221083866a5816616f3ae9da123037b8ab275066fbac"},
+    {file = "ruff-0.0.290-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:4ca6285aa77b3d966be32c9a3cd531655b3d4a0171e1f9bf26d66d0372186767"},
+    {file = "ruff-0.0.290-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35e3550d1d9f2157b0fcc77670f7bb59154f223bff281766e61bdd1dd854e0c5"},
+    {file = "ruff-0.0.290-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d748c8bd97874f5751aed73e8dde379ce32d16338123d07c18b25c9a2796574a"},
+    {file = "ruff-0.0.290-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:982af5ec67cecd099e2ef5e238650407fb40d56304910102d054c109f390bf3c"},
+    {file = "ruff-0.0.290-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:bbd37352cea4ee007c48a44c9bc45a21f7ba70a57edfe46842e346651e2b995a"},
+    {file = "ruff-0.0.290-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d9be6351b7889462912e0b8185a260c0219c35dfd920fb490c7f256f1d8313e"},
+    {file = "ruff-0.0.290-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75cdc7fe32dcf33b7cec306707552dda54632ac29402775b9e212a3c16aad5e6"},
+    {file = "ruff-0.0.290-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb07f37f7aecdbbc91d759c0c09870ce0fb3eed4025eebedf9c4b98c69abd527"},
+    {file = "ruff-0.0.290-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:2ab41bc0ba359d3f715fc7b705bdeef19c0461351306b70a4e247f836b9350ed"},
+    {file = "ruff-0.0.290-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:150bf8050214cea5b990945b66433bf9a5e0cef395c9bc0f50569e7de7540c86"},
+    {file = "ruff-0.0.290-py3-none-musllinux_1_2_i686.whl", hash = "sha256:75386ebc15fe5467248c039f5bf6a0cfe7bfc619ffbb8cd62406cd8811815fca"},
+    {file = "ruff-0.0.290-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:ac93eadf07bc4ab4c48d8bb4e427bf0f58f3a9c578862eb85d99d704669f5da0"},
+    {file = "ruff-0.0.290-py3-none-win32.whl", hash = "sha256:461fbd1fb9ca806d4e3d5c745a30e185f7cf3ca77293cdc17abb2f2a990ad3f7"},
+    {file = "ruff-0.0.290-py3-none-win_amd64.whl", hash = "sha256:f1f49f5ec967fd5778813780b12a5650ab0ebcb9ddcca28d642c689b36920796"},
+    {file = "ruff-0.0.290-py3-none-win_arm64.whl", hash = "sha256:ae5a92dfbdf1f0c689433c223f8dac0782c2b2584bd502dfdbc76475669f1ba1"},
+    {file = "ruff-0.0.290.tar.gz", hash = "sha256:949fecbc5467bb11b8db810a7fa53c7e02633856ee6bd1302b2f43adcd71b88d"},
 ]

 [[package]]
@ -3070,13 +3078,13 @@ files = [
|
|||
|
||||
[[package]]
|
||||
name = "typing-extensions"
|
||||
version = "4.7.1"
|
||||
description = "Backported and Experimental Type Hints for Python 3.7+"
|
||||
version = "4.8.0"
|
||||
description = "Backported and Experimental Type Hints for Python 3.8+"
|
||||
optional = false
|
||||
python-versions = ">=3.7"
|
||||
python-versions = ">=3.8"
|
||||
files = [
|
||||
{file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"},
|
||||
{file = "typing_extensions-4.7.1.tar.gz", hash = "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"},
|
||||
{file = "typing_extensions-4.8.0-py3-none-any.whl", hash = "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0"},
|
||||
{file = "typing_extensions-4.8.0.tar.gz", hash = "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef"},
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
@ -3339,4 +3347,4 @@ user-search = ["pyicu"]
|
|||
[metadata]
|
||||
lock-version = "2.0"
|
||||
python-versions = "^3.8.0"
|
||||
content-hash = "4a3a82becd89b91e76e2bc2f8ba72123f665c517d9b841d9a34cd01b83a1adc3"
|
||||
content-hash = "104f108b3c966be05e17cf9975b4061942b354fe9a57cbf7372371fd56b1bf24"
|
||||
|
|
|
@@ -95,7 +95,7 @@ manifest-path = "rust/Cargo.toml"

[tool.poetry]
name = "matrix-synapse"
version = "1.92.1"
version = "1.93.0rc1"
description = "Homeserver for the Matrix decentralised comms protocol"
authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
license = "Apache-2.0"

@@ -180,7 +180,9 @@ PyYAML = ">=3.13"
pyasn1 = ">=0.1.9"
pyasn1-modules = ">=0.0.7"
bcrypt = ">=3.1.7"
Pillow = ">=5.4.0"
# 10.0.1 minimum is mandatory here because of libwebp CVE-2023-4863.
# Packagers that already took care of libwebp can lower that down to 5.4.0.
Pillow = ">=10.0.1"
# We use SortedDict.peekitem(), which was added in sortedcontainers 1.5.2.
sortedcontainers = ">=1.5.2"
pymacaroons = ">=0.13.0"

@@ -318,7 +320,7 @@ all = [
# This helps prevents merge conflicts when running a batch of dependabot updates.
isort = ">=5.10.1"
black = ">=22.7.0"
ruff = "0.0.286"
ruff = "0.0.290"

# Typechecking
lxml-stubs = ">=0.4.0"

@@ -17,7 +17,7 @@ import logging
import os
import sys
import tempfile
from typing import List, Mapping, Optional
from typing import List, Mapping, Optional, Sequence

from twisted.internet import defer, task

@@ -57,7 +57,7 @@ from synapse.storage.databases.main.state import StateGroupWorkerStore
from synapse.storage.databases.main.stream import StreamWorkerStore
from synapse.storage.databases.main.tags import TagsWorkerStore
from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore
from synapse.types import JsonDict, StateMap
from synapse.types import JsonMapping, StateMap
from synapse.util import SYNAPSE_VERSION
from synapse.util.logcontext import LoggingContext

@@ -198,7 +198,7 @@ class FileExfiltrationWriter(ExfiltrationWriter):
            for event in state.values():
                json.dump(event, fp=f)

    def write_profile(self, profile: JsonDict) -> None:
    def write_profile(self, profile: JsonMapping) -> None:
        user_directory = os.path.join(self.base_directory, "user_data")
        os.makedirs(user_directory, exist_ok=True)
        profile_file = os.path.join(user_directory, "profile")

@@ -206,7 +206,7 @@ class FileExfiltrationWriter(ExfiltrationWriter):
        with open(profile_file, "a") as f:
            json.dump(profile, fp=f)

    def write_devices(self, devices: List[JsonDict]) -> None:
    def write_devices(self, devices: Sequence[JsonMapping]) -> None:
        user_directory = os.path.join(self.base_directory, "user_data")
        os.makedirs(user_directory, exist_ok=True)
        device_file = os.path.join(user_directory, "devices")

@@ -215,7 +215,7 @@ class FileExfiltrationWriter(ExfiltrationWriter):
            with open(device_file, "a") as f:
                json.dump(device, fp=f)

    def write_connections(self, connections: List[JsonDict]) -> None:
    def write_connections(self, connections: Sequence[JsonMapping]) -> None:
        user_directory = os.path.join(self.base_directory, "user_data")
        os.makedirs(user_directory, exist_ok=True)
        connection_file = os.path.join(user_directory, "connections")

@@ -225,7 +225,7 @@ class FileExfiltrationWriter(ExfiltrationWriter):
                json.dump(connection, fp=f)

    def write_account_data(
        self, file_name: str, account_data: Mapping[str, JsonDict]
        self, file_name: str, account_data: Mapping[str, JsonMapping]
    ) -> None:
        account_data_directory = os.path.join(
            self.base_directory, "user_data", "account_data"

@@ -237,7 +237,7 @@ class FileExfiltrationWriter(ExfiltrationWriter):
        with open(account_data_file, "a") as f:
            json.dump(account_data, fp=f)

    def write_media_id(self, media_id: str, media_metadata: JsonDict) -> None:
    def write_media_id(self, media_id: str, media_metadata: JsonMapping) -> None:
        file_directory = os.path.join(self.base_directory, "media_ids")
        os.makedirs(file_directory, exist_ok=True)
        media_id_file = os.path.join(file_directory, media_id)

@@ -77,6 +77,7 @@ from synapse.storage.databases.main.monthly_active_users import (
)
from synapse.storage.databases.main.presence import PresenceStore
from synapse.storage.databases.main.profile import ProfileWorkerStore
from synapse.storage.databases.main.purge_events import PurgeEventsStore
from synapse.storage.databases.main.push_rule import PushRulesWorkerStore
from synapse.storage.databases.main.pusher import PusherWorkerStore
from synapse.storage.databases.main.receipts import ReceiptsWorkerStore

@@ -134,6 +135,7 @@ class GenericWorkerStore(
    RelationsWorkerStore,
    EventFederationWorkerStore,
    EventPushActionsWorkerStore,
    PurgeEventsStore,
    StateGroupWorkerStore,
    SignatureWorkerStore,
    UserErasureWorkerStore,

@@ -200,9 +200,7 @@ class _ServiceQueuer:
        if service.id in self.requests_in_flight:
            return

        run_as_background_process(
            "as-sender-%s" % (service.id,), self._send_request, service
        )
        run_as_background_process("as-sender", self._send_request, service)

    async def _send_request(self, service: ApplicationService) -> None:
        # sanity-check: we shouldn't get here if this service already has a sender

@@ -478,14 +476,11 @@ class _Recoverer:
        self.backoff_counter = 1

    def recover(self) -> None:
        def _retry() -> None:
            run_as_background_process(
                "as-recoverer-%s" % (self.service.id,), self.retry
            )

        delay = 2**self.backoff_counter
        logger.info("Scheduling retries on %s in %fs", self.service.id, delay)
        self.clock.call_later(delay, _retry)
        self.clock.call_later(
            delay, run_as_background_process, "as-recoverer", self.retry
        )

    def _backoff(self) -> None:
        # cap the backoff to be around 8.5min => (2^9) = 512 secs

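The recoverer doubles its delay on every failed retry; per the comment above, the counter is capped so the delay never exceeds 2^9 = 512 seconds. A minimal sketch of that arithmetic (the helper name is ours, not Synapse's):

# Illustrative sketch, not part of the diff: the _Recoverer-style backoff,
# assuming the counter is capped at 9 as the comment above indicates.
def next_delay_secs(backoff_counter: int) -> int:
    # 2, 4, 8, ... seconds, capped at 2**9 = 512 (~8.5 minutes).
    return 2 ** min(backoff_counter, 9)

assert next_delay_secs(1) == 2
assert next_delay_secs(20) == 512
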
@@ -486,6 +486,17 @@ class ServerConfig(Config):
        else:
            self.redaction_retention_period = None

        # How long to keep locally forgotten rooms before purging them from the DB.
        forgotten_room_retention_period = config.get(
            "forgotten_room_retention_period", None
        )
        if forgotten_room_retention_period is not None:
            self.forgotten_room_retention_period: Optional[int] = self.parse_duration(
                forgotten_room_retention_period
            )
        else:
            self.forgotten_room_retention_period = None

        # How long to keep entries in the `users_ips` table.
        user_ips_max_age = config.get("user_ips_max_age", "28d")
        if user_ips_max_age is not None:

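For illustration, a hedged, simplified re-implementation of the "28d"-style duration parsing that `parse_duration` performs on options such as `forgotten_room_retention_period` (Synapse's real parser lives in its config base class and accepts more forms than this sketch):

# Illustrative sketch, not part of the diff: a simplified duration parser
# returning milliseconds, mirroring how the option above becomes an int.
def parse_duration_ms(value: str) -> int:
    units = {"s": 1000, "m": 60 * 1000, "h": 60 * 60 * 1000,
             "d": 24 * 60 * 60 * 1000, "w": 7 * 24 * 60 * 60 * 1000}
    if value and value[-1] in units:
        return int(value[:-1]) * units[value[-1]]
    return int(value)  # bare integers are treated as milliseconds

assert parse_duration_ms("28d") == 28 * 24 * 60 * 60 * 1000
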
@@ -103,7 +103,7 @@ class EventBuilder:

    async def build(
        self,
        prev_event_ids: StrCollection,
        prev_event_ids: List[str],
        auth_event_ids: Optional[List[str]],
        depth: Optional[int] = None,
    ) -> EventBase:

@@ -14,11 +14,11 @@

import abc
import logging
from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Set
from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Sequence, Set

from synapse.api.constants import Direction, Membership
from synapse.events import EventBase
from synapse.types import JsonDict, RoomStreamToken, StateMap, UserID, UserInfo
from synapse.types import JsonMapping, RoomStreamToken, StateMap, UserID, UserInfo
from synapse.visibility import filter_events_for_client

if TYPE_CHECKING:

@@ -35,7 +35,7 @@ class AdminHandler:
        self._state_storage_controller = self._storage_controllers.state
        self._msc3866_enabled = hs.config.experimental.msc3866.enabled

    async def get_whois(self, user: UserID) -> JsonDict:
    async def get_whois(self, user: UserID) -> JsonMapping:
        connections = []

        sessions = await self._store.get_user_ip_and_agents(user)

@@ -55,7 +55,7 @@ class AdminHandler:

        return ret

    async def get_user(self, user: UserID) -> Optional[JsonDict]:
    async def get_user(self, user: UserID) -> Optional[JsonMapping]:
        """Function to get user details"""
        user_info: Optional[UserInfo] = await self._store.get_user_by_id(
            user.to_string()

@@ -344,7 +344,7 @@ class ExfiltrationWriter(metaclass=abc.ABCMeta):
        raise NotImplementedError()

    @abc.abstractmethod
    def write_profile(self, profile: JsonDict) -> None:
    def write_profile(self, profile: JsonMapping) -> None:
        """Write the profile of a user.

        Args:

@@ -353,7 +353,7 @@ class ExfiltrationWriter(metaclass=abc.ABCMeta):
        raise NotImplementedError()

    @abc.abstractmethod
    def write_devices(self, devices: List[JsonDict]) -> None:
    def write_devices(self, devices: Sequence[JsonMapping]) -> None:
        """Write the devices of a user.

        Args:

@@ -362,7 +362,7 @@ class ExfiltrationWriter(metaclass=abc.ABCMeta):
        raise NotImplementedError()

    @abc.abstractmethod
    def write_connections(self, connections: List[JsonDict]) -> None:
    def write_connections(self, connections: Sequence[JsonMapping]) -> None:
        """Write the connections of a user.

        Args:

@@ -372,7 +372,7 @@ class ExfiltrationWriter(metaclass=abc.ABCMeta):

    @abc.abstractmethod
    def write_account_data(
        self, file_name: str, account_data: Mapping[str, JsonDict]
        self, file_name: str, account_data: Mapping[str, JsonMapping]
    ) -> None:
        """Write the account data of a user.

@@ -383,7 +383,7 @@ class ExfiltrationWriter(metaclass=abc.ABCMeta):
        raise NotImplementedError()

    @abc.abstractmethod
    def write_media_id(self, media_id: str, media_metadata: JsonDict) -> None:
    def write_media_id(self, media_id: str, media_metadata: JsonMapping) -> None:
        """Write the media's metadata of a user.
        Exports only the metadata, as this can be fetched from the database via
        read only. In order to access the files, a connection to the correct

@@ -723,12 +723,11 @@ class FederationEventHandler:
        if not prevs - seen:
            return

        latest_list = await self._store.get_latest_event_ids_in_room(room_id)
        latest_frozen = await self._store.get_latest_event_ids_in_room(room_id)

        # We add the prev events that we have seen to the latest
        # list to ensure the remote server doesn't give them to us
        latest = set(latest_list)
        latest |= seen
        latest = seen | latest_frozen

        logger.info(
            "Requesting missing events between %s and %s",

@@ -1976,8 +1975,7 @@ class FederationEventHandler:
        # partial and full state and may not be accurate.
        return

        extrem_ids_list = await self._store.get_latest_event_ids_in_room(event.room_id)
        extrem_ids = set(extrem_ids_list)
        extrem_ids = await self._store.get_latest_event_ids_in_room(event.room_id)
        prev_event_ids = set(event.prev_event_ids())

        if extrem_ids == prev_event_ids:

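These two hunks lean on the fact that `get_latest_event_ids_in_room` now returns an immutable set that can be compared and unioned directly; a quick illustration of the union used in the first hunk:

# Illustrative sketch, not part of the diff: unioning a set with a
# frozenset yields a plain set, so `seen | latest_frozen` needs no copy.
seen = {"$event_a"}
latest_frozen = frozenset({"$event_b"})
latest = seen | latest_frozen
assert isinstance(latest, set) and latest == {"$event_a", "$event_b"}
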
@@ -13,9 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Dict, List, Optional, Set

import attr
from typing import TYPE_CHECKING, List, Optional, Set, Tuple, cast

from twisted.python.failure import Failure

@@ -23,16 +21,22 @@ from synapse.api.constants import Direction, EventTypes, Membership
from synapse.api.errors import SynapseError
from synapse.api.filtering import Filter
from synapse.events.utils import SerializeEventConfig
from synapse.handlers.room import ShutdownRoomResponse
from synapse.handlers.room import ShutdownRoomParams, ShutdownRoomResponse
from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
from synapse.logging.opentracing import trace
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.rest.admin._base import assert_user_is_admin
from synapse.streams.config import PaginationConfig
from synapse.types import JsonDict, Requester, StrCollection, StreamKeyType
from synapse.types import (
    JsonDict,
    JsonMapping,
    Requester,
    ScheduledTask,
    StreamKeyType,
    TaskStatus,
)
from synapse.types.state import StateFilter
from synapse.util.async_helpers import ReadWriteLock
from synapse.util.stringutils import random_string
from synapse.visibility import filter_events_for_client

if TYPE_CHECKING:

@@ -53,80 +57,11 @@ BACKFILL_BECAUSE_TOO_MANY_GAPS_THRESHOLD = 3
PURGE_PAGINATION_LOCK_NAME = "purge_pagination_lock"


@attr.s(slots=True, auto_attribs=True)
class PurgeStatus:
    """Object tracking the status of a purge request
PURGE_HISTORY_ACTION_NAME = "purge_history"

    This class contains information on the progress of a purge request, for
    return by get_purge_status.
    """
PURGE_ROOM_ACTION_NAME = "purge_room"

    STATUS_ACTIVE = 0
    STATUS_COMPLETE = 1
    STATUS_FAILED = 2

    STATUS_TEXT = {
        STATUS_ACTIVE: "active",
        STATUS_COMPLETE: "complete",
        STATUS_FAILED: "failed",
    }

    # Save the error message if an error occurs
    error: str = ""

    # Tracks whether this request has completed. One of STATUS_{ACTIVE,COMPLETE,FAILED}.
    status: int = STATUS_ACTIVE

    def asdict(self) -> JsonDict:
        ret = {"status": PurgeStatus.STATUS_TEXT[self.status]}
        if self.error:
            ret["error"] = self.error
        return ret


@attr.s(slots=True, auto_attribs=True)
class DeleteStatus:
    """Object tracking the status of a delete room request

    This class contains information on the progress of a delete room request, for
    return by get_delete_status.
    """

    STATUS_PURGING = 0
    STATUS_COMPLETE = 1
    STATUS_FAILED = 2
    STATUS_SHUTTING_DOWN = 3

    STATUS_TEXT = {
        STATUS_PURGING: "purging",
        STATUS_COMPLETE: "complete",
        STATUS_FAILED: "failed",
        STATUS_SHUTTING_DOWN: "shutting_down",
    }

    # Tracks whether this request has completed.
    # One of STATUS_{PURGING,COMPLETE,FAILED,SHUTTING_DOWN}.
    status: int = STATUS_PURGING

    # Save the error message if an error occurs
    error: str = ""

    # Saves the result of an action to give it back to REST API
    shutdown_room: ShutdownRoomResponse = {
        "kicked_users": [],
        "failed_to_kick_users": [],
        "local_aliases": [],
        "new_room_id": None,
    }

    def asdict(self) -> JsonDict:
        ret = {
            "status": DeleteStatus.STATUS_TEXT[self.status],
            "shutdown_room": self.shutdown_room,
        }
        if self.error:
            ret["error"] = self.error
        return ret
SHUTDOWN_AND_PURGE_ROOM_ACTION_NAME = "shutdown_and_purge_room"


class PaginationHandler:

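The in-memory `PurgeStatus`/`DeleteStatus` trackers removed above are replaced by tasks persisted under the three action names. A self-contained sketch of the contract the handlers below rely on, with simplified stand-ins for the `ScheduledTask`/`TaskStatus` types from `synapse.types`:

# Illustrative sketch, not part of the diff: simplified stand-ins showing
# the (status, result, error) triple that scheduler actions return.
import enum
from dataclasses import dataclass
from typing import Any, Mapping, Optional, Tuple


class TaskStatus(enum.Enum):
    ACTIVE = "active"
    COMPLETE = "complete"
    FAILED = "failed"


@dataclass
class ScheduledTask:
    id: str
    action: str
    resource_id: Optional[str]
    params: Optional[Mapping[str, Any]]


async def example_action(
    task: ScheduledTask,
) -> Tuple[TaskStatus, Optional[Mapping[str, Any]], Optional[str]]:
    if task.resource_id is None:
        return TaskStatus.FAILED, None, "no resource_id passed to task"
    # ... perform the work for task.resource_id here ...
    return TaskStatus.COMPLETE, None, None
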
@@ -136,9 +71,6 @@ class PaginationHandler:
    paginating during a purge.
    """

    # when to remove a completed deletion/purge from the results map
    CLEAR_PURGE_AFTER_MS = 1000 * 3600 * 24  # 24 hours

    def __init__(self, hs: "HomeServer"):
        self.hs = hs
        self.auth = hs.get_auth()

@@ -150,17 +82,11 @@ class PaginationHandler:
        self._room_shutdown_handler = hs.get_room_shutdown_handler()
        self._relations_handler = hs.get_relations_handler()
        self._worker_locks = hs.get_worker_locks_handler()
        self._task_scheduler = hs.get_task_scheduler()

        self.pagination_lock = ReadWriteLock()
        # IDs of rooms in which there currently an active purge *or delete* operation.
        self._purges_in_progress_by_room: Set[str] = set()
        # map from purge id to PurgeStatus
        self._purges_by_id: Dict[str, PurgeStatus] = {}
        # map from purge id to DeleteStatus
        self._delete_by_id: Dict[str, DeleteStatus] = {}
        # map from room id to delete ids
        # Dict[`room_id`, List[`delete_id`]]
        self._delete_by_room: Dict[str, List[str]] = {}
        self._event_serializer = hs.get_event_client_serializer()

        self._retention_default_max_lifetime = (

@@ -173,6 +99,9 @@ class PaginationHandler:
        self._retention_allowed_lifetime_max = (
            hs.config.retention.retention_allowed_lifetime_max
        )
        self._forgotten_room_retention_period = (
            hs.config.server.forgotten_room_retention_period
        )
        self._is_master = hs.config.worker.worker_app is None

        if hs.config.retention.retention_enabled and self._is_master:

@@ -189,6 +118,14 @@ class PaginationHandler:
                    job.longest_max_lifetime,
                )

        self._task_scheduler.register_action(
            self._purge_history, PURGE_HISTORY_ACTION_NAME
        )
        self._task_scheduler.register_action(self._purge_room, PURGE_ROOM_ACTION_NAME)
        self._task_scheduler.register_action(
            self._shutdown_and_purge_room, SHUTDOWN_AND_PURGE_ROOM_ACTION_NAME
        )

    async def purge_history_for_rooms_in_range(
        self, min_ms: Optional[int], max_ms: Optional[int]
    ) -> None:

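Actions are registered once at startup and later referred to by name when work is scheduled. A minimal, hypothetical registry illustrating that split (Synapse's `TaskScheduler` additionally persists tasks to the database so they survive restarts):

# Illustrative sketch, not part of the diff: a toy in-memory scheduler
# showing the register_action/schedule_task split used above.
from typing import Any, Awaitable, Callable, Dict, List, Tuple


class MiniScheduler:
    def __init__(self) -> None:
        self._actions: Dict[str, Callable[..., Awaitable[Any]]] = {}
        self._queue: List[Tuple[str, str, dict]] = []

    def register_action(self, func: Callable[..., Awaitable[Any]], name: str) -> None:
        self._actions[name] = func

    async def schedule_task(self, name: str, resource_id: str, params: dict) -> None:
        # A real scheduler would persist this and run it in the background.
        self._queue.append((name, resource_id, params))

    async def run_next(self) -> Any:
        name, resource_id, params = self._queue.pop(0)
        return await self._actions[name](resource_id, params)
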
@@ -224,7 +161,7 @@ class PaginationHandler:
            include_null = False

        logger.info(
            "[purge] Running purge job for %s < max_lifetime <= %s (include NULLs = %s)",
            "[purge] Running retention purge job for %s < max_lifetime <= %s (include NULLs = %s)",
            min_ms,
            max_ms,
            include_null,

@@ -239,10 +176,10 @@ class PaginationHandler:
        for room_id, retention_policy in rooms.items():
            logger.info("[purge] Attempting to purge messages in room %s", room_id)

            if room_id in self._purges_in_progress_by_room:
            if len(await self.get_delete_tasks_by_room(room_id, only_active=True)) > 0:
                logger.warning(
                    "[purge] not purging room %s as there's an ongoing purge running"
                    " for this room",
                    "[purge] not purging room %s for retention as there's an ongoing purge"
                    " running for this room",
                    room_id,
                )
                continue

@@ -295,27 +232,20 @@ class PaginationHandler:
        (stream, topo, _event_id) = r
        token = "t%d-%d" % (topo, stream)

        purge_id = random_string(16)

        self._purges_by_id[purge_id] = PurgeStatus()

        logger.info(
            "Starting purging events in room %s (purge_id %s)" % (room_id, purge_id)
        )
        logger.info("Starting purging events in room %s", room_id)

        # We want to purge everything, including local events, and to run the purge in
        # the background so that it's not blocking any other operation apart from
        # other purges in the same room.
        run_as_background_process(
            "_purge_history",
            self._purge_history,
            purge_id,
            PURGE_HISTORY_ACTION_NAME,
            self.purge_history,
            room_id,
            token,
            True,
        )

    def start_purge_history(
    async def start_purge_history(
        self, room_id: str, token: str, delete_local_events: bool = False
    ) -> str:
        """Start off a history purge on a room.

|
|||
Returns:
|
||||
unique ID for this purge transaction.
|
||||
"""
|
||||
if room_id in self._purges_in_progress_by_room:
|
||||
raise SynapseError(
|
||||
400, "History purge already in progress for %s" % (room_id,)
|
||||
purge_id = await self._task_scheduler.schedule_task(
|
||||
PURGE_HISTORY_ACTION_NAME,
|
||||
resource_id=room_id,
|
||||
params={"token": token, "delete_local_events": delete_local_events},
|
||||
)
|
||||
|
||||
purge_id = random_string(16)
|
||||
|
||||
# we log the purge_id here so that it can be tied back to the
|
||||
# request id in the log lines.
|
||||
logger.info("[purge] starting purge_id %s", purge_id)
|
||||
|
||||
self._purges_by_id[purge_id] = PurgeStatus()
|
||||
run_as_background_process(
|
||||
"purge_history",
|
||||
self._purge_history,
|
||||
purge_id,
|
||||
room_id,
|
||||
token,
|
||||
delete_local_events,
|
||||
)
|
||||
return purge_id
|
||||
|
||||
async def _purge_history(
|
||||
self, purge_id: str, room_id: str, token: str, delete_local_events: bool
|
||||
) -> None:
|
||||
self,
|
||||
task: ScheduledTask,
|
||||
) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]:
|
||||
"""
|
||||
Scheduler action to purge some history of a room.
|
||||
"""
|
||||
if (
|
||||
task.resource_id is None
|
||||
or task.params is None
|
||||
or "token" not in task.params
|
||||
or "delete_local_events" not in task.params
|
||||
):
|
||||
return (
|
||||
TaskStatus.FAILED,
|
||||
None,
|
||||
"Not enough parameters passed to _purge_history",
|
||||
)
|
||||
err = await self.purge_history(
|
||||
task.resource_id,
|
||||
task.params["token"],
|
||||
task.params["delete_local_events"],
|
||||
)
|
||||
if err is not None:
|
||||
return TaskStatus.FAILED, None, err
|
||||
return TaskStatus.COMPLETE, None, None
|
||||
|
||||
async def purge_history(
|
||||
self,
|
||||
room_id: str,
|
||||
token: str,
|
||||
delete_local_events: bool,
|
||||
) -> Optional[str]:
|
||||
"""Carry out a history purge on a room.
|
||||
|
||||
Args:
|
||||
purge_id: The ID for this purge.
|
||||
room_id: The room to purge from
|
||||
token: topological token to delete events before
|
||||
delete_local_events: True to delete local events as well as remote ones
|
||||
"""
|
||||
self._purges_in_progress_by_room.add(room_id)
|
||||
try:
|
||||
async with self._worker_locks.acquire_read_write_lock(
|
||||
PURGE_PAGINATION_LOCK_NAME, room_id, write=True
|
||||
|
@@ -371,57 +319,68 @@ class PaginationHandler:
                room_id, token, delete_local_events
            )
            logger.info("[purge] complete")
            self._purges_by_id[purge_id].status = PurgeStatus.STATUS_COMPLETE
            return None
        except Exception:
            f = Failure()
            logger.error(
                "[purge] failed", exc_info=(f.type, f.value, f.getTracebackObject())
            )
            self._purges_by_id[purge_id].status = PurgeStatus.STATUS_FAILED
            self._purges_by_id[purge_id].error = f.getErrorMessage()
        finally:
            self._purges_in_progress_by_room.discard(room_id)
            return f.getErrorMessage()

        # remove the purge from the list 24 hours after it completes
        def clear_purge() -> None:
            del self._purges_by_id[purge_id]

        self.hs.get_reactor().callLater(
            PaginationHandler.CLEAR_PURGE_AFTER_MS / 1000, clear_purge
        )

    def get_purge_status(self, purge_id: str) -> Optional[PurgeStatus]:
        """Get the current status of an active purge

        Args:
            purge_id: purge_id returned by start_purge_history
        """
        return self._purges_by_id.get(purge_id)

    def get_delete_status(self, delete_id: str) -> Optional[DeleteStatus]:
    async def get_delete_task(self, delete_id: str) -> Optional[ScheduledTask]:
        """Get the current status of an active deleting

        Args:
            delete_id: delete_id returned by start_shutdown_and_purge_room
                or start_purge_history.
        """
        return self._delete_by_id.get(delete_id)
        return await self._task_scheduler.get_task(delete_id)

    def get_delete_ids_by_room(self, room_id: str) -> Optional[StrCollection]:
        """Get all active delete ids by room
    async def get_delete_tasks_by_room(
        self, room_id: str, only_active: Optional[bool] = False
    ) -> List[ScheduledTask]:
        """Get complete, failed or active delete tasks by room

        Args:
            room_id: room_id that is deleted
            only_active: if True, completed&failed tasks will be omitted
        """
        return self._delete_by_room.get(room_id)
        statuses = [TaskStatus.ACTIVE]
        if not only_active:
            statuses += [TaskStatus.COMPLETE, TaskStatus.FAILED]

    async def purge_room(self, room_id: str, force: bool = False) -> None:
        return await self._task_scheduler.get_tasks(
            actions=[PURGE_ROOM_ACTION_NAME, SHUTDOWN_AND_PURGE_ROOM_ACTION_NAME],
            resource_id=room_id,
            statuses=statuses,
        )

    async def _purge_room(
        self,
        task: ScheduledTask,
    ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]:
        """
        Scheduler action to purge a room.
        """
        if not task.resource_id:
            raise Exception("No room id passed to purge_room task")
        params = task.params if task.params else {}
        await self.purge_room(task.resource_id, params.get("force", False))
        return TaskStatus.COMPLETE, None, None

    async def purge_room(
        self,
        room_id: str,
        force: bool,
    ) -> None:
        """Purge the given room from the database.
        This function is part the delete room v1 API.

        Args:
            room_id: room to be purged
            force: set true to skip checking for joined users.
        """
        logger.info("starting purge room_id=%s force=%s", room_id, force)

        async with self._worker_locks.acquire_multi_read_write_lock(
            [
                (PURGE_PAGINATION_LOCK_NAME, room_id),

@@ -430,13 +389,20 @@ class PaginationHandler:
            write=True,
        ):
            # first check that we have no users in this room
            if not force:
                joined = await self.store.is_host_joined(room_id, self._server_name)
                if joined:
            if force:
                logger.info(
                    "force-purging room %s with some local users still joined",
                    room_id,
                )
            else:
                raise SynapseError(400, "Users are still joined to this room")

            await self._storage_controllers.purge_events.purge_room(room_id)

        logger.info("purge complete for room_id %s", room_id)

    @trace
    async def get_messages(
        self,

@@ -711,177 +677,72 @@ class PaginationHandler:

    async def _shutdown_and_purge_room(
        self,
        delete_id: str,
        room_id: str,
        requester_user_id: Optional[str],
        new_room_user_id: Optional[str] = None,
        new_room_name: Optional[str] = None,
        message: Optional[str] = None,
        block: bool = False,
        purge: bool = True,
        force_purge: bool = False,
    ) -> None:
        task: ScheduledTask,
    ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]:
        """
        Shuts down and purges a room.

        See `RoomShutdownHandler.shutdown_room` for details of creation of the new room

        Args:
            delete_id: The ID for this delete.
            room_id: The ID of the room to shut down.
            requester_user_id:
                User who requested the action. Will be recorded as putting the room on the
                blocking list.
                If None, the action was not manually requested but instead
                triggered automatically, e.g. through a Synapse module
                or some other policy.
                MUST NOT be None if block=True.
            new_room_user_id:
                If set, a new room will be created with this user ID
                as the creator and admin, and all users in the old room will be
                moved into that room. If not set, no new room will be created
                and the users will just be removed from the old room.
            new_room_name:
                A string representing the name of the room that new users will
                be invited to. Defaults to `Content Violation Notification`
            message:
                A string containing the first message that will be sent as
                `new_room_user_id` in the new room. Ideally this will clearly
                convey why the original room was shut down.
                Defaults to `Sharing illegal content on this server is not
                permitted and rooms in violation will be blocked.`
            block:
                If set to `true`, this room will be added to a blocking list,
                preventing future attempts to join the room. Defaults to `false`.
            purge:
                If set to `true`, purge the given room from the database.
            force_purge:
                If set to `true`, the room will be purged from database
                also if it fails to remove some users from room.

        Saves a `RoomShutdownHandler.ShutdownRoomResponse` in `DeleteStatus`:
        Scheduler action to shutdown and purge a room.
        """

        self._purges_in_progress_by_room.add(room_id)
        try:
            async with self._worker_locks.acquire_read_write_lock(
                PURGE_PAGINATION_LOCK_NAME, room_id, write=True
            ):
                self._delete_by_id[delete_id].status = DeleteStatus.STATUS_SHUTTING_DOWN
                self._delete_by_id[
                    delete_id
                ].shutdown_room = await self._room_shutdown_handler.shutdown_room(
                    room_id=room_id,
                    requester_user_id=requester_user_id,
                    new_room_user_id=new_room_user_id,
                    new_room_name=new_room_name,
                    message=message,
                    block=block,
                )
                self._delete_by_id[delete_id].status = DeleteStatus.STATUS_PURGING

                if purge:
                    logger.info("starting purge room_id %s", room_id)

                    # first check that we have no users in this room
                    if not force_purge:
                        joined = await self.store.is_host_joined(
                            room_id, self._server_name
                        )
                        if joined:
                            raise SynapseError(
                                400, "Users are still joined to this room"
        if task.resource_id is None or task.params is None:
            raise Exception(
                "No room id and/or no parameters passed to shutdown_and_purge_room task"
            )

                    await self._storage_controllers.purge_events.purge_room(room_id)
        room_id = task.resource_id

            logger.info("purge complete for room_id %s", room_id)
            self._delete_by_id[delete_id].status = DeleteStatus.STATUS_COMPLETE
        except Exception:
            f = Failure()
            logger.error(
                "failed",
                exc_info=(f.type, f.value, f.getTracebackObject()),
            )
            self._delete_by_id[delete_id].status = DeleteStatus.STATUS_FAILED
            self._delete_by_id[delete_id].error = f.getErrorMessage()
        finally:
            self._purges_in_progress_by_room.discard(room_id)
        async def update_result(result: Optional[JsonMapping]) -> None:
            await self._task_scheduler.update_task(task.id, result=result)

        # remove the delete from the list 24 hours after it completes
        def clear_delete() -> None:
            del self._delete_by_id[delete_id]
            self._delete_by_room[room_id].remove(delete_id)
            if not self._delete_by_room[room_id]:
                del self._delete_by_room[room_id]

        self.hs.get_reactor().callLater(
            PaginationHandler.CLEAR_PURGE_AFTER_MS / 1000, clear_delete
        shutdown_result = (
            cast(ShutdownRoomResponse, task.result) if task.result else None
        )

    def start_shutdown_and_purge_room(
        shutdown_result = await self._room_shutdown_handler.shutdown_room(
            room_id,
            cast(ShutdownRoomParams, task.params),
            shutdown_result,
            update_result,
        )

        if task.params.get("purge", False):
            await self.purge_room(
                room_id,
                task.params.get("force_purge", False),
            )

        return (TaskStatus.COMPLETE, shutdown_result, None)

    async def start_shutdown_and_purge_room(
        self,
        room_id: str,
        requester_user_id: Optional[str],
        new_room_user_id: Optional[str] = None,
        new_room_name: Optional[str] = None,
        message: Optional[str] = None,
        block: bool = False,
        purge: bool = True,
        force_purge: bool = False,
        shutdown_params: ShutdownRoomParams,
    ) -> str:
        """Start off shut down and purge on a room.

        Args:
            room_id: The ID of the room to shut down.
            requester_user_id:
                User who requested the action and put the room on the
                blocking list.
                If None, the action was not manually requested but instead
                triggered automatically, e.g. through a Synapse module
                or some other policy.
                MUST NOT be None if block=True.
            new_room_user_id:
                If set, a new room will be created with this user ID
                as the creator and admin, and all users in the old room will be
                moved into that room. If not set, no new room will be created
                and the users will just be removed from the old room.
            new_room_name:
                A string representing the name of the room that new users will
                be invited to. Defaults to `Content Violation Notification`
            message:
                A string containing the first message that will be sent as
                `new_room_user_id` in the new room. Ideally this will clearly
                convey why the original room was shut down.
                Defaults to `Sharing illegal content on this server is not
                permitted and rooms in violation will be blocked.`
            block:
                If set to `true`, this room will be added to a blocking list,
                preventing future attempts to join the room. Defaults to `false`.
            purge:
                If set to `true`, purge the given room from the database.
            force_purge:
                If set to `true`, the room will be purged from database
                also if it fails to remove some users from room.
            shutdown_params: parameters for the shutdown

        Returns:
            unique ID for this delete transaction.
        """
        if room_id in self._purges_in_progress_by_room:
            raise SynapseError(
                400, "History purge already in progress for %s" % (room_id,)
            )
        if len(await self.get_delete_tasks_by_room(room_id, only_active=True)) > 0:
            raise SynapseError(400, "Purge already in progress for %s" % (room_id,))

        # This check is double to `RoomShutdownHandler.shutdown_room`
        # But here the requester get a direct response / error with HTTP request
        # and do not have to check the purge status
        new_room_user_id = shutdown_params["new_room_user_id"]
        if new_room_user_id is not None:
            if not self.hs.is_mine_id(new_room_user_id):
                raise SynapseError(
                    400, "User must be our own: %s" % (new_room_user_id,)
                )

        delete_id = random_string(16)
        delete_id = await self._task_scheduler.schedule_task(
            SHUTDOWN_AND_PURGE_ROOM_ACTION_NAME,
            resource_id=room_id,
            params=shutdown_params,
        )

        # we log the delete_id here so that it can be tied back to the
        # request id in the log lines.

@@ -891,19 +752,4 @@ class PaginationHandler:
            delete_id,
        )

        self._delete_by_id[delete_id] = DeleteStatus()
        self._delete_by_room.setdefault(room_id, []).append(delete_id)
        run_as_background_process(
            "shutdown_and_purge_room",
            self._shutdown_and_purge_room,
            delete_id,
            room_id,
            requester_user_id,
            new_room_user_id,
            new_room_name,
            message,
            block,
            purge,
            force_purge,
        )
        return delete_id

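Because the shutdown now runs as a persisted task, an interrupted run can resume: each `update_result` call records partial progress, and a restarted action receives the previous `task.result` back as its starting point. A sketch of that resume pattern under those assumptions:

# Illustrative sketch, not part of the diff: resuming from a previously
# persisted partial result, as _shutdown_and_purge_room does after a restart.
from typing import Any, Dict, Optional


async def run_resumable(previous_result: Optional[Dict[str, Any]]) -> Dict[str, Any]:
    # Start from the stored partial result if the task was interrupted.
    result = previous_result or {"kicked_users": [], "new_room_id": None}
    for user_id in ("@a:example.org", "@b:example.org"):
        if user_id in result["kicked_users"]:
            continue  # already handled before the interruption
        result["kicked_users"].append(user_id)
        # ... an update_result(result) call would persist progress here ...
    return result
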
@@ -37,6 +37,8 @@ class ReceiptsHandler:
        self.server_name = hs.config.server.server_name
        self.store = hs.get_datastores().main
        self.event_auth_handler = hs.get_event_auth_handler()
        self.event_handler = hs.get_event_handler()
        self._storage_controllers = hs.get_storage_controllers()

        self.hs = hs

@@ -81,6 +83,20 @@ class ReceiptsHandler:
                )
                continue

            # Let's check that the origin server is in the room before accepting the receipt.
            # We don't want to block waiting on a partial state so take an
            # approximation if needed.
            domains = await self._storage_controllers.state.get_current_hosts_in_room_or_partial_state_approximation(
                room_id
            )
            if origin not in domains:
                logger.info(
                    "Ignoring receipt for room %r from server %s as they're not in the room",
                    room_id,
                    origin,
                )
                continue

            for receipt_type, users in room_values.items():
                for user_id, user_values in users.items():
                    if get_domain_from_id(user_id) != origin:

@@ -158,17 +174,23 @@ class ReceiptsHandler:
        self,
        room_id: str,
        receipt_type: str,
        user_id: str,
        user_id: UserID,
        event_id: str,
        thread_id: Optional[str],
    ) -> None:
        """Called when a client tells us a local user has read up to the given
        event_id in the room.
        """

        # Ensure the room/event exists, this will raise an error if the user
        # cannot view the event.
        if not await self.event_handler.get_event(user_id, room_id, event_id):
            return

        receipt = ReadReceipt(
            room_id=room_id,
            receipt_type=receipt_type,
            user_id=user_id,
            user_id=user_id.to_string(),
            event_ids=[event_id],
            thread_id=thread_id,
            data={"ts": int(self.clock.time_msec())},

@@ -20,7 +20,7 @@ import random
import string
from collections import OrderedDict
from http import HTTPStatus
from typing import TYPE_CHECKING, Any, Awaitable, Dict, List, Optional, Tuple
from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, List, Optional, Tuple

import attr
from typing_extensions import TypedDict

@@ -54,11 +54,11 @@ from synapse.events import EventBase
from synapse.events.snapshot import UnpersistedEventContext
from synapse.events.utils import copy_and_fixup_power_levels_contents
from synapse.handlers.relations import BundledAggregations
from synapse.module_api import NOT_SPAM
from synapse.rest.admin._base import assert_user_is_admin
from synapse.streams import EventSource
from synapse.types import (
    JsonDict,
    JsonMapping,
    MutableStateMap,
    Requester,
    RoomAlias,

@@ -454,7 +454,7 @@ class RoomCreationHandler:
        spam_check = await self._spam_checker_module_callbacks.user_may_create_room(
            user_id
        )
        if spam_check != NOT_SPAM:
        if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
            raise SynapseError(
                403,
                "You are not permitted to create rooms",

@@ -768,7 +768,7 @@ class RoomCreationHandler:
        spam_check = await self._spam_checker_module_callbacks.user_may_create_room(
            user_id
        )
        if spam_check != NOT_SPAM:
        if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
            raise SynapseError(
                403,
                "You are not permitted to create rooms",

@@ -1750,6 +1750,45 @@ class RoomEventSource(EventSource[RoomStreamToken, EventBase]):
        return self.store.get_current_room_stream_token_for_room_id(room_id)


class ShutdownRoomParams(TypedDict):
    """
    Attributes:
        requester_user_id:
            User who requested the action. Will be recorded as putting the room on the
            blocking list.
        new_room_user_id:
            If set, a new room will be created with this user ID
            as the creator and admin, and all users in the old room will be
            moved into that room. If not set, no new room will be created
            and the users will just be removed from the old room.
        new_room_name:
            A string representing the name of the room that new users will
            be invited to. Defaults to `Content Violation Notification`
        message:
            A string containing the first message that will be sent as
            `new_room_user_id` in the new room. Ideally this will clearly
            convey why the original room was shut down.
            Defaults to `Sharing illegal content on this server is not
            permitted and rooms in violation will be blocked.`
        block:
            If set to `true`, this room will be added to a blocking list,
            preventing future attempts to join the room. Defaults to `false`.
        purge:
            If set to `true`, purge the given room from the database.
        force_purge:
            If set to `true`, the room will be purged from database
            even if there are still users joined to the room.
    """

    requester_user_id: Optional[str]
    new_room_user_id: Optional[str]
    new_room_name: Optional[str]
    message: Optional[str]
    block: bool
    purge: bool
    force_purge: bool


class ShutdownRoomResponse(TypedDict):
    """
    Attributes:

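Callers now pass all shutdown options as a single `ShutdownRoomParams` mapping; for illustration, a fully populated instance (the values here are hypothetical):

# Illustrative sketch, not part of the diff: constructing the TypedDict
# defined above with made-up values.
params: "ShutdownRoomParams" = {
    "requester_user_id": "@admin:example.org",
    "new_room_user_id": "@abuse:example.org",
    "new_room_name": None,  # falls back to DEFAULT_ROOM_NAME
    "message": None,        # falls back to DEFAULT_MESSAGE
    "block": True,
    "purge": True,
    "force_purge": False,
}
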
@@ -1787,12 +1826,12 @@ class RoomShutdownHandler:
    async def shutdown_room(
        self,
        room_id: str,
        requester_user_id: Optional[str],
        new_room_user_id: Optional[str] = None,
        new_room_name: Optional[str] = None,
        message: Optional[str] = None,
        block: bool = False,
    ) -> ShutdownRoomResponse:
        params: ShutdownRoomParams,
        result: Optional[ShutdownRoomResponse] = None,
        update_result_fct: Optional[
            Callable[[Optional[JsonMapping]], Awaitable[None]]
        ] = None,
    ) -> Optional[ShutdownRoomResponse]:
        """
        Shuts down a room. Moves all local users and room aliases automatically
        to a new room if `new_room_user_id` is set. Otherwise local users only

@@ -1808,52 +1847,23 @@ class RoomShutdownHandler:

        Args:
            room_id: The ID of the room to shut down.
            requester_user_id:
                User who requested the action and put the room on the
                blocking list.
                If None, the action was not manually requested but instead
                triggered automatically, e.g. through a Synapse module
                or some other policy.
                MUST NOT be None if block=True.
            new_room_user_id:
                If set, a new room will be created with this user ID
                as the creator and admin, and all users in the old room will be
                moved into that room. If not set, no new room will be created
                and the users will just be removed from the old room.
            new_room_name:
                A string representing the name of the room that new users will
                be invited to. Defaults to `Content Violation Notification`
            message:
                A string containing the first message that will be sent as
                `new_room_user_id` in the new room. Ideally this will clearly
                convey why the original room was shut down.
                Defaults to `Sharing illegal content on this server is not
                permitted and rooms in violation will be blocked.`
            block:
                If set to `True`, users will be prevented from joining the old
                room. This option can also be used to pre-emptively block a room,
                even if it's unknown to this homeserver. In this case, the room
                will be blocked, and no further action will be taken. If `False`,
                attempting to delete an unknown room is invalid.
            delete_id: The delete ID identifying this delete request
            params: parameters for the shutdown, cf `ShutdownRoomParams`
            result: current status of the shutdown, if it was interrupted
            update_result_fct: function called when `result` is updated locally

                Defaults to `False`.

        Returns: a dict containing the following keys:
            kicked_users: An array of users (`user_id`) that were kicked.
            failed_to_kick_users:
                An array of users (`user_id`) that that were not kicked.
            local_aliases:
                An array of strings representing the local aliases that were
                migrated from the old room to the new.
            new_room_id:
                A string representing the room ID of the new room, or None if
                no such room was created.
        Returns: a dict matching `ShutdownRoomResponse`.
        """
        requester_user_id = params["requester_user_id"]
        new_room_user_id = params["new_room_user_id"]
        block = params["block"]

        if not new_room_name:
            new_room_name = self.DEFAULT_ROOM_NAME
        if not message:
            message = self.DEFAULT_MESSAGE
        new_room_name = (
            params["new_room_name"]
            if params["new_room_name"]
            else self.DEFAULT_ROOM_NAME
        )
        message = params["message"] if params["message"] else self.DEFAULT_MESSAGE

        if not RoomID.is_valid(room_id):
            raise SynapseError(400, "%s is not a legal room ID" % (room_id,))

@@ -1865,6 +1875,17 @@ class RoomShutdownHandler:
                403, "Shutdown of this room is forbidden", Codes.FORBIDDEN
            )

        result = (
            result
            if result
            else {
                "kicked_users": [],
                "failed_to_kick_users": [],
                "local_aliases": [],
                "new_room_id": None,
            }
        )

        # Action the block first (even if the room doesn't exist yet)
        if block:
            if requester_user_id is None:

@@ -1877,14 +1898,10 @@ class RoomShutdownHandler:

        if not await self.store.get_room(room_id):
            # if we don't know about the room, there is nothing left to do.
            return {
                "kicked_users": [],
                "failed_to_kick_users": [],
                "local_aliases": [],
                "new_room_id": None,
            }
            return result

        if new_room_user_id is not None:
        new_room_id = result.get("new_room_id")
        if new_room_user_id is not None and new_room_id is None:
            if not self.hs.is_mine_id(new_room_user_id):
                raise SynapseError(
                    400, "User must be our own: %s" % (new_room_user_id,)

@@ -1904,6 +1921,10 @@ class RoomShutdownHandler:
                ratelimit=False,
            )

            result["new_room_id"] = new_room_id
            if update_result_fct:
                await update_result_fct(result)

            logger.info(
                "Shutting down room %r, joining to new room: %r", room_id, new_room_id
            )

@@ -1917,12 +1938,9 @@ class RoomShutdownHandler:
                stream_id,
            )
        else:
            new_room_id = None
            logger.info("Shutting down room %r", room_id)

        users = await self.store.get_users_in_room(room_id)
        kicked_users = []
        failed_to_kick_users = []
        for user_id in users:
            if not self.hs.is_mine_id(user_id):
                continue

@@ -1951,7 +1969,9 @@ class RoomShutdownHandler:
                    stream_id,
                )

                await self.room_member_handler.forget(target_requester.user, room_id)
                await self.room_member_handler.forget(
                    target_requester.user, room_id, do_not_schedule_purge=True
                )

                # Join users to new room
                if new_room_user_id:

@@ -1966,15 +1986,23 @@ class RoomShutdownHandler:
                        require_consent=False,
                    )

                kicked_users.append(user_id)
                result["kicked_users"].append(user_id)
                if update_result_fct:
                    await update_result_fct(result)
            except Exception:
                logger.exception(
                    "Failed to leave old room and join new room for %r", user_id
                )
                failed_to_kick_users.append(user_id)
                result["failed_to_kick_users"].append(user_id)
                if update_result_fct:
                    await update_result_fct(result)

        # Send message in new room and move aliases
        if new_room_user_id:
            room_creator_requester = create_requester(
                new_room_user_id, authenticated_entity=requester_user_id
            )

            await self.event_creation_handler.create_and_send_nonmember_event(
                room_creator_requester,
                {

@@ -1986,18 +2014,15 @@ class RoomShutdownHandler:
                ratelimit=False,
            )

            aliases_for_room = await self.store.get_aliases_for_room(room_id)
            result["local_aliases"] = list(
                await self.store.get_aliases_for_room(room_id)
            )

            assert new_room_id is not None
            await self.store.update_aliases_for_room(
                room_id, new_room_id, requester_user_id
            )
        else:
            aliases_for_room = []
            result["local_aliases"] = []

        return {
            "kicked_users": kicked_users,
            "failed_to_kick_users": failed_to_kick_users,
            "local_aliases": list(aliases_for_room),
            "new_room_id": new_room_id,
        }
        return result

@@ -37,13 +37,13 @@ from synapse.api.ratelimiting import Ratelimiter
from synapse.event_auth import get_named_level, get_power_level_event
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.handlers.pagination import PURGE_ROOM_ACTION_NAME
from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN
from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler
from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME
from synapse.logging import opentracing
from synapse.metrics import event_processing_positions
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.module_api import NOT_SPAM
from synapse.types import (
    JsonDict,
    Requester,

@@ -169,6 +169,10 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
        self.request_ratelimiter = hs.get_request_ratelimiter()
        hs.get_notifier().add_new_join_in_room_callback(self._on_user_joined_room)

        self._forgotten_room_retention_period = (
            hs.config.server.forgotten_room_retention_period
        )

    def _on_user_joined_room(self, event_id: str, room_id: str) -> None:
        """Notify the rate limiter that a room join has occurred.

@@ -278,7 +282,9 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
        """
        raise NotImplementedError()

    async def forget(self, user: UserID, room_id: str) -> None:
    async def forget(
        self, user: UserID, room_id: str, do_not_schedule_purge: bool = False
    ) -> None:
        user_id = user.to_string()

        member = await self._storage_controllers.state.get_current_state_event(

@@ -298,6 +304,20 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
        # the table `current_state_events` and `get_current_state_events` is `None`.
        await self.store.forget(user_id, room_id)

        # If everyone locally has left the room, then there is no reason for us to keep the
        # room around and we automatically purge room after a little bit
        if (
            not do_not_schedule_purge
            and self._forgotten_room_retention_period
            and await self.store.is_locally_forgotten_room(room_id)
        ):
            await self.hs.get_task_scheduler().schedule_task(
                PURGE_ROOM_ACTION_NAME,
                resource_id=room_id,
                timestamp=self.clock.time_msec()
                + self._forgotten_room_retention_period,
            )

    async def ratelimit_multiple_invites(
        self,
        requester: Optional[Requester],

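The deferred purge above simply fires `forgotten_room_retention_period` milliseconds into the future; the timestamp arithmetic, spelled out with made-up values:

# Illustrative sketch, not part of the diff: computing the purge time for a
# locally forgotten room, with hypothetical example values.
import time

forgotten_room_retention_period_ms = 7 * 24 * 60 * 60 * 1000  # e.g. "7d"
now_ms = int(time.time() * 1000)
run_at_ms = now_ms + forgotten_room_retention_period_ms  # when PURGE_ROOM fires
assert run_at_ms - now_ms == forgotten_room_retention_period_ms
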
@@ -818,7 +838,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
            spam_check = await self._spam_checker_module_callbacks.user_may_invite(
                requester.user.to_string(), target_id, room_id
            )
            if spam_check != NOT_SPAM:
            if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
                logger.info("Blocking invite due to spam checker")
                block_invite_result = spam_check

|
@ -953,7 +973,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
|
|||
target.to_string(), room_id, is_invited=inviter is not None
|
||||
)
|
||||
)
|
||||
if spam_check != NOT_SPAM:
|
||||
if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
|
||||
raise SynapseError(
|
||||
403,
|
||||
"Not allowed to join this room",
|
||||
|
@@ -1571,7 +1591,7 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
                room_id=room_id,
            )
        )
        if spam_check != NOT_SPAM:
        if spam_check != self._spam_checker_module_callbacks.NOT_SPAM:
            raise SynapseError(
                403,
                "Cannot send threepid invite",

@@ -174,8 +174,8 @@ class SendEmailHandler:
        if raw_to == "":
            raise RuntimeError("Invalid 'to' address")

        html_part = MIMEText(html, "html", "utf8")
        text_part = MIMEText(text, "plain", "utf8")
        html_part = MIMEText(html, "html", "utf-8")
        text_part = MIMEText(text, "plain", "utf-8")

        multipart_msg = MIMEMultipart("alternative")
        multipart_msg["Subject"] = subject

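The charset string handed to `MIMEText` is emitted in the message's Content-Type header, so the canonical IANA spelling "utf-8" is the safer choice. A quick check one can run:

# Illustrative check, not part of the diff: the charset appears verbatim in
# the header, which is why "utf-8" is preferred over "utf8".
from email.mime.text import MIMEText

msg = MIMEText("héllo", "plain", "utf-8")
assert msg["Content-Type"] == 'text/plain; charset="utf-8"'
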
@@ -57,6 +57,7 @@ from synapse.storage.roommember import MemberSummary
 from synapse.types import (
     DeviceListUpdates,
     JsonDict,
+    JsonMapping,
     MutableStateMap,
     Requester,
     RoomStreamToken,

@@ -1793,19 +1794,23 @@ class SyncHandler:
             )

             if push_rules_changed:
-                global_account_data = dict(global_account_data)
-                global_account_data[
-                    AccountDataTypes.PUSH_RULES
-                ] = await self._push_rules_handler.push_rules_for_user(sync_config.user)
+                global_account_data = {
+                    AccountDataTypes.PUSH_RULES: await self._push_rules_handler.push_rules_for_user(
+                        sync_config.user
+                    ),
+                    **global_account_data,
+                }
         else:
             all_global_account_data = await self.store.get_global_account_data_for_user(
                 user_id
             )

-            global_account_data = dict(all_global_account_data)
-            global_account_data[
-                AccountDataTypes.PUSH_RULES
-            ] = await self._push_rules_handler.push_rules_for_user(sync_config.user)
+            global_account_data = {
+                AccountDataTypes.PUSH_RULES: await self._push_rules_handler.push_rules_for_user(
+                    sync_config.user
+                ),
+                **all_global_account_data,
+            }

         account_data_for_user = (
             await sync_config.filter_collection.filter_global_account_data(

@@ -1909,7 +1914,7 @@ class SyncHandler:
             blocks_all_rooms
             or sync_result_builder.sync_config.filter_collection.blocks_all_room_account_data()
         ):
-            account_data_by_room: Mapping[str, Mapping[str, JsonDict]] = {}
+            account_data_by_room: Mapping[str, Mapping[str, JsonMapping]] = {}
         elif since_token and not sync_result_builder.full_state:
             account_data_by_room = (
                 await self.store.get_updated_room_account_data_for_user(

@@ -2349,8 +2354,8 @@ class SyncHandler:
         sync_result_builder: "SyncResultBuilder",
         room_builder: "RoomSyncResultBuilder",
         ephemeral: List[JsonDict],
-        tags: Optional[Mapping[str, Mapping[str, Any]]],
-        account_data: Mapping[str, JsonDict],
+        tags: Optional[Mapping[str, JsonMapping]],
+        account_data: Mapping[str, JsonMapping],
         always_include: bool = False,
     ) -> None:
         """Populates the `joined` and `archived` section of `sync_result_builder`
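The sync hunks replace in-place mutation of `global_account_data` with building a fresh dict, because the mapping now comes straight out of a cache and must not be modified. A minimal sketch of that copy-instead-of-mutate pattern (illustrative names; note that in a dict literal the entry listed last wins on duplicate keys):

```python
from types import MappingProxyType
from typing import Mapping

PUSH_RULES = "m.push_rules"

# Stand-in for a cached, shared mapping: read-only on purpose.
cached: Mapping[str, object] = MappingProxyType({"m.direct": {}})

fresh_push_rules = {"global": []}

# cached[PUSH_RULES] = fresh_push_rules   # TypeError here, and mutating a
#                                         # plain dict would corrupt the cache

account_data = {**cached, PUSH_RULES: fresh_push_rules}

assert account_data[PUSH_RULES] == {"global": []}
assert PUSH_RULES not in cached  # the shared mapping is untouched
```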
@@ -48,6 +48,9 @@ from synapse.metrics._types import Collector
 if TYPE_CHECKING:
     import resource

+    # Old versions don't have `LiteralString`
+    from typing_extensions import LiteralString
+

 logger = logging.getLogger(__name__)

@@ -191,7 +194,7 @@ R = TypeVar("R")


 def run_as_background_process(
-    desc: str,
+    desc: "LiteralString",
     func: Callable[..., Awaitable[Optional[R]]],
     *args: Any,
     bg_start_span: bool = True,

@@ -259,7 +262,7 @@ P = ParamSpec("P")


 def wrap_as_background_process(
-    desc: str,
+    desc: "LiteralString",
 ) -> Callable[
     [Callable[P, Awaitable[Optional[R]]]],
     Callable[P, "defer.Deferred[Optional[R]]"],
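`LiteralString` (PEP 675, imported from `typing_extensions` for older Pythons) is purely a type-checking device: at runtime it behaves like `str`, but a checker rejects values assembled from runtime data. For a background-process name that becomes a metrics label, that rules out unbounded label cardinality by construction. A small sketch, assuming `typing_extensions` is installed:

```python
from typing_extensions import LiteralString


def run_named(desc: LiteralString) -> str:
    # Runtime-wise this is an ordinary str; only type checkers enforce
    # that callers pass literal-derived strings.
    return f"background process {desc!r} registered"


print(run_named("prune_cache"))            # OK: a literal
suffix = "per_room"                        # also inferred as LiteralString
print(run_named("prune_cache_" + suffix))  # OK: literals compose
# dynamic = input("name: ")
# run_named(dynamic)  # rejected by mypy/pyright: str is not LiteralString
```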
@@ -1741,7 +1741,18 @@ class ModuleApi:
         """
         # Future extensions to this method might want to e.g. allow use of `force_purge`.
-        # TODO In the future we should make sure this is persistent.
-        self._hs.get_pagination_handler().start_shutdown_and_purge_room(room_id, None)
+        await self._hs.get_pagination_handler().start_shutdown_and_purge_room(
+            room_id,
+            {
+                "new_room_user_id": None,
+                "new_room_name": None,
+                "message": None,
+                "requester_user_id": None,
+                "block": False,
+                "purge": True,
+                "force_purge": False,
+            },
+        )

     async def set_displayname(
         self,
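The new call passes a single parameter dict instead of keyword arguments. A hypothetical `TypedDict` matching the keys visible in the hunk makes the required shape explicit (the field names come from the diff; the TypedDict itself is illustrative, not Synapse's definition):

```python
from typing import Optional, TypedDict


class ShutdownRoomParams(TypedDict):
    new_room_user_id: Optional[str]
    new_room_name: Optional[str]
    message: Optional[str]
    requester_user_id: Optional[str]
    block: bool
    purge: bool
    force_purge: bool


params: ShutdownRoomParams = {
    "new_room_user_id": None,
    "new_room_name": None,
    "message": None,
    "requester_user_id": None,
    "block": False,
    "purge": True,
    "force_purge": False,
}
print(sorted(params))
```

Bundling the options into one value is what lets the task scheduler persist them and resume a shutdown/purge after a restart.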
@@ -21,6 +21,7 @@ from http import HTTPStatus
 from typing import TYPE_CHECKING, Optional, Tuple

 from synapse.api.errors import Codes, NotFoundError, SynapseError
+from synapse.handlers.pagination import PURGE_HISTORY_ACTION_NAME
 from synapse.http.server import HttpServer, JsonResource
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
 from synapse.http.site import SynapseRequest

@@ -93,7 +94,7 @@ from synapse.rest.admin.users import (
     UserTokenRestServlet,
     WhoisRestServlet,
 )
-from synapse.types import JsonDict, RoomStreamToken
+from synapse.types import JsonDict, RoomStreamToken, TaskStatus
 from synapse.util import SYNAPSE_VERSION

 if TYPE_CHECKING:

@@ -196,7 +197,7 @@ class PurgeHistoryRestServlet(RestServlet):
                 errcode=Codes.BAD_JSON,
             )

-        purge_id = self.pagination_handler.start_purge_history(
+        purge_id = await self.pagination_handler.start_purge_history(
             room_id, token, delete_local_events=delete_local_events
         )

@@ -215,11 +216,20 @@ class PurgeHistoryStatusRestServlet(RestServlet):
     ) -> Tuple[int, JsonDict]:
         await assert_requester_is_admin(self.auth, request)

-        purge_status = self.pagination_handler.get_purge_status(purge_id)
-        if purge_status is None:
+        purge_task = await self.pagination_handler.get_delete_task(purge_id)
+        if purge_task is None or purge_task.action != PURGE_HISTORY_ACTION_NAME:
             raise NotFoundError("purge id '%s' not found" % purge_id)

-        return HTTPStatus.OK, purge_status.asdict()
+        result: JsonDict = {
+            "status": purge_task.status
+            if purge_task.status == TaskStatus.COMPLETE
+            or purge_task.status == TaskStatus.FAILED
+            else "active",
+        }
+        if purge_task.error:
+            result["error"] = purge_task.error
+
+        return HTTPStatus.OK, result


 ########################################################################################
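The rewritten status servlet translates the scheduler's richer task states back into the legacy purge-status vocabulary: only terminal states pass through, everything else reports as `"active"`. A standalone sketch of that mapping (the enum values are assumptions for illustration):

```python
from enum import Enum
from typing import Optional


class TaskStatus(str, Enum):
    SCHEDULED = "scheduled"
    ACTIVE = "active"
    COMPLETE = "complete"
    FAILED = "failed"


def task_to_legacy_response(status: TaskStatus, error: Optional[str]) -> dict:
    # Terminal states are reported as-is; anything in flight is "active".
    result = {
        "status": status.value
        if status in (TaskStatus.COMPLETE, TaskStatus.FAILED)
        else "active",
    }
    if error:
        result["error"] = error
    return result


print(task_to_legacy_response(TaskStatus.SCHEDULED, None))  # {'status': 'active'}
print(task_to_legacy_response(TaskStatus.FAILED, "boom"))
```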
@@ -19,6 +19,10 @@ from urllib import parse as urlparse
 from synapse.api.constants import Direction, EventTypes, JoinRules, Membership
 from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
 from synapse.api.filtering import Filter
+from synapse.handlers.pagination import (
+    PURGE_ROOM_ACTION_NAME,
+    SHUTDOWN_AND_PURGE_ROOM_ACTION_NAME,
+)
 from synapse.http.servlet import (
     ResolveRoomIdMixin,
     RestServlet,

@@ -36,7 +40,7 @@ from synapse.rest.admin._base import (
 )
 from synapse.storage.databases.main.room import RoomSortOrder
 from synapse.streams.config import PaginationConfig
-from synapse.types import JsonDict, RoomID, UserID, create_requester
+from synapse.types import JsonDict, RoomID, ScheduledTask, UserID, create_requester
 from synapse.types.state import StateFilter
 from synapse.util import json_decoder

@@ -117,20 +121,30 @@ class RoomRestV2Servlet(RestServlet):
                 403, "Shutdown of this room is forbidden", Codes.FORBIDDEN
             )

-        delete_id = self._pagination_handler.start_shutdown_and_purge_room(
+        delete_id = await self._pagination_handler.start_shutdown_and_purge_room(
             room_id=room_id,
-            new_room_user_id=content.get("new_room_user_id"),
-            new_room_name=content.get("room_name"),
-            message=content.get("message"),
-            requester_user_id=requester.user.to_string(),
-            block=block,
-            purge=purge,
-            force_purge=force_purge,
+            shutdown_params={
+                "new_room_user_id": content.get("new_room_user_id"),
+                "new_room_name": content.get("room_name"),
+                "message": content.get("message"),
+                "requester_user_id": requester.user.to_string(),
+                "block": block,
+                "purge": purge,
+                "force_purge": force_purge,
+            },
         )

         return HTTPStatus.OK, {"delete_id": delete_id}


+def _convert_delete_task_to_response(task: ScheduledTask) -> JsonDict:
+    return {
+        "delete_id": task.id,
+        "status": task.status,
+        "shutdown_room": task.result,
+    }
+
+
 class DeleteRoomStatusByRoomIdRestServlet(RestServlet):
     """Get the status of the delete room background task."""

@@ -150,21 +164,16 @@ class DeleteRoomStatusByRoomIdRestServlet(RestServlet):
                 HTTPStatus.BAD_REQUEST, "%s is not a legal room ID" % (room_id,)
             )

-        delete_ids = self._pagination_handler.get_delete_ids_by_room(room_id)
-        if delete_ids is None:
-            raise NotFoundError("No delete task for room_id '%s' found" % room_id)
+        delete_tasks = await self._pagination_handler.get_delete_tasks_by_room(room_id)

-        response = []
-        for delete_id in delete_ids:
-            delete = self._pagination_handler.get_delete_status(delete_id)
-            if delete:
-                response += [
-                    {
-                        "delete_id": delete_id,
-                        **delete.asdict(),
-                    }
-                ]
-        return HTTPStatus.OK, {"results": cast(JsonDict, response)}
+        if delete_tasks:
+            return HTTPStatus.OK, {
+                "results": [
+                    _convert_delete_task_to_response(task) for task in delete_tasks
+                ],
+            }
+        else:
+            raise NotFoundError("No delete task for room_id '%s' found" % room_id)


 class DeleteRoomStatusByDeleteIdRestServlet(RestServlet):

@@ -181,11 +190,14 @@ class DeleteRoomStatusByDeleteIdRestServlet(RestServlet):
     ) -> Tuple[int, JsonDict]:
         await assert_requester_is_admin(self._auth, request)

-        delete_status = self._pagination_handler.get_delete_status(delete_id)
-        if delete_status is None:
+        delete_task = await self._pagination_handler.get_delete_task(delete_id)
+        if delete_task is None or (
+            delete_task.action != PURGE_ROOM_ACTION_NAME
+            and delete_task.action != SHUTDOWN_AND_PURGE_ROOM_ACTION_NAME
+        ):
             raise NotFoundError("delete id '%s' not found" % delete_id)

-        return HTTPStatus.OK, cast(JsonDict, delete_status.asdict())
+        return HTTPStatus.OK, _convert_delete_task_to_response(delete_task)


 class ListRoomRestServlet(RestServlet):

@@ -349,11 +361,15 @@ class RoomRestServlet(RestServlet):

             ret = await room_shutdown_handler.shutdown_room(
                 room_id=room_id,
-                new_room_user_id=content.get("new_room_user_id"),
-                new_room_name=content.get("room_name"),
-                message=content.get("message"),
-                requester_user_id=requester.user.to_string(),
-                block=block,
+                params={
+                    "new_room_user_id": content.get("new_room_user_id"),
+                    "new_room_name": content.get("room_name"),
+                    "message": content.get("message"),
+                    "requester_user_id": requester.user.to_string(),
+                    "block": block,
+                    "purge": purge,
+                    "force_purge": force_purge,
+                },
             )

             # Purge room
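From a client's point of view the v2 flow is unchanged: start the deletion, then poll `delete_status` with the returned `delete_id`. An illustrative stdlib-only sketch (base URL, token and room ID are placeholders, and it naturally needs a running homeserver to execute against):

```python
import json
import urllib.request
from typing import Optional

BASE = "http://localhost:8008"  # placeholder homeserver
TOKEN = "ADMIN_TOKEN"           # placeholder admin access token


def call(method: str, path: str, body: Optional[dict] = None) -> dict:
    req = urllib.request.Request(
        BASE + path,
        data=json.dumps(body).encode() if body is not None else None,
        method=method,
        headers={"Authorization": f"Bearer {TOKEN}"},
    )
    with urllib.request.urlopen(req) as resp:
        return json.load(resp)


room_id = "!room:example.org"
started = call("DELETE", f"/_synapse/admin/v2/rooms/{room_id}", {"purge": True})
status = call("GET", "/_synapse/admin/v2/rooms/delete_status/" + started["delete_id"])
print(status["status"], status.get("shutdown_room"))
```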
@@ -39,7 +39,7 @@ from synapse.rest.admin._base import (
 from synapse.rest.client._base import client_patterns
 from synapse.storage.databases.main.registration import ExternalIDReuseException
 from synapse.storage.databases.main.stats import UserSortOrder
-from synapse.types import JsonDict, UserID
+from synapse.types import JsonDict, JsonMapping, UserID

 if TYPE_CHECKING:
     from synapse.server import HomeServer

@@ -66,6 +66,7 @@ class UsersRestServletV2(RestServlet):
     The parameter `deactivated` can be used to include deactivated users.
     The parameter `order_by` can be used to order the result.
     The parameter `not_user_type` can be used to exclude certain user types.
+    The parameter `locked` can be used to include locked users.
     Possible values are `bot`, `support` or "empty string".
     "empty string" here means to exclude users without a type.
     """

@@ -107,8 +108,9 @@ class UsersRestServletV2(RestServlet):
                 "The guests parameter is not supported when MSC3861 is enabled.",
                 errcode=Codes.INVALID_PARAM,
             )
-        deactivated = parse_boolean(request, "deactivated", default=False)

+        deactivated = parse_boolean(request, "deactivated", default=False)
+        locked = parse_boolean(request, "locked", default=False)
         admins = parse_boolean(request, "admins")

         # If support for MSC3866 is not enabled, apply no filtering based on the

@@ -133,6 +135,7 @@ class UsersRestServletV2(RestServlet):
                 UserSortOrder.SHADOW_BANNED.value,
                 UserSortOrder.CREATION_TS.value,
                 UserSortOrder.LAST_SEEN_TS.value,
+                UserSortOrder.LOCKED.value,
             ),
         )

@@ -154,6 +157,7 @@ class UsersRestServletV2(RestServlet):
             direction,
             approved,
             not_user_types,
+            locked,
         )

         # If support for MSC3866 is not enabled, don't show the approval flag.

@@ -211,7 +215,7 @@ class UserRestServletV2(RestServlet):

     async def on_GET(
         self, request: SynapseRequest, user_id: str
-    ) -> Tuple[int, JsonDict]:
+    ) -> Tuple[int, JsonMapping]:
         await assert_requester_is_admin(self.auth, request)

         target_user = UserID.from_string(user_id)

@@ -226,7 +230,7 @@ class UserRestServletV2(RestServlet):

     async def on_PUT(
         self, request: SynapseRequest, user_id: str
-    ) -> Tuple[int, JsonDict]:
+    ) -> Tuple[int, JsonMapping]:
         requester = await self.auth.get_user_by_req(request)
         await assert_user_is_admin(self.auth, requester)

@@ -658,7 +662,7 @@ class WhoisRestServlet(RestServlet):

     async def on_GET(
         self, request: SynapseRequest, user_id: str
-    ) -> Tuple[int, JsonDict]:
+    ) -> Tuple[int, JsonMapping]:
         target_user = UserID.from_string(user_id)
         requester = await self.auth.get_user_by_req(request)
@@ -20,7 +20,7 @@ from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError
 from synapse.http.server import HttpServer
 from synapse.http.servlet import RestServlet, parse_json_object_from_request
 from synapse.http.site import SynapseRequest
-from synapse.types import JsonDict, RoomID
+from synapse.types import JsonDict, JsonMapping, RoomID

 from ._base import client_patterns

@@ -95,7 +95,7 @@ class AccountDataServlet(RestServlet):

     async def on_GET(
         self, request: SynapseRequest, user_id: str, account_data_type: str
-    ) -> Tuple[int, JsonDict]:
+    ) -> Tuple[int, JsonMapping]:
         requester = await self.auth.get_user_by_req(request)
         if user_id != requester.user.to_string():
             raise AuthError(403, "Cannot get account data for other users.")

@@ -106,7 +106,7 @@ class AccountDataServlet(RestServlet):
             and account_data_type == AccountDataTypes.PUSH_RULES
         ):
             account_data: Optional[
-                JsonDict
+                JsonMapping
             ] = await self._push_rules_handler.push_rules_for_user(requester.user)
         else:
             account_data = await self.store.get_global_account_data_by_type_for_user(

@@ -236,7 +236,7 @@ class RoomAccountDataServlet(RestServlet):
         user_id: str,
         room_id: str,
         account_data_type: str,
-    ) -> Tuple[int, JsonDict]:
+    ) -> Tuple[int, JsonMapping]:
         requester = await self.auth.get_user_by_req(request)
         if user_id != requester.user.to_string():
             raise AuthError(403, "Cannot get account data for other users.")

@@ -253,7 +253,7 @@ class RoomAccountDataServlet(RestServlet):
             self._hs.config.experimental.msc4010_push_rules_account_data
             and account_data_type == AccountDataTypes.PUSH_RULES
         ):
-            account_data: Optional[JsonDict] = {}
+            account_data: Optional[JsonMapping] = {}
         else:
             account_data = await self.store.get_account_data_for_room_and_type(
                 user_id, room_id, account_data_type
@@ -84,7 +84,7 @@ class ReadMarkerRestServlet(RestServlet):
                 await self.receipts_handler.received_client_receipt(
                     room_id,
                     receipt_type,
-                    user_id=requester.user.to_string(),
+                    user_id=requester.user,
                     event_id=event_id,
                     # Setting the thread ID is not possible with the /read_markers endpoint.
                     thread_id=None,

@@ -108,7 +108,7 @@ class ReceiptRestServlet(RestServlet):
         await self.receipts_handler.received_client_receipt(
             room_id,
             receipt_type,
-            user_id=requester.user.to_string(),
+            user_id=requester.user,
             event_id=event_id,
             thread_id=thread_id,
         )
@@ -19,6 +19,7 @@ import logging
 from collections import deque
 from typing import (
     TYPE_CHECKING,
+    AbstractSet,
     Any,
     Awaitable,
     Callable,

@@ -618,7 +619,7 @@ class EventsPersistenceStorageController:
             )

             for room_id, ev_ctx_rm in events_by_room.items():
-                latest_event_ids = set(
+                latest_event_ids = (
                     await self.main_store.get_latest_event_ids_in_room(room_id)
                 )
                 new_latest_event_ids = await self._calculate_new_extremities(

@@ -740,7 +741,7 @@ class EventsPersistenceStorageController:
         self,
         room_id: str,
         event_contexts: List[Tuple[EventBase, EventContext]],
-        latest_event_ids: Collection[str],
+        latest_event_ids: AbstractSet[str],
     ) -> Set[str]:
         """Calculates the new forward extremities for a room given events to
         persist.

@@ -758,8 +759,6 @@ class EventsPersistenceStorageController:
             and not event.internal_metadata.is_soft_failed()
         ]

-        latest_event_ids = set(latest_event_ids)
-
         # start with the existing forward extremities
         result = set(latest_event_ids)

@@ -798,7 +797,7 @@ class EventsPersistenceStorageController:
         self,
         room_id: str,
         events_context: List[Tuple[EventBase, EventContext]],
-        old_latest_event_ids: Set[str],
+        old_latest_event_ids: AbstractSet[str],
         new_latest_event_ids: Set[str],
     ) -> Tuple[Optional[StateMap[str]], Optional[StateMap[str]], Set[str]]:
         """Calculate the current state dict after adding some new events to
@@ -1193,6 +1193,7 @@ class DatabasePool:
         keyvalues: Dict[str, Any],
         values: Dict[str, Any],
         insertion_values: Optional[Dict[str, Any]] = None,
+        where_clause: Optional[str] = None,
         desc: str = "simple_upsert",
     ) -> bool:
         """Insert a row with values + insertion_values; on conflict, update with values.

@@ -1243,6 +1244,7 @@ class DatabasePool:
             keyvalues: The unique key columns and their new values
             values: The nonunique columns and their new values
             insertion_values: additional key/values to use only when inserting
+            where_clause: An index predicate to apply to the upsert.
             desc: description of the transaction, for logging and metrics
         Returns:
             Returns True if a row was inserted or updated (i.e. if `values` is

@@ -1263,6 +1265,7 @@ class DatabasePool:
                     keyvalues,
                     values,
                     insertion_values,
+                    where_clause,
                     db_autocommit=autocommit,
                 )
             except self.engine.module.IntegrityError as e:
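`where_clause` exists for upserts whose uniqueness is enforced by a *partial* index, so both the conflict detection and the fallback `UPDATE` must carry the same predicate. A runnable sqlite3 sketch of the emulated try-update-then-insert path (the schema is illustrative; only the table name comes from this diff):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE receipts_graph "
    "(room_id TEXT, receipt_type TEXT, user_id TEXT, data TEXT, thread_id TEXT)"
)
# Uniqueness only holds where thread_id IS NULL -- a partial index.
conn.execute(
    "CREATE UNIQUE INDEX receipts_graph_unique "
    "ON receipts_graph (room_id, receipt_type, user_id) WHERE thread_id IS NULL"
)


def simple_upsert(room_id: str, receipt_type: str, user_id: str, data: str) -> None:
    # The where_clause predicate must appear in the UPDATE too, or the
    # update could hit rows the unique index does not cover.
    cur = conn.execute(
        "UPDATE receipts_graph SET data = ? "
        "WHERE room_id = ? AND receipt_type = ? AND user_id = ? AND thread_id IS NULL",
        (data, room_id, receipt_type, user_id),
    )
    if cur.rowcount == 0:
        conn.execute(
            "INSERT INTO receipts_graph (room_id, receipt_type, user_id, data) "
            "VALUES (?, ?, ?, ?)",
            (room_id, receipt_type, user_id, data),
        )


simple_upsert("!r:x", "m.read", "@u:x", "{}")
simple_upsert("!r:x", "m.read", "@u:x", '{"ts": 1}')
print(conn.execute("SELECT COUNT(*) FROM receipts_graph").fetchone()[0])  # 1
```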
@@ -175,6 +175,7 @@ class DataStore(
         direction: Direction = Direction.FORWARDS,
         approved: bool = True,
         not_user_types: Optional[List[str]] = None,
+        locked: bool = False,
     ) -> Tuple[List[JsonDict], int]:
         """Function to retrieve a paginated list of users from
         users list. This will return a json list of users and the

@@ -194,6 +195,7 @@ class DataStore(
             direction: sort ascending or descending
             approved: whether to include approved users
             not_user_types: list of user types to exclude
+            locked: whether to include locked users
         Returns:
             A tuple of a list of mappings from user to information and a count of total users.
         """

@@ -226,6 +228,9 @@ class DataStore(
             if not deactivated:
                 filters.append("deactivated = 0")

+            if not locked:
+                filters.append("locked IS FALSE")
+
             if admins is not None:
                 if admins:
                     filters.append("admin = 1")

@@ -290,7 +295,7 @@ class DataStore(
             sql = f"""
                 SELECT name, user_type, is_guest, admin, deactivated, shadow_banned,
                 displayname, avatar_url, creation_ts * 1000 as creation_ts, approved,
-                eu.user_id is not null as erased, last_seen_ts
+                eu.user_id is not null as erased, last_seen_ts, locked
                 {sql_base}
                 ORDER BY {order_by_column} {order}, u.name ASC
                 LIMIT ? OFFSET ?
@@ -43,7 +43,7 @@ from synapse.storage.util.id_generators import (
     MultiWriterIdGenerator,
     StreamIdGenerator,
 )
-from synapse.types import JsonDict
+from synapse.types import JsonDict, JsonMapping
 from synapse.util import json_encoder
 from synapse.util.caches.descriptors import cached
 from synapse.util.caches.stream_change_cache import StreamChangeCache

@@ -119,7 +119,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
     @cached()
     async def get_global_account_data_for_user(
         self, user_id: str
-    ) -> Mapping[str, JsonDict]:
+    ) -> Mapping[str, JsonMapping]:
         """
         Get all the global client account_data for a user.

@@ -164,7 +164,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
     @cached()
     async def get_room_account_data_for_user(
         self, user_id: str
-    ) -> Mapping[str, Mapping[str, JsonDict]]:
+    ) -> Mapping[str, Mapping[str, JsonMapping]]:
         """
         Get all of the per-room client account_data for a user.

@@ -213,7 +213,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
     @cached(num_args=2, max_entries=5000, tree=True)
     async def get_global_account_data_by_type_for_user(
         self, user_id: str, data_type: str
-    ) -> Optional[JsonDict]:
+    ) -> Optional[JsonMapping]:
         """
         Returns:
             The account data.

@@ -265,7 +265,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
     @cached(num_args=2, tree=True)
     async def get_account_data_for_room(
         self, user_id: str, room_id: str
-    ) -> Mapping[str, JsonDict]:
+    ) -> Mapping[str, JsonMapping]:
         """Get all the client account_data for a user for a room.

         Args:

@@ -296,7 +296,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)
     @cached(num_args=3, max_entries=5000, tree=True)
     async def get_account_data_for_room_and_type(
         self, user_id: str, room_id: str, account_data_type: str
-    ) -> Optional[JsonDict]:
+    ) -> Optional[JsonMapping]:
         """Get the client account_data of given type for a user for a room.

         Args:

@@ -394,7 +394,7 @@ class AccountDataWorkerStore(PushRulesWorkerStore, CacheInvalidationWorkerStore)

     async def get_updated_global_account_data_for_user(
         self, user_id: str, stream_id: int
-    ) -> Dict[str, JsonDict]:
+    ) -> Mapping[str, JsonMapping]:
         """Get all the global account_data that's changed for a user.

         Args:
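Switching the return annotations from `JsonDict`/`Dict` to `JsonMapping`/`Mapping` documents that values handed out by `@cached` functions are shared and must be treated as read-only. A minimal sketch of the idea, with `JsonMapping` written out as the alias it behaves like here (an assumption for illustration, not Synapse's import):

```python
from typing import Any, Mapping

JsonMapping = Mapping[str, Any]  # read-only JSON-ish mapping


def get_global_account_data_for_user(user_id: str) -> Mapping[str, JsonMapping]:
    # Pretend this came out of a cache: advertising Mapping (no __setitem__)
    # lets the type checker catch accidental writes by callers.
    return {"m.direct": {"@friend:test": ["!room:test"]}}


data = get_global_account_data_for_user("@alice:test")
# data["m.direct"] = {}       # rejected by mypy: Mapping has no __setitem__
mutable = dict(data)          # take an explicit copy when mutation is needed
mutable["m.ignored_user_list"] = {}
print(sorted(mutable))
```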
@@ -19,6 +19,7 @@ from typing import (
     TYPE_CHECKING,
     Collection,
     Dict,
+    FrozenSet,
     Iterable,
     List,
     Optional,

@@ -47,7 +48,7 @@ from synapse.storage.database import (
 from synapse.storage.databases.main.events_worker import EventsWorkerStore
 from synapse.storage.databases.main.signatures import SignatureWorkerStore
 from synapse.storage.engines import PostgresEngine, Sqlite3Engine
-from synapse.types import JsonDict, StrCollection, StrSequence
+from synapse.types import JsonDict, StrCollection
 from synapse.util import json_encoder
 from synapse.util.caches.descriptors import cached
 from synapse.util.caches.lrucache import LruCache

@@ -1179,13 +1180,14 @@ class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBas
         )

     @cached(max_entries=5000, iterable=True)
-    async def get_latest_event_ids_in_room(self, room_id: str) -> StrSequence:
-        return await self.db_pool.simple_select_onecol(
+    async def get_latest_event_ids_in_room(self, room_id: str) -> FrozenSet[str]:
+        event_ids = await self.db_pool.simple_select_onecol(
             table="event_forward_extremities",
             keyvalues={"room_id": room_id},
             retcol="event_id",
             desc="get_latest_event_ids_in_room",
         )
+        return frozenset(event_ids)

     async def get_min_depth(self, room_id: str) -> Optional[int]:
         """For the given room, get the minimum depth we have seen for it."""
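Returning a `frozenset` from the cached extremities getter makes the cached value immutable as well as order-free: one caller mutating the result can no longer corrupt what every later cache hit sees. A sketch with `functools.lru_cache` standing in for Synapse's `@cached`:

```python
from functools import lru_cache
from typing import FrozenSet

_FAKE_TABLE = {"!room:test": ["$event1", "$event2"]}


@lru_cache(maxsize=None)
def get_latest_event_ids_in_room(room_id: str) -> FrozenSet[str]:
    # Freeze before the value escapes: every caller shares this object.
    return frozenset(_FAKE_TABLE[room_id])


extremities = get_latest_event_ids_in_room("!room:test")
# extremities.add("$other")      # AttributeError: frozenset is immutable
working = set(extremities)       # callers needing mutation take a copy
working.add("$event3")
print(sorted(extremities), sorted(working))
```

This is also why the `prefill` call later in this diff seeds the cache with `frozenset(...)` rather than `list(...)`: the cached type and the prefilled type have to agree.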
@@ -1599,10 +1599,7 @@ class EventPushActionsWorkerStore(ReceiptsWorkerStore, StreamWorkerStore, SQLBas
             txn,
             table="event_push_summary",
             key_names=("user_id", "room_id", "thread_id"),
-            key_values=[
-                (user_id, room_id, thread_id)
-                for user_id, room_id, thread_id in summaries
-            ],
+            key_values=list(summaries),
             value_names=("notif_count", "unread_count", "stream_ordering"),
             value_values=[
                 (
@@ -222,7 +222,7 @@ class PersistEventsStore:

         for room_id, latest_event_ids in new_forward_extremities.items():
             self.store.get_latest_event_ids_in_room.prefill(
-                (room_id,), list(latest_event_ids)
+                (room_id,), frozenset(latest_event_ids)
             )

     async def _get_events_which_are_prevs(self, event_ids: Iterable[str]) -> List[str]:

@@ -827,15 +827,7 @@ class PersistEventsStore:
                 "target_chain_id",
                 "target_sequence_number",
             ),
-            values=[
-                (source_id, source_seq, target_id, target_seq)
-                for (
-                    source_id,
-                    source_seq,
-                    target_id,
-                    target_seq,
-                ) in chain_links.get_additions()
-            ],
+            values=list(chain_links.get_additions()),
         )

     @staticmethod
@@ -12,11 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from typing import TYPE_CHECKING, Dict
+from typing import TYPE_CHECKING, Dict, FrozenSet

 from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
 from synapse.storage.databases.main import CacheInvalidationWorkerStore
-from synapse.types import StrCollection
 from synapse.util.caches.descriptors import cached

 if TYPE_CHECKING:

@@ -34,7 +33,7 @@ class ExperimentalFeaturesStore(CacheInvalidationWorkerStore):
         super().__init__(database, db_conn, hs)

     @cached()
-    async def list_enabled_features(self, user_id: str) -> StrCollection:
+    async def list_enabled_features(self, user_id: str) -> FrozenSet[str]:
         """
         Checks to see what features are enabled for a given user
         Args:

@@ -49,7 +48,7 @@ class ExperimentalFeaturesStore(CacheInvalidationWorkerStore):
             ["feature"],
         )

-        return [feature["feature"] for feature in enabled]
+        return frozenset(feature["feature"] for feature in enabled)

     async def set_features_for_user(
         self,
@@ -795,9 +795,7 @@ class ReceiptsWorkerStore(SQLBaseStore):
                 now - event_ts,
             )

-        await self.db_pool.runInteraction(
-            "insert_graph_receipt",
-            self._insert_graph_receipt_txn,
+        await self._insert_graph_receipt(
             room_id,
             receipt_type,
             user_id,

@@ -810,9 +808,8 @@ class ReceiptsWorkerStore(SQLBaseStore):

         return stream_id, max_persisted_id

-    def _insert_graph_receipt_txn(
+    async def _insert_graph_receipt(
         self,
-        txn: LoggingTransaction,
         room_id: str,
         receipt_type: str,
         user_id: str,

@@ -822,13 +819,6 @@ class ReceiptsWorkerStore(SQLBaseStore):
     ) -> None:
         assert self._can_write_to_receipts

-        txn.call_after(
-            self._get_receipts_for_user_with_orderings.invalidate,
-            (user_id, receipt_type),
-        )
-        # FIXME: This shouldn't invalidate the whole cache
-        txn.call_after(self._get_linearized_receipts_for_room.invalidate, (room_id,))
-
         keyvalues = {
             "room_id": room_id,
             "receipt_type": receipt_type,

@@ -840,8 +830,8 @@ class ReceiptsWorkerStore(SQLBaseStore):
         else:
             keyvalues["thread_id"] = thread_id

-        self.db_pool.simple_upsert_txn(
-            txn,
+        await self.db_pool.simple_upsert(
+            desc="insert_graph_receipt",
             table="receipts_graph",
             keyvalues=keyvalues,
             values={

@@ -851,6 +841,11 @@ class ReceiptsWorkerStore(SQLBaseStore):
             where_clause=where_clause,
         )

+        self._get_receipts_for_user_with_orderings.invalidate((user_id, receipt_type))
+
+        # FIXME: This shouldn't invalidate the whole cache
+        self._get_linearized_receipts_for_room.invalidate((room_id,))
+

 class ReceiptsBackgroundUpdateStore(SQLBaseStore):
     POPULATE_RECEIPT_EVENT_STREAM_ORDERING = "populate_event_stream_ordering"
@@ -108,6 +108,7 @@ class UserSortOrder(Enum):
     SHADOW_BANNED = "shadow_banned"
     CREATION_TS = "creation_ts"
     LAST_SEEN_TS = "last_seen_ts"
+    LOCKED = "locked"


 class StatsStore(StateDeltasStore):
@@ -23,7 +23,7 @@ from synapse.storage._base import db_to_json
 from synapse.storage.database import LoggingTransaction
 from synapse.storage.databases.main.account_data import AccountDataWorkerStore
 from synapse.storage.util.id_generators import AbstractStreamIdGenerator
-from synapse.types import JsonDict
+from synapse.types import JsonDict, JsonMapping
 from synapse.util import json_encoder
 from synapse.util.caches.descriptors import cached

@@ -34,7 +34,7 @@ class TagsWorkerStore(AccountDataWorkerStore):
     @cached()
     async def get_tags_for_user(
         self, user_id: str
-    ) -> Mapping[str, Mapping[str, JsonDict]]:
+    ) -> Mapping[str, Mapping[str, JsonMapping]]:
         """Get all the tags for a user.

@@ -109,7 +109,7 @@ class TagsWorkerStore(AccountDataWorkerStore):

     async def get_updated_tags(
         self, user_id: str, stream_id: int
-    ) -> Mapping[str, Mapping[str, JsonDict]]:
+    ) -> Mapping[str, Mapping[str, JsonMapping]]:
         """Get all the tags for the rooms where the tags have changed since the
         given version
@@ -84,9 +84,7 @@ class ExpiringCache(Generic[KT, VT]):
             return

         def f() -> "defer.Deferred[None]":
-            return run_as_background_process(
-                "prune_cache_%s" % self._cache_name, self._prune_cache
-            )
+            return run_as_background_process("prune_cache", self._prune_cache)

         self._clock.looping_call(f, self._expiry_ms / 2)
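With `desc` now typed as `LiteralString`, the `%`-formatted per-cache process name has to go, which also caps the metric label cardinality at one. The loop itself is Twisted's standard `LoopingCall`; a deterministic sketch using the test `Clock`:

```python
from twisted.internet.task import Clock, LoopingCall

clock = Clock()
ticks = []

loop = LoopingCall(lambda: ticks.append(clock.seconds()))
loop.clock = clock   # drive the loop from a deterministic test clock
loop.start(30.0)     # fires once immediately, then every 30 seconds

for _ in range(3):
    clock.advance(30.0)

print(ticks)  # [0.0, 30.0, 60.0, 90.0]
```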
@@ -1858,7 +1858,7 @@ class PresenceJoinTestCase(unittest.HomeserverTestCase):
         )

         event = self.get_success(
-            builder.build(prev_event_ids=prev_event_ids, auth_event_ids=None)
+            builder.build(prev_event_ids=list(prev_event_ids), auth_event_ids=None)
         )

         self.get_success(self.federation_event_handler.on_receive_pdu(hostname, event))
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from typing import Any, Iterable, Optional
+from typing import Any, Callable, Iterable, Optional
 from unittest.mock import Mock

 from twisted.test.proto_helpers import MemoryReactor

@@ -47,24 +47,31 @@ class BaseWorkerStoreTestCase(BaseStreamTestCase):
         self.pump(0.1)

     def check(
-        self, method: str, args: Iterable[Any], expected_result: Optional[Any] = None
+        self,
+        method: str,
+        args: Iterable[Any],
+        expected_result: Optional[Any] = None,
+        asserter: Optional[Callable[[Any, Any, Optional[Any]], None]] = None,
     ) -> None:
+        if asserter is None:
+            asserter = self.assertEqual
+
         master_result = self.get_success(getattr(self.master_store, method)(*args))
         worker_result = self.get_success(getattr(self.worker_store, method)(*args))
         if expected_result is not None:
-            self.assertEqual(
+            asserter(
                 master_result,
                 expected_result,
                 "Expected master result to be %r but was %r"
                 % (expected_result, master_result),
             )
-            self.assertEqual(
+            asserter(
                 worker_result,
                 expected_result,
                 "Expected worker result to be %r but was %r"
                 % (expected_result, worker_result),
             )
-        self.assertEqual(
+        asserter(
             master_result,
             worker_result,
             "Worker result %r does not match master result %r"
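Passing the comparison strategy into `check` replaces the old trick of monkey-patching `__eq__` onto the classes under test (removed in the next file). A self-contained re-creation of the pluggable-asserter shape with plain `unittest` and illustrative names:

```python
import unittest
from typing import Any, Callable, Optional


class CheckMixin(unittest.TestCase):
    def check(
        self,
        compute: Callable[[], Any],
        expected: Any,
        asserter: Optional[Callable[..., None]] = None,
    ) -> None:
        if asserter is None:
            asserter = self.assertEqual
        asserter(compute(), expected, "unexpected result")


class Example(CheckMixin):
    def assertSameLetters(self, first: Any, second: Any, msg: Any = None) -> None:
        # custom comparison: equal up to reordering of characters
        self.assertEqual(sorted(first), sorted(second), msg)

    def test_default(self) -> None:
        self.check(lambda: 2 + 2, 4)

    def test_custom(self) -> None:
        self.check(lambda: "tea", "eat", asserter=self.assertSameLetters)


if __name__ == "__main__":
    unittest.main()
```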
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import logging
-from typing import Any, Callable, Iterable, List, Optional, Tuple
+from typing import Any, Iterable, List, Optional, Tuple

 from canonicaljson import encode_canonical_json
 from parameterized import parameterized

@@ -21,7 +21,7 @@ from twisted.test.proto_helpers import MemoryReactor

 from synapse.api.constants import ReceiptTypes
 from synapse.api.room_versions import RoomVersions
-from synapse.events import EventBase, _EventInternalMetadata, make_event_from_dict
+from synapse.events import EventBase, make_event_from_dict
 from synapse.events.snapshot import EventContext
 from synapse.handlers.room import RoomEventSource
 from synapse.server import HomeServer

@@ -46,32 +46,9 @@ ROOM_ID = "!room:test"
 logger = logging.getLogger(__name__)


-def dict_equals(self: EventBase, other: EventBase) -> bool:
-    me = encode_canonical_json(self.get_pdu_json())
-    them = encode_canonical_json(other.get_pdu_json())
-    return me == them
-
-
-def patch__eq__(cls: object) -> Callable[[], None]:
-    eq = getattr(cls, "__eq__", None)
-    cls.__eq__ = dict_equals  # type: ignore[assignment]
-
-    def unpatch() -> None:
-        if eq is not None:
-            cls.__eq__ = eq  # type: ignore[method-assign]
-
-    return unpatch
-
-
 class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase):
     STORE_TYPE = EventsWorkerStore

-    def setUp(self) -> None:
-        # Patch up the equality operator for events so that we can check
-        # whether lists of events match using assertEqual
-        self.unpatches = [patch__eq__(_EventInternalMetadata), patch__eq__(EventBase)]
-        super().setUp()
-
     def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         super().prepare(reactor, clock, hs)

@@ -84,13 +61,19 @@ class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase):
             )
         )

-    def tearDown(self) -> None:
-        [unpatch() for unpatch in self.unpatches]
+    def assertEventsEqual(
+        self, first: EventBase, second: EventBase, msg: Optional[Any] = None
+    ) -> None:
+        self.assertEqual(
+            encode_canonical_json(first.get_pdu_json()),
+            encode_canonical_json(second.get_pdu_json()),
+            msg,
+        )

     def test_get_latest_event_ids_in_room(self) -> None:
         create = self.persist(type="m.room.create", key="", creator=USER_ID)
         self.replicate()
-        self.check("get_latest_event_ids_in_room", (ROOM_ID,), [create.event_id])
+        self.check("get_latest_event_ids_in_room", (ROOM_ID,), {create.event_id})

         join = self.persist(
             type="m.room.member",

@@ -99,7 +82,7 @@ class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase):
             prev_events=[(create.event_id, {})],
         )
         self.replicate()
-        self.check("get_latest_event_ids_in_room", (ROOM_ID,), [join.event_id])
+        self.check("get_latest_event_ids_in_room", (ROOM_ID,), {join.event_id})

     def test_redactions(self) -> None:
         self.persist(type="m.room.create", key="", creator=USER_ID)

@@ -107,7 +90,7 @@ class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase):

         msg = self.persist(type="m.room.message", msgtype="m.text", body="Hello")
         self.replicate()
-        self.check("get_event", [msg.event_id], msg)
+        self.check("get_event", [msg.event_id], msg, asserter=self.assertEventsEqual)

         redaction = self.persist(type="m.room.redaction", redacts=msg.event_id)
         self.replicate()

@@ -119,7 +102,9 @@ class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase):
         redacted = make_event_from_dict(
             msg_dict, internal_metadata_dict=msg.internal_metadata.get_dict()
         )
-        self.check("get_event", [msg.event_id], redacted)
+        self.check(
+            "get_event", [msg.event_id], redacted, asserter=self.assertEventsEqual
+        )

     def test_backfilled_redactions(self) -> None:
         self.persist(type="m.room.create", key="", creator=USER_ID)

@@ -127,7 +112,7 @@ class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase):

         msg = self.persist(type="m.room.message", msgtype="m.text", body="Hello")
         self.replicate()
-        self.check("get_event", [msg.event_id], msg)
+        self.check("get_event", [msg.event_id], msg, asserter=self.assertEventsEqual)

         redaction = self.persist(
             type="m.room.redaction", redacts=msg.event_id, backfill=True

@@ -141,7 +126,9 @@ class EventsWorkerStoreTestCase(BaseWorkerStoreTestCase):
         redacted = make_event_from_dict(
             msg_dict, internal_metadata_dict=msg.internal_metadata.get_dict()
         )
-        self.check("get_event", [msg.event_id], redacted)
+        self.check(
+            "get_event", [msg.event_id], redacted, asserter=self.assertEventsEqual
+        )

     def test_invites(self) -> None:
         self.persist(type="m.room.create", key="", creator=USER_ID)
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from typing import Any, List, Optional, Sequence
+from typing import Any, List, Optional

 from twisted.test.proto_helpers import MemoryReactor

@@ -139,7 +139,7 @@ class EventsStreamTestCase(BaseStreamTestCase):
         )

         # this is the point in the DAG where we make a fork
-        fork_point: Sequence[str] = self.get_success(
+        fork_point = self.get_success(
             self.hs.get_datastores().main.get_latest_event_ids_in_room(self.room_id)
         )

@@ -294,7 +294,7 @@ class EventsStreamTestCase(BaseStreamTestCase):
         )

         # this is the point in the DAG where we make a fork
-        fork_point: Sequence[str] = self.get_success(
+        fork_point = self.get_success(
             self.hs.get_datastores().main.get_latest_event_ids_in_room(self.room_id)
         )

@@ -316,14 +316,14 @@ class EventsStreamTestCase(BaseStreamTestCase):
         self.test_handler.received_rdata_rows.clear()

         # now roll back all that state by de-modding the users
-        prev_events = fork_point
+        prev_events = list(fork_point)
         pl_events = []
         for u in user_ids:
             pls["users"][u] = 0
             e = self.get_success(
                 inject_event(
                     self.hs,
-                    prev_event_ids=list(prev_events),
+                    prev_event_ids=prev_events,
                     type=EventTypes.PowerLevels,
                     state_key="",
                     sender=self.user_id,

@@ -261,7 +261,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase):

         builder = factory.for_room_version(room_version, event_dict)
         join_event = self.get_success(
-            builder.build(prev_event_ids=prev_event_ids, auth_event_ids=None)
+            builder.build(prev_event_ids=list(prev_event_ids), auth_event_ids=None)
         )

         self.get_success(federation.on_send_membership_event(remote_server, join_event))
@@ -15,26 +15,34 @@ import json
 import time
 import urllib.parse
 from typing import List, Optional
-from unittest.mock import Mock
+from unittest.mock import AsyncMock, Mock

 from parameterized import parameterized

+from twisted.internet.task import deferLater
 from twisted.test.proto_helpers import MemoryReactor

 import synapse.rest.admin
 from synapse.api.constants import EventTypes, Membership, RoomTypes
 from synapse.api.errors import Codes
-from synapse.handlers.pagination import PaginationHandler, PurgeStatus
+from synapse.handlers.pagination import (
+    PURGE_ROOM_ACTION_NAME,
+    SHUTDOWN_AND_PURGE_ROOM_ACTION_NAME,
+)
 from synapse.rest.client import directory, events, login, room
 from synapse.server import HomeServer
+from synapse.types import UserID
 from synapse.util import Clock
 from synapse.util.stringutils import random_string
+from synapse.util.task_scheduler import TaskScheduler

 from tests import unittest

 """Tests admin REST events for /rooms paths."""

+ONE_HOUR_IN_S = 3600
+
+
 class DeleteRoomTestCase(unittest.HomeserverTestCase):
     servlets = [
         synapse.rest.admin.register_servlets,

@@ -46,6 +54,7 @@ class DeleteRoomTestCase(unittest.HomeserverTestCase):

     def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.event_creation_handler = hs.get_event_creation_handler()
+        self.task_scheduler = hs.get_task_scheduler()
         hs.config.consent.user_consent_version = "1"

         consent_uri_builder = Mock()

@@ -476,6 +485,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):

     def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
         self.event_creation_handler = hs.get_event_creation_handler()
+        self.task_scheduler = hs.get_task_scheduler()
         hs.config.consent.user_consent_version = "1"

         consent_uri_builder = Mock()

@@ -502,6 +512,9 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
         )
         self.url_status_by_delete_id = "/_synapse/admin/v2/rooms/delete_status/"

+        self.room_member_handler = hs.get_room_member_handler()
+        self.pagination_handler = hs.get_pagination_handler()
+
     @parameterized.expand(
         [
             ("DELETE", "/_synapse/admin/v2/rooms/%s"),

@@ -661,7 +674,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
         delete_id1 = channel.json_body["delete_id"]

         # go ahead
-        self.reactor.advance(PaginationHandler.CLEAR_PURGE_AFTER_MS / 1000 / 2)
+        self.reactor.advance(TaskScheduler.KEEP_TASKS_FOR_MS / 1000 / 2)

         # second task
         channel = self.make_request(

@@ -686,12 +699,14 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
         self.assertEqual(2, len(channel.json_body["results"]))
         self.assertEqual("complete", channel.json_body["results"][0]["status"])
         self.assertEqual("complete", channel.json_body["results"][1]["status"])
-        self.assertEqual(delete_id1, channel.json_body["results"][0]["delete_id"])
-        self.assertEqual(delete_id2, channel.json_body["results"][1]["delete_id"])
+        delete_ids = {delete_id1, delete_id2}
+        self.assertTrue(channel.json_body["results"][0]["delete_id"] in delete_ids)
+        delete_ids.remove(channel.json_body["results"][0]["delete_id"])
+        self.assertTrue(channel.json_body["results"][1]["delete_id"] in delete_ids)

         # get status after more than clearing time for first task
         # second task is not cleared
-        self.reactor.advance(PaginationHandler.CLEAR_PURGE_AFTER_MS / 1000 / 2)
+        self.reactor.advance(TaskScheduler.KEEP_TASKS_FOR_MS / 1000 / 2)

         channel = self.make_request(
             "GET",

@@ -705,7 +720,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
         self.assertEqual(delete_id2, channel.json_body["results"][0]["delete_id"])

         # get status after more than clearing time for all tasks
-        self.reactor.advance(PaginationHandler.CLEAR_PURGE_AFTER_MS / 1000 / 2)
+        self.reactor.advance(TaskScheduler.KEEP_TASKS_FOR_MS / 1000 / 2)

         channel = self.make_request(
             "GET",

@@ -721,6 +736,13 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):

         body = {"new_room_user_id": self.admin_user}

+        # Mock PaginationHandler.purge_room to sleep for 100s, so we have time to do a second call
+        # before the purge is over. Note that it doesn't purge anymore, but we don't care.
+        async def purge_room(room_id: str, force: bool) -> None:
+            await deferLater(self.hs.get_reactor(), 100, lambda: None)
+
+        self.pagination_handler.purge_room = AsyncMock(side_effect=purge_room)  # type: ignore[method-assign]
+
         # first call to delete room
         # and do not wait for finish the task
         first_channel = self.make_request(

@@ -728,7 +750,6 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
             self.url.encode("ascii"),
             content=body,
             access_token=self.admin_user_tok,
-            await_result=False,
         )

         # second call to delete room

@@ -742,7 +763,7 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
         self.assertEqual(400, second_channel.code, msg=second_channel.json_body)
         self.assertEqual(Codes.UNKNOWN, second_channel.json_body["errcode"])
         self.assertEqual(
-            f"History purge already in progress for {self.room_id}",
+            f"Purge already in progress for {self.room_id}",
             second_channel.json_body["error"],
         )

@@ -751,6 +772,9 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
         self.assertEqual(200, first_channel.code, msg=first_channel.json_body)
         self.assertIn("delete_id", first_channel.json_body)

+        # wait for purge_room to finish
+        self.pump(1)
+
         # check status after finish the task
         self._test_result(
             first_channel.json_body["delete_id"],

@@ -972,6 +996,115 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
         # Assert we can no longer peek into the room
         self._assert_peek(self.room_id, expect_code=403)

+    @unittest.override_config({"forgotten_room_retention_period": "1d"})
+    def test_purge_forgotten_room(self) -> None:
+        # Create a test room
+        room_id = self.helper.create_room_as(
+            self.admin_user,
+            tok=self.admin_user_tok,
+        )
+
+        self.helper.leave(room_id, user=self.admin_user, tok=self.admin_user_tok)
+        self.get_success(
+            self.room_member_handler.forget(
+                UserID.from_string(self.admin_user), room_id
+            )
+        )
+
+        # Test that room is not yet purged
+        with self.assertRaises(AssertionError):
+            self._is_purged(room_id)
+
+        # Advance 24 hours in the future, past the `forgotten_room_retention_period`
+        self.reactor.advance(24 * ONE_HOUR_IN_S)
+
+        self._is_purged(room_id)
+
+    def test_scheduled_purge_room(self) -> None:
+        # Create a test room
+        room_id = self.helper.create_room_as(
+            self.admin_user,
+            tok=self.admin_user_tok,
+        )
+        self.helper.leave(room_id, user=self.admin_user, tok=self.admin_user_tok)
+
+        # Schedule a purge 10 seconds in the future
+        self.get_success(
+            self.task_scheduler.schedule_task(
+                PURGE_ROOM_ACTION_NAME,
+                resource_id=room_id,
+                timestamp=self.clock.time_msec() + 10 * 1000,
+            )
+        )
+
+        # Test that room is not yet purged
+        with self.assertRaises(AssertionError):
+            self._is_purged(room_id)
+
+        # Wait for next scheduler run
+        self.reactor.advance(TaskScheduler.SCHEDULE_INTERVAL_MS)
+
+        self._is_purged(room_id)
+
+    def test_schedule_shutdown_room(self) -> None:
+        # Create a test room
+        room_id = self.helper.create_room_as(
+            self.other_user,
+            tok=self.other_user_tok,
+        )
+
+        # Schedule a shutdown 10 seconds in the future
+        delete_id = self.get_success(
+            self.task_scheduler.schedule_task(
+                SHUTDOWN_AND_PURGE_ROOM_ACTION_NAME,
+                resource_id=room_id,
+                params={
+                    "requester_user_id": self.admin_user,
+                    "new_room_user_id": self.admin_user,
+                    "new_room_name": None,
+                    "message": None,
+                    "block": False,
+                    "purge": True,
+                    "force_purge": True,
+                },
+                timestamp=self.clock.time_msec() + 10 * 1000,
+            )
+        )
+
+        # Test that room is not yet shutdown
+        self._is_member(room_id, self.other_user)
+
+        # Test that room is not yet purged
+        with self.assertRaises(AssertionError):
+            self._is_purged(room_id)
+
+        # Wait for next scheduler run
+        self.reactor.advance(TaskScheduler.SCHEDULE_INTERVAL_MS)
+
+        # Test that all users has been kicked (room is shutdown)
+        self._has_no_members(room_id)
+
+        self._is_purged(room_id)
+
+        # Retrieve delete results
+        result = self.make_request(
+            "GET",
+            self.url_status_by_delete_id + delete_id,
+            access_token=self.admin_user_tok,
+        )
+        self.assertEqual(200, result.code, msg=result.json_body)
+
+        # Check that the user is in kicked_users
+        self.assertIn(
+            self.other_user, result.json_body["shutdown_room"]["kicked_users"]
+        )
+
+        new_room_id = result.json_body["shutdown_room"]["new_room_id"]
+        self.assertTrue(new_room_id)
+
+        # Check that the user is actually in the new room
+        self._is_member(new_room_id, self.other_user)
+
     def _is_blocked(self, room_id: str, expect: bool = True) -> None:
         """Assert that the room is blocked or not"""
         d = self.store.is_room_blocked(room_id)

@@ -1034,7 +1167,6 @@ class DeleteRoomV2TestCase(unittest.HomeserverTestCase):
             kicked_user: a user_id which is kicked from the room
             expect_new_room: if we expect that a new room was created
         """
-
         # get information by room_id
         channel_room_id = self.make_request(
             "GET",

@@ -1957,11 +2089,8 @@ class RoomMessagesTestCase(unittest.HomeserverTestCase):
         self.assertEqual(len(chunk), 2, [event["content"] for event in chunk])

         # Purge every event before the second event.
-        purge_id = random_string(16)
-        pagination_handler._purges_by_id[purge_id] = PurgeStatus()
         self.get_success(
-            pagination_handler._purge_history(
-                purge_id=purge_id,
+            pagination_handler.purge_history(
                 room_id=self.room_id,
                 token=second_token_str,
                 delete_local_events=True,
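The scheduled-purge tests above all follow the same fake-clock shape: schedule work for a future timestamp, assert nothing has happened yet, then advance the reactor past the scheduler's poll interval. A self-contained re-creation with Twisted's test `Clock`; the scheduler here is a toy, not Synapse's `TaskScheduler`:

```python
from twisted.internet.task import Clock

SCHEDULE_INTERVAL_S = 60.0  # assumed poll interval of the toy scheduler

clock = Clock()
purged = []


def schedule_purge(room_id: str, at: float) -> None:
    def poll() -> None:
        if clock.seconds() >= at:
            purged.append(room_id)  # timestamp reached: run the task
        else:
            clock.callLater(SCHEDULE_INTERVAL_S, poll)  # check again later

    clock.callLater(SCHEDULE_INTERVAL_S, poll)


schedule_purge("!room:test", at=10.0)
assert purged == []                    # not yet purged
clock.advance(SCHEDULE_INTERVAL_S)     # next scheduler run
assert purged == ["!room:test"]
print(purged)
```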
@@ -22,6 +22,7 @@ from synapse.server import HomeServer
 from synapse.storage.roommember import RoomsForUser
 from synapse.types import JsonDict
 from synapse.util import Clock
+from synapse.util.stringutils import random_string

 from tests import unittest
 from tests.unittest import override_config

@@ -413,11 +414,24 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase):
         self.assertEqual(messages[0]["content"]["body"], "test msg one")
         self.assertEqual(messages[0]["sender"], "@notices:test")

+        random_string(16)
+
         # shut down and purge room
         self.get_success(
-            self.room_shutdown_handler.shutdown_room(first_room_id, self.admin_user)
+            self.room_shutdown_handler.shutdown_room(
+                first_room_id,
+                {
+                    "requester_user_id": self.admin_user,
+                    "new_room_user_id": None,
+                    "new_room_name": None,
+                    "message": None,
+                    "block": False,
+                    "purge": True,
+                    "force_purge": False,
+                },
+            )
         )
-        self.get_success(self.pagination_handler.purge_room(first_room_id))
+        self.get_success(self.pagination_handler.purge_room(first_room_id, force=False))

         # user is not member anymore
         self._check_invite_and_join_status(self.other_user, 0, 0)
@@ -1146,6 +1146,32 @@ class UsersListTestCase(unittest.HomeserverTestCase):
         users = {user["name"]: user for user in channel.json_body["users"]}
         self.assertIs(users[user_id]["erased"], True)

+    def test_filter_locked(self) -> None:
+        # Create a new user.
+        user_id = self.register_user("lockme", "lockme")
+
+        # Lock them
+        self.get_success(self.store.set_user_locked_status(user_id, True))
+
+        # Locked user should appear in list users API
+        channel = self.make_request(
+            "GET",
+            self.url + "?locked=true",
+            access_token=self.admin_user_tok,
+        )
+        users = {user["name"]: user for user in channel.json_body["users"]}
+        self.assertIn(user_id, users)
+        self.assertTrue(users[user_id]["locked"])
+
+        # Locked user should not appear in list users API
+        channel = self.make_request(
+            "GET",
+            self.url + "?locked=false",
+            access_token=self.admin_user_tok,
+        )
+        users = {user["name"]: user for user in channel.json_body["users"]}
+        self.assertNotIn(user_id, users)
+
     def _order_test(
         self,
         expected_user_list: List[str],
Some files were not shown because too many files have changed in this diff.