Mirror of https://github.com/element-hq/synapse.git (synced 2024-12-19 09:31:35 +03:00)

Commit 97795d8437: Merge remote-tracking branch 'origin/develop' into matrix-org-hotfixes

96 changed files with 8249 additions and 648 deletions
.github/workflows/tests.yml (vendored, 22 lines changed)

@@ -21,6 +21,7 @@ jobs:
       trial: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.trial }}
       integration: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.integration }}
       linting: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.linting }}
+      linting_readme: ${{ !startsWith(github.ref, 'refs/pull/') || steps.filter.outputs.linting_readme }}
     steps:
       - uses: dorny/paths-filter@v3
         id: filter

@@ -73,6 +74,9 @@ jobs:
             - 'poetry.lock'
            - '.github/workflows/tests.yml'
+
+          linting_readme:
+            - 'README.rst'

   check-sampleconfig:
     runs-on: ubuntu-latest
     needs: changes

@@ -135,7 +139,7 @@ jobs:
       - name: Semantic checks (ruff)
         # --quiet suppresses the update check.
-        run: poetry run ruff --quiet .
+        run: poetry run ruff check --quiet .

   lint-mypy:
     runs-on: ubuntu-latest

@@ -269,6 +273,20 @@ jobs:
       - run: cargo fmt --check

+  # This is to detect issues with the rst file, which can otherwise cause issues
+  # when uploading packages to PyPi.
+  lint-readme:
+    runs-on: ubuntu-latest
+    needs: changes
+    if: ${{ needs.changes.outputs.linting_readme == 'true' }}
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
+        with:
+          python-version: "3.x"
+      - run: "pip install rstcheck"
+      - run: "rstcheck --report-level=WARNING README.rst"
+
   # Dummy step to gate other tests on without repeating the whole list
   linting-done:
     if: ${{ !cancelled() }} # Run this even if prior jobs were skipped

@@ -284,6 +302,7 @@ jobs:
       - lint-clippy
       - lint-clippy-nightly
       - lint-rustfmt
+      - lint-readme
     runs-on: ubuntu-latest
     steps:
       - uses: matrix-org/done-action@v2

@@ -301,6 +320,7 @@ jobs:
             lint-clippy
             lint-clippy-nightly
             lint-rustfmt
+            lint-readme

   calculate-test-jobs:
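The new lint-readme job can be reproduced locally before pushing; a minimal sketch mirroring the two run steps above, assuming Python and pip are available:

    pip install rstcheck
    rstcheck --report-level=WARNING README.rst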
CHANGES.md (23 lines changed)

@@ -1,3 +1,24 @@
+# Synapse 1.110.0 (2024-07-03)
+
+No significant changes since 1.110.0rc3.
+
+
+# Synapse 1.110.0rc3 (2024-07-02)
+
+### Bugfixes
+
+- Fix bug where `/sync` requests could get blocked indefinitely after an upgrade from Synapse versions before v1.109.0. ([\#17386](https://github.com/element-hq/synapse/issues/17386), [\#17391](https://github.com/element-hq/synapse/issues/17391))
+
+### Internal Changes
+
+- Limit size of presence EDUs to 50 entries. ([\#17371](https://github.com/element-hq/synapse/issues/17371))
+- Fix building debian package for debian sid. ([\#17389](https://github.com/element-hq/synapse/issues/17389))
+
+
 # Synapse 1.110.0rc2 (2024-06-26)

 ### Internal Changes

@@ -27,7 +48,7 @@
   This is useful for scripts that bootstrap user accounts with initial passwords. ([\#17304](https://github.com/element-hq/synapse/issues/17304))
 - Add support for via query parameter from [MSC4156](https://github.com/matrix-org/matrix-spec-proposals/pull/4156). ([\#17322](https://github.com/element-hq/synapse/issues/17322))
 - Add `is_invite` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint. ([\#17335](https://github.com/element-hq/synapse/issues/17335))
-- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/rav/authentication-for-media/proposals/3916-authentication-for-media.md) by adding a federation /download endpoint. ([\#17350](https://github.com/element-hq/synapse/issues/17350))
+- Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/main/proposals/3916-authentication-for-media.md) by adding a federation /download endpoint. ([\#17350](https://github.com/element-hq/synapse/issues/17350))

 ### Bugfixes
Cargo.lock (generated, 8 lines changed)

@@ -234,9 +234,9 @@ dependencies = [
 [[package]]
 name = "log"
-version = "0.4.21"
+version = "0.4.22"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c"
+checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"

 [[package]]
 name = "memchr"

@@ -505,9 +505,9 @@ dependencies = [
 [[package]]
 name = "serde_json"
-version = "1.0.117"
+version = "1.0.119"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3"
+checksum = "e8eddb61f0697cc3989c5d64b452f5488e2b8a60fd7d5076a3045076ffef8cb0"
 dependencies = [
  "itoa",
  "ryu",
README.rst

@@ -179,10 +179,10 @@ desired ``localpart`` in the 'User name' box.
 -----------------------

 Enterprise quality support for Synapse including SLAs is available as part of an
-`Element Server Suite (ESS) <https://element.io/pricing>` subscription.
+`Element Server Suite (ESS) <https://element.io/pricing>`_ subscription.

-If you are an existing ESS subscriber then you can raise a `support request <https://ems.element.io/support>`
-and access the `knowledge base <https://ems-docs.element.io>`.
+If you are an existing ESS subscriber then you can raise a `support request <https://ems.element.io/support>`_
+and access the `knowledge base <https://ems-docs.element.io>`_.

 🤝 Community support
 --------------------
changelog.d/17318.misc (new file, 1 line)

@@ -0,0 +1 @@
+Make the release script create a release branch for Complement as well.

changelog.d/17320.feature (new file, 1 line)

@@ -0,0 +1 @@
+Add `rooms` data to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.

changelog.d/17337.feature (new file, 1 line)

@@ -0,0 +1 @@
+Add `room_types`/`not_room_types` filtering to experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.

changelog.d/17342.feature (new file, 1 line)

@@ -0,0 +1 @@
+Return "required state" in experimental [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575) Sliding Sync `/sync` endpoint.

changelog.d/17356.doc (new file, 1 line)

@@ -0,0 +1 @@
+Clarify `url_preview_url_blacklist` is a usability feature.

changelog.d/17362.bugfix (new file, 1 line)

@@ -0,0 +1 @@
+Fix rare race which causes no new to-device messages to be received from remote server.

changelog.d/17363.misc (new file, 1 line)

@@ -0,0 +1 @@
+Fix uploading packages to PyPi.

changelog.d/17365.feature (new file, 1 line)

@@ -0,0 +1 @@
+Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/main/proposals/3916-authentication-for-media.md) by adding _matrix/client/v1/media/download endpoint.

changelog.d/17367.misc (new file, 1 line)

@@ -0,0 +1 @@
+Add CI check for the README.
changelog.d/17371.misc (deleted; entry folded into CHANGES.md above)

@@ -1 +0,0 @@
-Limit size of presence EDUs to 50 entries.
changelog.d/17379.doc (new file, 1 line)

@@ -0,0 +1 @@
+Fix broken links in README.

changelog.d/17381.misc (new file, 1 line)

@@ -0,0 +1 @@
+Fix linting errors from new `ruff` version.
changelog.d/17386.bugfix (deleted; entry folded into CHANGES.md above)

@@ -1 +0,0 @@
-Fix bug where `/sync` requests could get blocked indefinitely after an upgrade from Synapse versions before v1.109.0.
changelog.d/17388.feature (new file, 3 lines)

@@ -0,0 +1,3 @@
+Support [MSC3916](https://github.com/matrix-org/matrix-spec-proposals/blob/rav/authentication-for-media/proposals/3916-authentication-for-media.md)
+by adding `_matrix/client/v1/media/thumbnail`, `_matrix/federation/v1/media/thumbnail` endpoints and stabilizing the
+remaining `_matrix/client/v1/media` endpoints.
changelog.d/17389.misc (deleted; entry folded into CHANGES.md above)

@@ -1 +0,0 @@
-Fix building debian package for debian sid.
changelog.d/17390.misc (new file, 1 line)

@@ -0,0 +1 @@
+Fix building debian packages on non-clean checkouts.
changelog.d/17391.bugfix (deleted; entry folded into CHANGES.md above)

@@ -1 +0,0 @@
-Fix bug where `/sync` requests could get blocked indefinitely after an upgrade from Synapse versions before v1.109.0.
changelog.d/17392.misc (new file, 1 line)

@@ -0,0 +1 @@
+Finish up work to allow per-user feature flags.

changelog.d/17393.misc (new file, 1 line)

@@ -0,0 +1 @@
+Allow enabling sliding sync per-user.

changelog.d/17399.doc (new file, 1 line)

@@ -0,0 +1 @@
+Clarify that changelog content *and file extension* need to match in order for entries to merge.

changelog.d/17400.feature (new file, 1 line)

@@ -0,0 +1 @@
+Forget all of a user's rooms upon deactivation, enabling future purges.
debian/changelog (vendored, 12 lines changed)

@@ -1,3 +1,15 @@
+matrix-synapse-py3 (1.110.0) stable; urgency=medium
+
+  * New Synapse release 1.110.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Wed, 03 Jul 2024 09:08:59 -0600
+
+matrix-synapse-py3 (1.110.0~rc3) stable; urgency=medium
+
+  * New Synapse release 1.110.0rc3.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 02 Jul 2024 08:28:56 -0600
+
 matrix-synapse-py3 (1.110.0~rc2) stable; urgency=medium

   * New Synapse release 1.110.0rc2.
docker/build_debian.sh

@@ -11,6 +11,9 @@ DIST=$(cut -d ':' -f2 <<< "${distro:?}")
 cp -aT /synapse/source /synapse/build
 cd /synapse/build

+# Delete any existing `.so` files to ensure a clean build.
+rm -f /synapse/build/synapse/*.so
+
 # if this is a prerelease, set the Section accordingly.
 #
 # When the package is later added to the package repo, reprepro will use the
docker/configure_workers_and_start.py

@@ -117,7 +117,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
     },
     "media_repository": {
         "app": "synapse.app.generic_worker",
-        "listener_resources": ["media"],
+        "listener_resources": ["media", "client"],
         "endpoint_patterns": [
             "^/_matrix/media/",
             "^/_synapse/admin/v1/purge_media_cache$",

@@ -125,6 +125,7 @@ WORKERS_CONFIG: Dict[str, Dict[str, Any]] = {
             "^/_synapse/admin/v1/user/.*/media.*$",
             "^/_synapse/admin/v1/media/.*$",
             "^/_synapse/admin/v1/quarantine_media/.*$",
+            "^/_matrix/client/v1/media/.*$",
         ],
         # The first configured media worker will run the media background jobs
         "shared_extra_conf": {
docs/admin_api/experimental_features.md

@@ -1,21 +1,17 @@
 # Experimental Features API

 This API allows a server administrator to enable or disable some experimental features on a per-user
-basis. The currently supported features are:
-- [MSC3026](https://github.com/matrix-org/matrix-spec-proposals/pull/3026): busy
-presence state enabled
-- [MSC3881](https://github.com/matrix-org/matrix-spec-proposals/pull/3881): enable remotely toggling push notifications
-for another client
-- [MSC3967](https://github.com/matrix-org/matrix-spec-proposals/pull/3967): do not require
-UIA when first uploading cross-signing keys.
+basis. The currently supported features are:
+- [MSC3881](https://github.com/matrix-org/matrix-spec-proposals/pull/3881): enable remotely toggling push notifications
+for another client
+- [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): enable experimental sliding sync support

 To use it, you will need to authenticate by providing an `access_token`
 for a server admin: see [Admin API](../usage/administration/admin_api/).

 ## Enabling/Disabling Features

-This API allows a server administrator to enable experimental features for a given user. The request must
+This API allows a server administrator to enable experimental features for a given user. The request must
 provide a body containing the user id and listing the features to enable/disable in the following format:
 ```json
 {

@@ -35,7 +31,7 @@ PUT /_synapse/admin/v1/experimental_features/<user_id>
 ```

 ## Listing Enabled Features
-
+
 To list which features are enabled/disabled for a given user send a request to the following API:

 ```

@@ -52,4 +48,4 @@ user like so:
     "msc3967": false
-  }
-```
+  }
+```
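As a usage illustration of the admin API documented in this diff, enabling a feature for a user might look like the following (a hedged sketch: the user ID and token are placeholders, and `msc3881` is one of the feature names listed above):

    curl -X PUT \
      --header "Authorization: Bearer <admin_access_token>" \
      --data '{"features": {"msc3881": true}}' \
      "http://localhost:8008/_synapse/admin/v1/experimental_features/@alice:example.org"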
docs/development/contributing_guide.md

@@ -449,9 +449,9 @@ For example, a fix in PR #1234 would have its changelog entry in
 > The security levels of Florbs are now validated when received
 > via the `/federation/florb` endpoint. Contributed by Jane Matrix.

-If there are multiple pull requests involved in a single bugfix/feature/etc,
-then the content for each `changelog.d` file should be the same. Towncrier will
-merge the matching files together into a single changelog entry when we come to
+If there are multiple pull requests involved in a single bugfix/feature/etc, then the
+content for each `changelog.d` file and file extension should be the same. Towncrier
+will merge the matching files together into a single changelog entry when we come to
 release.

 ### How do I know what to call the changelog file before I create the PR?
|
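For instance, under the clarified rule a feature spanning two (hypothetical) pull requests #1234 and #1235 would carry two files whose extension and content both match, so Towncrier folds them into one entry:

    changelog.d/1234.feature: Validate the security levels of Florbs.
    changelog.d/1235.feature: Validate the security levels of Florbs.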
docs/upgrade.md

@@ -117,6 +117,19 @@ each upgrade are complete before moving on to the next upgrade, to avoid
 stacking them up. You can monitor the currently running background updates with
 [the Admin API](usage/administration/admin_api/background_updates.html#status).

+# Upgrading to v1.111.0
+
+## New worker endpoints for authenticated client media
+
+[Media repository workers](./workers.md#synapseappmedia_repository) handling
+Media APIs can now handle the following endpoint pattern:
+
+```
+^/_matrix/client/v1/media/.*$
+```
+
+Please update your reverse proxy configuration.
+
 # Upgrading to v1.106.0

 ## Minimum supported Rust version
|
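A hedged sketch of that reverse-proxy update for nginx (the upstream address 127.0.0.1:8085 is an assumed media-worker port, not something specified by the commit):

    # Route the new authenticated client media endpoints to the media worker,
    # alongside the existing /_matrix/media/ location block.
    location ~ ^/_matrix/client/v1/media/ {
        proxy_pass http://127.0.0.1:8085;
        proxy_set_header X-Forwarded-For $remote_addr;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Host $host;
    }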
docs/usage/configuration/config_documentation.md

@@ -1976,9 +1976,10 @@ This will not prevent the listed domains from accessing media themselves.
 It simply prevents users on this server from downloading media originating
 from the listed servers.

-This will have no effect on media originating from the local server.
-This only affects media downloaded from other Matrix servers, to
-block domains from URL previews see [`url_preview_url_blacklist`](#url_preview_url_blacklist).
+This will have no effect on media originating from the local server. This only
+affects media downloaded from other Matrix servers, to control URL previews see
+[`url_preview_ip_range_blacklist`](#url_preview_ip_range_blacklist) or
+[`url_preview_url_blacklist`](#url_preview_url_blacklist).

 Defaults to an empty list (nothing blocked).
@@ -2130,12 +2131,14 @@ url_preview_ip_range_whitelist:
 ---
 ### `url_preview_url_blacklist`

-Optional list of URL matches that the URL preview spider is
-denied from accessing. You should use `url_preview_ip_range_blacklist`
-in preference to this, otherwise someone could define a public DNS
-entry that points to a private IP address and circumvent the blacklist.
-This is more useful if you know there is an entire shape of URL that
-you know that will never want synapse to try to spider.
+Optional list of URL matches that the URL preview spider is denied from
+accessing. This is a usability feature, not a security one. You should use
+`url_preview_ip_range_blacklist` in preference to this, otherwise someone could
+define a public DNS entry that points to a private IP address and circumvent
+the blacklist. Applications that perform redirects or serve different content
+when detecting that Synapse is accessing them can also bypass the blacklist.
+This is more useful if you know there is an entire shape of URL that you know
+that you do not want Synapse to preview.

 Each list entry is a dictionary of url component attributes as returned
 by urlparse.urlsplit as applied to the absolute form of the URL. See
|
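To make the entry format concrete, a hedged YAML sketch (the domains are invented; the keys mirror the urlsplit attribute names the docs reference):

    url_preview_url_blacklist:
      # Deny previews for any URL on this host, regardless of scheme:
      - netloc: "example.com"
      # Deny previews for one URL shape on another host:
      - scheme: "http"
        netloc: "example.org"
        path: "/private"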
docs/workers.md

@@ -739,6 +739,7 @@ An example for a federation sender instance:
 Handles the media repository. It can handle all endpoints starting with:

     /_matrix/media/
+    /_matrix/client/v1/media/

 ... and the following regular expressions matching media-specific administration APIs:
mypy.ini (3 lines changed)

@@ -96,3 +96,6 @@ ignore_missing_imports = True
 # https://github.com/twisted/treq/pull/366
 [mypy-treq.*]
 ignore_missing_imports = True
+
+[mypy-multipart.*]
+ignore_missing_imports = True
poetry.lock (generated, 133 lines changed)

@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.2 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.

 [[package]]
 name = "annotated-types"

@@ -182,13 +182,13 @@ files = [
 [[package]]
 name = "certifi"
-version = "2023.7.22"
+version = "2024.7.4"
 description = "Python package for providing Mozilla's CA Bundle."
 optional = false
 python-versions = ">=3.6"
 files = [
-    {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"},
-    {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"},
+    {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"},
+    {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"},
 ]

 [[package]]

@@ -403,43 +403,43 @@ files = [
 [[package]]
 name = "cryptography"
-version = "42.0.7"
+version = "42.0.8"
 description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "cryptography-42.0.7-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:a987f840718078212fdf4504d0fd4c6effe34a7e4740378e59d47696e8dfb477"},
-    {file = "cryptography-42.0.7-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:bd13b5e9b543532453de08bcdc3cc7cebec6f9883e886fd20a92f26940fd3e7a"},
-    {file = "cryptography-42.0.7-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a79165431551042cc9d1d90e6145d5d0d3ab0f2d66326c201d9b0e7f5bf43604"},
-    {file = "cryptography-42.0.7-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a47787a5e3649008a1102d3df55424e86606c9bae6fb77ac59afe06d234605f8"},
-    {file = "cryptography-42.0.7-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:02c0eee2d7133bdbbc5e24441258d5d2244beb31da5ed19fbb80315f4bbbff55"},
-    {file = "cryptography-42.0.7-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:5e44507bf8d14b36b8389b226665d597bc0f18ea035d75b4e53c7b1ea84583cc"},
-    {file = "cryptography-42.0.7-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:7f8b25fa616d8b846aef64b15c606bb0828dbc35faf90566eb139aa9cff67af2"},
-    {file = "cryptography-42.0.7-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:93a3209f6bb2b33e725ed08ee0991b92976dfdcf4e8b38646540674fc7508e13"},
-    {file = "cryptography-42.0.7-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:e6b8f1881dac458c34778d0a424ae5769de30544fc678eac51c1c8bb2183e9da"},
-    {file = "cryptography-42.0.7-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:3de9a45d3b2b7d8088c3fbf1ed4395dfeff79d07842217b38df14ef09ce1d8d7"},
-    {file = "cryptography-42.0.7-cp37-abi3-win32.whl", hash = "sha256:789caea816c6704f63f6241a519bfa347f72fbd67ba28d04636b7c6b7da94b0b"},
-    {file = "cryptography-42.0.7-cp37-abi3-win_amd64.whl", hash = "sha256:8cb8ce7c3347fcf9446f201dc30e2d5a3c898d009126010cbd1f443f28b52678"},
-    {file = "cryptography-42.0.7-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:a3a5ac8b56fe37f3125e5b72b61dcde43283e5370827f5233893d461b7360cd4"},
-    {file = "cryptography-42.0.7-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:779245e13b9a6638df14641d029add5dc17edbef6ec915688f3acb9e720a5858"},
-    {file = "cryptography-42.0.7-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d563795db98b4cd57742a78a288cdbdc9daedac29f2239793071fe114f13785"},
-    {file = "cryptography-42.0.7-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:31adb7d06fe4383226c3e963471f6837742889b3c4caa55aac20ad951bc8ffda"},
-    {file = "cryptography-42.0.7-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:efd0bf5205240182e0f13bcaea41be4fdf5c22c5129fc7ced4a0282ac86998c9"},
-    {file = "cryptography-42.0.7-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:a9bc127cdc4ecf87a5ea22a2556cab6c7eda2923f84e4f3cc588e8470ce4e42e"},
-    {file = "cryptography-42.0.7-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:3577d029bc3f4827dd5bf8bf7710cac13527b470bbf1820a3f394adb38ed7d5f"},
-    {file = "cryptography-42.0.7-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:2e47577f9b18723fa294b0ea9a17d5e53a227867a0a4904a1a076d1646d45ca1"},
-    {file = "cryptography-42.0.7-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:1a58839984d9cb34c855197043eaae2c187d930ca6d644612843b4fe8513c886"},
-    {file = "cryptography-42.0.7-cp39-abi3-win32.whl", hash = "sha256:e6b79d0adb01aae87e8a44c2b64bc3f3fe59515280e00fb6d57a7267a2583cda"},
-    {file = "cryptography-42.0.7-cp39-abi3-win_amd64.whl", hash = "sha256:16268d46086bb8ad5bf0a2b5544d8a9ed87a0e33f5e77dd3c3301e63d941a83b"},
-    {file = "cryptography-42.0.7-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2954fccea107026512b15afb4aa664a5640cd0af630e2ee3962f2602693f0c82"},
-    {file = "cryptography-42.0.7-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:362e7197754c231797ec45ee081f3088a27a47c6c01eff2ac83f60f85a50fe60"},
-    {file = "cryptography-42.0.7-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:4f698edacf9c9e0371112792558d2f705b5645076cc0aaae02f816a0171770fd"},
-    {file = "cryptography-42.0.7-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5482e789294854c28237bba77c4c83be698be740e31a3ae5e879ee5444166582"},
-    {file = "cryptography-42.0.7-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e9b2a6309f14c0497f348d08a065d52f3020656f675819fc405fb63bbcd26562"},
-    {file = "cryptography-42.0.7-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d8e3098721b84392ee45af2dd554c947c32cc52f862b6a3ae982dbb90f577f14"},
-    {file = "cryptography-42.0.7-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c65f96dad14f8528a447414125e1fc8feb2ad5a272b8f68477abbcc1ea7d94b9"},
-    {file = "cryptography-42.0.7-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:36017400817987670037fbb0324d71489b6ead6231c9604f8fc1f7d008087c68"},
-    {file = "cryptography-42.0.7.tar.gz", hash = "sha256:ecbfbc00bf55888edda9868a4cf927205de8499e7fabe6c050322298382953f2"},
+    {file = "cryptography-42.0.8-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:81d8a521705787afe7a18d5bfb47ea9d9cc068206270aad0b96a725022e18d2e"},
+    {file = "cryptography-42.0.8-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:961e61cefdcb06e0c6d7e3a1b22ebe8b996eb2bf50614e89384be54c48c6b63d"},
+    {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e3ec3672626e1b9e55afd0df6d774ff0e953452886e06e0f1eb7eb0c832e8902"},
+    {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e599b53fd95357d92304510fb7bda8523ed1f79ca98dce2f43c115950aa78801"},
+    {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:5226d5d21ab681f432a9c1cf8b658c0cb02533eece706b155e5fbd8a0cdd3949"},
+    {file = "cryptography-42.0.8-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:6b7c4f03ce01afd3b76cf69a5455caa9cfa3de8c8f493e0d3ab7d20611c8dae9"},
+    {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:2346b911eb349ab547076f47f2e035fc8ff2c02380a7cbbf8d87114fa0f1c583"},
+    {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:ad803773e9df0b92e0a817d22fd8a3675493f690b96130a5e24f1b8fabbea9c7"},
+    {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:2f66d9cd9147ee495a8374a45ca445819f8929a3efcd2e3df6428e46c3cbb10b"},
+    {file = "cryptography-42.0.8-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:d45b940883a03e19e944456a558b67a41160e367a719833c53de6911cabba2b7"},
+    {file = "cryptography-42.0.8-cp37-abi3-win32.whl", hash = "sha256:a0c5b2b0585b6af82d7e385f55a8bc568abff8923af147ee3c07bd8b42cda8b2"},
+    {file = "cryptography-42.0.8-cp37-abi3-win_amd64.whl", hash = "sha256:57080dee41209e556a9a4ce60d229244f7a66ef52750f813bfbe18959770cfba"},
+    {file = "cryptography-42.0.8-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:dea567d1b0e8bc5764b9443858b673b734100c2871dc93163f58c46a97a83d28"},
+    {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4783183f7cb757b73b2ae9aed6599b96338eb957233c58ca8f49a49cc32fd5e"},
+    {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0608251135d0e03111152e41f0cc2392d1e74e35703960d4190b2e0f4ca9c70"},
+    {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:dc0fdf6787f37b1c6b08e6dfc892d9d068b5bdb671198c72072828b80bd5fe4c"},
+    {file = "cryptography-42.0.8-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:9c0c1716c8447ee7dbf08d6db2e5c41c688544c61074b54fc4564196f55c25a7"},
+    {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:fff12c88a672ab9c9c1cf7b0c80e3ad9e2ebd9d828d955c126be4fd3e5578c9e"},
+    {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:cafb92b2bc622cd1aa6a1dce4b93307792633f4c5fe1f46c6b97cf67073ec961"},
+    {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:31f721658a29331f895a5a54e7e82075554ccfb8b163a18719d342f5ffe5ecb1"},
+    {file = "cryptography-42.0.8-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b297f90c5723d04bcc8265fc2a0f86d4ea2e0f7ab4b6994459548d3a6b992a14"},
+    {file = "cryptography-42.0.8-cp39-abi3-win32.whl", hash = "sha256:2f88d197e66c65be5e42cd72e5c18afbfae3f741742070e3019ac8f4ac57262c"},
+    {file = "cryptography-42.0.8-cp39-abi3-win_amd64.whl", hash = "sha256:fa76fbb7596cc5839320000cdd5d0955313696d9511debab7ee7278fc8b5c84a"},
+    {file = "cryptography-42.0.8-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ba4f0a211697362e89ad822e667d8d340b4d8d55fae72cdd619389fb5912eefe"},
+    {file = "cryptography-42.0.8-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:81884c4d096c272f00aeb1f11cf62ccd39763581645b0812e99a91505fa48e0c"},
+    {file = "cryptography-42.0.8-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c9bb2ae11bfbab395bdd072985abde58ea9860ed84e59dbc0463a5d0159f5b71"},
+    {file = "cryptography-42.0.8-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7016f837e15b0a1c119d27ecd89b3515f01f90a8615ed5e9427e30d9cdbfed3d"},
+    {file = "cryptography-42.0.8-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5a94eccb2a81a309806027e1670a358b99b8fe8bfe9f8d329f27d72c094dde8c"},
+    {file = "cryptography-42.0.8-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dec9b018df185f08483f294cae6ccac29e7a6e0678996587363dc352dc65c842"},
+    {file = "cryptography-42.0.8-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:343728aac38decfdeecf55ecab3264b015be68fc2816ca800db649607aeee648"},
+    {file = "cryptography-42.0.8-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:013629ae70b40af70c9a7a5db40abe5d9054e6f4380e50ce769947b73bf3caad"},
+    {file = "cryptography-42.0.8.tar.gz", hash = "sha256:8d09d05439ce7baa8e9e95b07ec5b6c886f548deb7e0f69ef25f64b3bce842f2"},
 ]

 [package.dependencies]

@@ -2039,6 +2039,20 @@ files = [
 [package.dependencies]
 six = ">=1.5"

+[[package]]
+name = "python-multipart"
+version = "0.0.9"
+description = "A streaming multipart parser for Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "python_multipart-0.0.9-py3-none-any.whl", hash = "sha256:97ca7b8ea7b05f977dc3849c3ba99d51689822fab725c3703af7c866a0c2b215"},
+    {file = "python_multipart-0.0.9.tar.gz", hash = "sha256:03f54688c663f1b7977105f021043b0793151e4cb1c1a9d4a11fc13d622c4026"},
+]
+
+[package.extras]
+dev = ["atomicwrites (==1.4.1)", "attrs (==23.2.0)", "coverage (==7.4.1)", "hatch", "invoke (==2.2.0)", "more-itertools (==10.2.0)", "pbr (==6.0.0)", "pluggy (==1.4.0)", "py (==1.11.0)", "pytest (==8.0.0)", "pytest-cov (==4.1.0)", "pytest-timeout (==2.2.0)", "pyyaml (==6.0.1)", "ruff (==0.2.1)"]
+
 [[package]]
 name = "pytz"
 version = "2022.7.1"

@@ -2331,28 +2345,29 @@ files = [
 [[package]]
 name = "ruff"
-version = "0.3.7"
+version = "0.5.0"
 description = "An extremely fast Python linter and code formatter, written in Rust."
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "ruff-0.3.7-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:0e8377cccb2f07abd25e84fc5b2cbe48eeb0fea9f1719cad7caedb061d70e5ce"},
-    {file = "ruff-0.3.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:15a4d1cc1e64e556fa0d67bfd388fed416b7f3b26d5d1c3e7d192c897e39ba4b"},
-    {file = "ruff-0.3.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d28bdf3d7dc71dd46929fafeec98ba89b7c3550c3f0978e36389b5631b793663"},
-    {file = "ruff-0.3.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:379b67d4f49774ba679593b232dcd90d9e10f04d96e3c8ce4a28037ae473f7bb"},
-    {file = "ruff-0.3.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c060aea8ad5ef21cdfbbe05475ab5104ce7827b639a78dd55383a6e9895b7c51"},
-    {file = "ruff-0.3.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:ebf8f615dde968272d70502c083ebf963b6781aacd3079081e03b32adfe4d58a"},
-    {file = "ruff-0.3.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d48098bd8f5c38897b03604f5428901b65e3c97d40b3952e38637b5404b739a2"},
-    {file = "ruff-0.3.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:da8a4fda219bf9024692b1bc68c9cff4b80507879ada8769dc7e985755d662ea"},
-    {file = "ruff-0.3.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c44e0149f1d8b48c4d5c33d88c677a4aa22fd09b1683d6a7ff55b816b5d074f"},
-    {file = "ruff-0.3.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3050ec0af72b709a62ecc2aca941b9cd479a7bf2b36cc4562f0033d688e44fa1"},
-    {file = "ruff-0.3.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:a29cc38e4c1ab00da18a3f6777f8b50099d73326981bb7d182e54a9a21bb4ff7"},
-    {file = "ruff-0.3.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:5b15cc59c19edca917f51b1956637db47e200b0fc5e6e1878233d3a938384b0b"},
-    {file = "ruff-0.3.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:e491045781b1e38b72c91247cf4634f040f8d0cb3e6d3d64d38dcf43616650b4"},
-    {file = "ruff-0.3.7-py3-none-win32.whl", hash = "sha256:bc931de87593d64fad3a22e201e55ad76271f1d5bfc44e1a1887edd0903c7d9f"},
-    {file = "ruff-0.3.7-py3-none-win_amd64.whl", hash = "sha256:5ef0e501e1e39f35e03c2acb1d1238c595b8bb36cf7a170e7c1df1b73da00e74"},
-    {file = "ruff-0.3.7-py3-none-win_arm64.whl", hash = "sha256:789e144f6dc7019d1f92a812891c645274ed08af6037d11fc65fcbc183b7d59f"},
-    {file = "ruff-0.3.7.tar.gz", hash = "sha256:d5c1aebee5162c2226784800ae031f660c350e7a3402c4d1f8ea4e97e232e3ba"},
+    {file = "ruff-0.5.0-py3-none-linux_armv6l.whl", hash = "sha256:ee770ea8ab38918f34e7560a597cc0a8c9a193aaa01bfbd879ef43cb06bd9c4c"},
+    {file = "ruff-0.5.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:38f3b8327b3cb43474559d435f5fa65dacf723351c159ed0dc567f7ab735d1b6"},
+    {file = "ruff-0.5.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:7594f8df5404a5c5c8f64b8311169879f6cf42142da644c7e0ba3c3f14130370"},
+    {file = "ruff-0.5.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:adc7012d6ec85032bc4e9065110df205752d64010bed5f958d25dbee9ce35de3"},
+    {file = "ruff-0.5.0-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d505fb93b0fabef974b168d9b27c3960714d2ecda24b6ffa6a87ac432905ea38"},
+    {file = "ruff-0.5.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9dc5cfd3558f14513ed0d5b70ce531e28ea81a8a3b1b07f0f48421a3d9e7d80a"},
+    {file = "ruff-0.5.0-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:db3ca35265de239a1176d56a464b51557fce41095c37d6c406e658cf80bbb362"},
+    {file = "ruff-0.5.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b1a321c4f68809fddd9b282fab6a8d8db796b270fff44722589a8b946925a2a8"},
+    {file = "ruff-0.5.0-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2c4dfcd8d34b143916994b3876b63d53f56724c03f8c1a33a253b7b1e6bf2a7d"},
+    {file = "ruff-0.5.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81e5facfc9f4a674c6a78c64d38becfbd5e4f739c31fcd9ce44c849f1fad9e4c"},
+    {file = "ruff-0.5.0-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:e589e27971c2a3efff3fadafb16e5aef7ff93250f0134ec4b52052b673cf988d"},
+    {file = "ruff-0.5.0-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:d2ffbc3715a52b037bcb0f6ff524a9367f642cdc5817944f6af5479bbb2eb50e"},
+    {file = "ruff-0.5.0-py3-none-musllinux_1_2_i686.whl", hash = "sha256:cd096e23c6a4f9c819525a437fa0a99d1c67a1b6bb30948d46f33afbc53596cf"},
+    {file = "ruff-0.5.0-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:46e193b36f2255729ad34a49c9a997d506e58f08555366b2108783b3064a0e1e"},
+    {file = "ruff-0.5.0-py3-none-win32.whl", hash = "sha256:49141d267100f5ceff541b4e06552e98527870eafa1acc9dec9139c9ec5af64c"},
+    {file = "ruff-0.5.0-py3-none-win_amd64.whl", hash = "sha256:e9118f60091047444c1b90952736ee7b1792910cab56e9b9a9ac20af94cd0440"},
+    {file = "ruff-0.5.0-py3-none-win_arm64.whl", hash = "sha256:ed5c4df5c1fb4518abcb57725b576659542bdbe93366f4f329e8f398c4b71178"},
+    {file = "ruff-0.5.0.tar.gz", hash = "sha256:eb641b5873492cf9bd45bc9c5ae5320648218e04386a5f0c264ad6ccce8226a1"},
 ]

 [[package]]

@@ -2906,13 +2921,13 @@ urllib3 = ">=2"
 [[package]]
 name = "types-setuptools"
-version = "69.5.0.20240423"
+version = "70.1.0.20240627"
 description = "Typing stubs for setuptools"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "types-setuptools-69.5.0.20240423.tar.gz", hash = "sha256:a7ba908f1746c4337d13f027fa0f4a5bcad6d1d92048219ba792b3295c58586d"},
-    {file = "types_setuptools-69.5.0.20240423-py3-none-any.whl", hash = "sha256:a4381e041510755a6c9210e26ad55b1629bc10237aeb9cb8b6bd24996b73db48"},
+    {file = "types-setuptools-70.1.0.20240627.tar.gz", hash = "sha256:385907a47b5cf302b928ce07953cd91147d5de6f3da604c31905fdf0ec309e83"},
+    {file = "types_setuptools-70.1.0.20240627-py3-none-any.whl", hash = "sha256:c7bdf05cd0a8b66868b4774c7b3c079d01ae025d8c9562bfc8bf2ff44d263c9c"},
 ]

 [[package]]

@@ -3187,4 +3202,4 @@ user-search = ["pyicu"]
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.8.0"
-content-hash = "107c8fb5c67360340854fbdba3c085fc5f9c7be24bcb592596a914eea621faea"
+content-hash = "3372a97db99050a34f8eddad2ddf8efe8b7b704b6123df4a3e36ddc171e8f34d"
pyproject.toml

@@ -43,6 +43,7 @@ target-version = ['py38', 'py39', 'py310', 'py311']
 [tool.ruff]
 line-length = 88

+[tool.ruff.lint]
 # See https://beta.ruff.rs/docs/rules/#error-e
 # for error codes. The ones we ignore are:
 # E501: Line too long (black enforces this for us)

@@ -96,7 +97,7 @@ module-name = "synapse.synapse_rust"
 [tool.poetry]
 name = "matrix-synapse"
-version = "1.110.0rc2"
+version = "1.110.0"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "AGPL-3.0-or-later"

@@ -224,6 +225,8 @@ pydantic = ">=1.7.4, <3"
 # needed.
 setuptools_rust = ">=1.3"

+# This is used for parsing multipart responses
+python-multipart = ">=0.0.9"

 # Optional Dependencies
 # ---------------------

@@ -319,7 +322,7 @@ all = [
 # This helps prevents merge conflicts when running a batch of dependabot updates.
 isort = ">=5.10.1"
 black = ">=22.7.0"
-ruff = "0.3.7"
+ruff = "0.5.0"
 # Type checking only works with the pydantic.v1 compat module from pydantic v2
 pydantic = "^2"
scripts-dev/lint.sh

@@ -112,7 +112,7 @@ python3 -m black "${files[@]}"

 # Catch any common programming mistakes in Python code.
 # --quiet suppresses the update check.
-ruff --quiet --fix "${files[@]}"
+ruff check --quiet --fix "${files[@]}"

 # Catch any common programming mistakes in Rust code.
 #
|
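This script and the CI workflow above change for the same reason: newer ruff releases make linting an explicit subcommand, so the bare invocation had to gain `check`. Illustratively (the paths are examples, not from the diff):

    # before: ruff --quiet --fix synapse/ tests/
    # after:  ruff check --quiet --fix synapse/ tests/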
scripts-dev/release.py

@@ -70,6 +70,7 @@ def cli() -> None:
         pip install -e .[dev]

     - A checkout of the sytest repository at ../sytest
+    - A checkout of the complement repository at ../complement

     Then to use:

@@ -112,10 +113,12 @@ def _prepare() -> None:
     # Make sure we're in a git repo.
     synapse_repo = get_repo_and_check_clean_checkout()
     sytest_repo = get_repo_and_check_clean_checkout("../sytest", "sytest")
+    complement_repo = get_repo_and_check_clean_checkout("../complement", "complement")

     click.secho("Updating Synapse and Sytest git repos...")
     synapse_repo.remote().fetch()
     sytest_repo.remote().fetch()
+    complement_repo.remote().fetch()

     # Get the current version and AST from root Synapse module.
     current_version = get_package_version()

@@ -208,7 +211,15 @@ def _prepare() -> None:
         "Which branch should the release be based on?", default=default
     )

-    for repo_name, repo in {"synapse": synapse_repo, "sytest": sytest_repo}.items():
+    for repo_name, repo in {
+        "synapse": synapse_repo,
+        "sytest": sytest_repo,
+        "complement": complement_repo,
+    }.items():
+        # Special case for Complement: `develop` maps to `main`
+        if repo_name == "complement" and branch_name == "develop":
+            branch_name = "main"
+
         base_branch = find_ref(repo, branch_name)
         if not base_branch:
             print(f"Could not find base branch {branch_name} for {repo_name}!")

@@ -231,6 +242,12 @@ def _prepare() -> None:
     if click.confirm("Push new SyTest branch?", default=True):
         sytest_repo.git.push("-u", sytest_repo.remote().name, release_branch_name)

+    # Same for Complement
+    if click.confirm("Push new Complement branch?", default=True):
+        complement_repo.git.push(
+            "-u", complement_repo.remote().name, release_branch_name
+        )
+
     # Switch to the release branch and ensure it's up to date.
     synapse_repo.git.checkout(release_branch_name)
     update_branch(synapse_repo)

@@ -630,6 +647,9 @@ def _merge_back() -> None:
     else:
         # Full release
         sytest_repo = get_repo_and_check_clean_checkout("../sytest", "sytest")
+        complement_repo = get_repo_and_check_clean_checkout(
+            "../complement", "complement"
+        )

     if click.confirm(f"Merge {branch_name} → master?", default=True):
         _merge_into(synapse_repo, branch_name, "master")

@@ -643,6 +663,9 @@ def _merge_back() -> None:
     if click.confirm("On SyTest, merge master → develop?", default=True):
         _merge_into(sytest_repo, "master", "develop")

+    if click.confirm(f"On Complement, merge {branch_name} → main?", default=True):
+        _merge_into(complement_repo, branch_name, "main")


 @cli.command()
 def announce() -> None:
synapse/_scripts/generate_workers_map.py

@@ -44,7 +44,7 @@ logger = logging.getLogger("generate_workers_map")


 class MockHomeserver(HomeServer):
-    DATASTORE_CLASS = DataStore  # type: ignore
+    DATASTORE_CLASS = DataStore

     def __init__(self, config: HomeServerConfig, worker_app: Optional[str]) -> None:
         super().__init__(config.server.server_name, config=config)
synapse/_scripts/update_synapse_database.py

@@ -41,7 +41,7 @@ logger = logging.getLogger("update_database")


 class MockHomeserver(HomeServer):
-    DATASTORE_CLASS = DataStore  # type: ignore [assignment]
+    DATASTORE_CLASS = DataStore

     def __init__(self, config: HomeServerConfig):
         super().__init__(
synapse/api/auth/__init__.py

@@ -18,7 +18,7 @@
 # [This file includes modifications made by New Vector Limited]
 #
 #
-from typing import Optional, Tuple
+from typing import TYPE_CHECKING, Optional, Tuple

 from typing_extensions import Protocol

@@ -28,6 +28,9 @@ from synapse.appservice import ApplicationService
 from synapse.http.site import SynapseRequest
 from synapse.types import Requester

+if TYPE_CHECKING:
+    from synapse.rest.admin.experimental_features import ExperimentalFeature
+
 # guests always get this device id.
 GUEST_DEVICE_ID = "guest_device"

@@ -87,6 +90,19 @@ class Auth(Protocol):
             AuthError if access is denied for the user in the access token
         """

+    async def get_user_by_req_experimental_feature(
+        self,
+        request: SynapseRequest,
+        feature: "ExperimentalFeature",
+        allow_guest: bool = False,
+        allow_expired: bool = False,
+        allow_locked: bool = False,
+    ) -> Requester:
+        """Like `get_user_by_req`, except also checks if the user has access to
+        the experimental feature. If they don't returns a 404 unrecognized
+        request.
+        """
+
     async def validate_appservice_can_control_user_id(
         self, app_service: ApplicationService, user_id: str
     ) -> None:
|
|
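A hedged sketch of how a REST servlet might consume this new protocol method (the feature value and servlet context are illustrative, not taken from this diff):

    # Inside a servlet's on_GET, with self.auth being the Auth implementation:
    requester = await self.auth.get_user_by_req_experimental_feature(
        request,
        ExperimentalFeature.MSC3575,  # assumed enum member gating sliding sync
    )
    # If the user lacks the feature (and it is not globally enabled), the call
    # raises UnrecognizedRequestError, surfacing as a 404 to the client.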
synapse/api/auth/internal.py

@@ -28,6 +28,7 @@ from synapse.api.errors import (
     Codes,
     InvalidClientTokenError,
     MissingClientTokenError,
+    UnrecognizedRequestError,
 )
 from synapse.http.site import SynapseRequest
 from synapse.logging.opentracing import active_span, force_tracing, start_active_span

@@ -38,8 +39,10 @@ from . import GUEST_DEVICE_ID
 from .base import BaseAuth

 if TYPE_CHECKING:
+    from synapse.rest.admin.experimental_features import ExperimentalFeature
+
     from synapse.server import HomeServer


 logger = logging.getLogger(__name__)

@@ -106,6 +109,32 @@ class InternalAuth(BaseAuth):
                 parent_span.set_tag("appservice_id", requester.app_service.id)
         return requester

+    async def get_user_by_req_experimental_feature(
+        self,
+        request: SynapseRequest,
+        feature: "ExperimentalFeature",
+        allow_guest: bool = False,
+        allow_expired: bool = False,
+        allow_locked: bool = False,
+    ) -> Requester:
+        try:
+            requester = await self.get_user_by_req(
+                request,
+                allow_guest=allow_guest,
+                allow_expired=allow_expired,
+                allow_locked=allow_locked,
+            )
+            if await self.store.is_feature_enabled(requester.user.to_string(), feature):
+                return requester
+
+            raise UnrecognizedRequestError(code=404)
+        except (AuthError, InvalidClientTokenError):
+            if feature.is_globally_enabled(self.hs.config):
+                # If its globally enabled then return the auth error
+                raise
+
+            raise UnrecognizedRequestError(code=404)
+
     @cancellable
     async def _wrapped_get_user_by_req(
         self,

synapse/api/auth/msc3861_delegated.py

@@ -40,6 +40,7 @@ from synapse.api.errors import (
     OAuthInsufficientScopeError,
     StoreError,
     SynapseError,
+    UnrecognizedRequestError,
 )
 from synapse.http.site import SynapseRequest
 from synapse.logging.context import make_deferred_yieldable

@@ -48,6 +49,7 @@ from synapse.util import json_decoder
 from synapse.util.caches.cached_call import RetryOnExceptionCachedCall

 if TYPE_CHECKING:
+    from synapse.rest.admin.experimental_features import ExperimentalFeature
     from synapse.server import HomeServer

 logger = logging.getLogger(__name__)

@@ -245,6 +247,32 @@ class MSC3861DelegatedAuth(BaseAuth):

         return requester

+    async def get_user_by_req_experimental_feature(
+        self,
+        request: SynapseRequest,
+        feature: "ExperimentalFeature",
+        allow_guest: bool = False,
+        allow_expired: bool = False,
+        allow_locked: bool = False,
+    ) -> Requester:
+        try:
+            requester = await self.get_user_by_req(
+                request,
+                allow_guest=allow_guest,
+                allow_expired=allow_expired,
+                allow_locked=allow_locked,
+            )
+            if await self.store.is_feature_enabled(requester.user.to_string(), feature):
+                return requester
+
+            raise UnrecognizedRequestError(code=404)
+        except (AuthError, InvalidClientTokenError):
+            if feature.is_globally_enabled(self.hs.config):
+                # If its globally enabled then return the auth error
+                raise
+
+            raise UnrecognizedRequestError(code=404)
+
     async def get_user_by_access_token(
         self,
         token: str,
synapse/api/ratelimiting.py

@@ -130,7 +130,8 @@ class Ratelimiter:
                 Overrides the value set during instantiation if set.
             burst_count: How many actions that can be performed before being limited.
                 Overrides the value set during instantiation if set.
-            update: Whether to count this check as performing the action
+            update: Whether to count this check as performing the action. If the action
+                cannot be performed, the user's action count is not incremented at all.
             n_actions: The number of times the user wants to do this action. If the user
                 cannot do all of the actions, the user's action count is not incremented
                 at all.
|
|
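A hedged usage sketch of the semantics this docstring change pins down (the method name `can_do_action` and its two-tuple return follow the surrounding class, but treat the exact call shape as an assumption):

    # Check and count three actions in one call:
    allowed, time_allowed = await ratelimiter.can_do_action(
        requester, update=True, n_actions=3
    )
    # `allowed` is False once rate_hz/burst_count would be exceeded; in that
    # case, per the docstring above, none of the three actions were counted.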
synapse/app/admin_cmd.py

@@ -110,7 +110,7 @@ class AdminCmdStore(


 class AdminCmdServer(HomeServer):
-    DATASTORE_CLASS = AdminCmdStore  # type: ignore
+    DATASTORE_CLASS = AdminCmdStore


 async def export_data_command(hs: HomeServer, args: argparse.Namespace) -> None:
synapse/app/generic_worker.py

@@ -163,7 +163,7 @@ class GenericWorkerStore(


 class GenericWorkerServer(HomeServer):
-    DATASTORE_CLASS = GenericWorkerStore  # type: ignore
+    DATASTORE_CLASS = GenericWorkerStore

     def _listen_http(self, listener_config: ListenerConfig) -> None:
         assert listener_config.http_options is not None
synapse/app/homeserver.py

@@ -81,7 +81,7 @@ def gz_wrap(r: Resource) -> Resource:


 class SynapseHomeServer(HomeServer):
-    DATASTORE_CLASS = DataStore  # type: ignore
+    DATASTORE_CLASS = DataStore

     def _listener_http(
         self,
synapse/config/experimental.py

@@ -437,10 +437,6 @@ class ExperimentalConfig(Config):
             "msc3823_account_suspension", False
         )

-        self.msc3916_authenticated_media_enabled = experimental.get(
-            "msc3916_authenticated_media_enabled", False
-        )
-
         # MSC4151: Report room API (Client-Server API)
         self.msc4151_enabled: bool = experimental.get("msc4151_enabled", False)
synapse/events/utils.py

@@ -836,3 +836,21 @@ def maybe_upsert_event_field(
         del container[key]

     return upsert_okay
+
+
+def strip_event(event: EventBase) -> JsonDict:
+    """
+    Used for "stripped state" events which provide a simplified view of the state of a
+    room intended to help a potential joiner identify the room (relevant when the user
+    is invited or knocked).
+
+    Stripped state events can only have the `sender`, `type`, `state_key` and `content`
+    properties present.
+    """
+
+    return {
+        "type": event.type,
+        "state_key": event.state_key,
+        "content": event.content,
+        "sender": event.sender,
+    }
|
|
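To illustrate the new helper's output, stripping a (made-up) room name event keeps only the four allowed properties:

    # Hypothetical input event, shown as JSON:
    # {"type": "m.room.name", "state_key": "", "sender": "@alice:example.org",
    #  "content": {"name": "Example room"}, "event_id": "$abc", "origin_server_ts": 1}
    #
    # strip_event(event) returns:
    # {"type": "m.room.name", "state_key": "", "content": {"name": "Example room"},
    #  "sender": "@alice:example.org"}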
synapse/federation/federation_client.py

@@ -1871,6 +1871,52 @@ class FederationClient(FederationBase):

         return filtered_statuses, filtered_failures

+    async def federation_download_media(
+        self,
+        destination: str,
+        media_id: str,
+        output_stream: BinaryIO,
+        max_size: int,
+        max_timeout_ms: int,
+        download_ratelimiter: Ratelimiter,
+        ip_address: str,
+    ) -> Union[
+        Tuple[int, Dict[bytes, List[bytes]], bytes],
+        Tuple[int, Dict[bytes, List[bytes]]],
+    ]:
+        try:
+            return await self.transport_layer.federation_download_media(
+                destination,
+                media_id,
+                output_stream=output_stream,
+                max_size=max_size,
+                max_timeout_ms=max_timeout_ms,
+                download_ratelimiter=download_ratelimiter,
+                ip_address=ip_address,
+            )
+        except HttpResponseException as e:
+            # If an error is received that is due to an unrecognised endpoint,
+            # fallback to the _matrix/media/v3/download endpoint. Otherwise, consider it a legitimate error
+            # and raise.
+            if not is_unknown_endpoint(e):
+                raise
+
+            logger.debug(
+                "Couldn't download media %s/%s over _matrix/federation/v1/media/download, falling back to _matrix/media/v3/download path",
+                destination,
+                media_id,
+            )
+
+            return await self.transport_layer.download_media_v3(
+                destination,
+                media_id,
+                output_stream=output_stream,
+                max_size=max_size,
+                max_timeout_ms=max_timeout_ms,
+                download_ratelimiter=download_ratelimiter,
+                ip_address=ip_address,
+            )
+
     async def download_media(
         self,
         destination: str,
@ -322,7 +322,6 @@ class PerDestinationQueue:
|
|||
)
|
||||
|
||||
async def _transaction_transmission_loop(self) -> None:
|
||||
pending_pdus: List[EventBase] = []
|
||||
try:
|
||||
self.transmission_loop_running = True
|
||||
|
||||
|
@ -338,7 +337,6 @@ class PerDestinationQueue:
|
|||
# not caught up yet
|
||||
return
|
||||
|
||||
pending_pdus = []
|
||||
while True:
|
||||
self._new_data_to_send = False
|
||||
|
||||
|
|
|
@ -824,7 +824,6 @@ class TransportLayerClient:
|
|||
ip_address: str,
|
||||
) -> Tuple[int, Dict[bytes, List[bytes]]]:
|
||||
path = f"/_matrix/media/r0/download/{destination}/{media_id}"
|
||||
|
||||
return await self.client.get_file(
|
||||
destination,
|
||||
path,
|
||||
|
@ -852,7 +851,6 @@ class TransportLayerClient:
|
|||
ip_address: str,
|
||||
) -> Tuple[int, Dict[bytes, List[bytes]]]:
|
||||
path = f"/_matrix/media/v3/download/{destination}/{media_id}"
|
||||
|
||||
return await self.client.get_file(
|
||||
destination,
|
||||
path,
|
||||
|
@ -873,6 +871,29 @@ class TransportLayerClient:
|
|||
ip_address=ip_address,
|
||||
)
|
||||
|
||||
async def federation_download_media(
|
||||
self,
|
||||
destination: str,
|
||||
media_id: str,
|
||||
output_stream: BinaryIO,
|
||||
max_size: int,
|
||||
max_timeout_ms: int,
|
||||
download_ratelimiter: Ratelimiter,
|
||||
ip_address: str,
|
||||
) -> Tuple[int, Dict[bytes, List[bytes]], bytes]:
|
||||
path = f"/_matrix/federation/v1/media/download/{media_id}"
|
||||
return await self.client.federation_get_file(
|
||||
destination,
|
||||
path,
|
||||
output_stream=output_stream,
|
||||
max_size=max_size,
|
||||
args={
|
||||
"timeout_ms": str(max_timeout_ms),
|
||||
},
|
||||
download_ratelimiter=download_ratelimiter,
|
||||
ip_address=ip_address,
|
||||
)
|
||||
|
||||
|
||||
def _create_path(federation_prefix: str, path: str, *args: str) -> str:
|
||||
"""
|
||||
|
|
|
@ -32,8 +32,9 @@ from synapse.federation.transport.server._base import (
|
|||
from synapse.federation.transport.server.federation import (
|
||||
FEDERATION_SERVLET_CLASSES,
|
||||
FederationAccountStatusServlet,
|
||||
FederationMediaDownloadServlet,
|
||||
FederationMediaThumbnailServlet,
|
||||
FederationUnstableClientKeysClaimServlet,
|
||||
FederationUnstableMediaDownloadServlet,
|
||||
)
|
||||
from synapse.http.server import HttpServer, JsonResource
|
||||
from synapse.http.servlet import (
|
||||
|
@ -316,11 +317,11 @@ def register_servlets(
|
|||
):
|
||||
continue
|
||||
|
||||
if servletclass == FederationUnstableMediaDownloadServlet:
|
||||
if (
|
||||
not hs.config.server.enable_media_repo
|
||||
or not hs.config.experimental.msc3916_authenticated_media_enabled
|
||||
):
|
||||
if (
|
||||
servletclass == FederationMediaDownloadServlet
|
||||
or servletclass == FederationMediaThumbnailServlet
|
||||
):
|
||||
if not hs.config.server.enable_media_repo:
|
||||
continue
|
||||
|
||||
servletclass(
|
||||
|
|
|
@ -362,7 +362,9 @@ class BaseFederationServlet:
|
|||
return None
|
||||
if (
|
||||
func.__self__.__class__.__name__ # type: ignore
|
||||
== "FederationUnstableMediaDownloadServlet"
|
||||
== "FederationMediaDownloadServlet"
|
||||
or func.__self__.__class__.__name__ # type: ignore
|
||||
== "FederationMediaThumbnailServlet"
|
||||
):
|
||||
response = await func(
|
||||
origin, content, request, *args, **kwargs
|
||||
|
@ -374,7 +376,9 @@ class BaseFederationServlet:
|
|||
else:
|
||||
if (
|
||||
func.__self__.__class__.__name__ # type: ignore
|
||||
== "FederationUnstableMediaDownloadServlet"
|
||||
== "FederationMediaDownloadServlet"
|
||||
or func.__self__.__class__.__name__ # type: ignore
|
||||
== "FederationMediaThumbnailServlet"
|
||||
):
|
||||
response = await func(
|
||||
origin, content, request, *args, **kwargs
|
||||
|
|
|
synapse/federation/transport/server/federation.py

@@ -46,11 +46,13 @@ from synapse.http.servlet import (
     parse_boolean_from_args,
+    parse_integer,
     parse_integer_from_args,
+    parse_string,
     parse_string_from_args,
     parse_strings_from_args,
 )
 from synapse.http.site import SynapseRequest
 from synapse.media._base import DEFAULT_MAX_TIMEOUT_MS, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS
 from synapse.media.thumbnailer import ThumbnailProvider
 from synapse.types import JsonDict
 from synapse.util import SYNAPSE_VERSION
 from synapse.util.ratelimitutils import FederationRateLimiter
@@ -790,7 +792,7 @@ class FederationAccountStatusServlet(BaseFederationServerServlet):
         return 200, {"account_statuses": statuses, "failures": failures}


-class FederationUnstableMediaDownloadServlet(BaseFederationServerServlet):
+class FederationMediaDownloadServlet(BaseFederationServerServlet):
     """
     Implementation of new federation media `/download` endpoint outlined in MSC3916. Returns
     a multipart/mixed response consisting of a JSON object and the requested media
@@ -798,7 +800,6 @@ class FederationUnstableMediaDownloadServlet(BaseFederationServerServlet):
     """

     PATH = "/media/download/(?P<media_id>[^/]*)"
-    PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc3916"
     RATELIMIT = True

     def __init__(
@@ -827,6 +828,59 @@ class FederationUnstableMediaDownloadServlet(BaseFederationServerServlet):
         )


+class FederationMediaThumbnailServlet(BaseFederationServerServlet):
+    """
+    Implementation of new federation media `/thumbnail` endpoint outlined in MSC3916. Returns
+    a multipart/mixed response consisting of a JSON object and the requested media
+    item. This endpoint only returns local media.
+    """
+
+    PATH = "/media/thumbnail/(?P<media_id>[^/]*)"
+    RATELIMIT = True
+
+    def __init__(
+        self,
+        hs: "HomeServer",
+        ratelimiter: FederationRateLimiter,
+        authenticator: Authenticator,
+        server_name: str,
+    ):
+        super().__init__(hs, authenticator, ratelimiter, server_name)
+        self.media_repo = self.hs.get_media_repository()
+        self.dynamic_thumbnails = hs.config.media.dynamic_thumbnails
+        self.thumbnail_provider = ThumbnailProvider(
+            hs, self.media_repo, self.media_repo.media_storage
+        )
+
+    async def on_GET(
+        self,
+        origin: Optional[str],
+        content: Literal[None],
+        request: SynapseRequest,
+        media_id: str,
+    ) -> None:
+
+        width = parse_integer(request, "width", required=True)
+        height = parse_integer(request, "height", required=True)
+        method = parse_string(request, "method", "scale")
+        # TODO Parse the Accept header to get an prioritised list of thumbnail types.
+        m_type = "image/png"
+        max_timeout_ms = parse_integer(
+            request, "timeout_ms", default=DEFAULT_MAX_TIMEOUT_MS
+        )
+        max_timeout_ms = min(max_timeout_ms, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS)
+
+        if self.dynamic_thumbnails:
+            await self.thumbnail_provider.select_or_generate_local_thumbnail(
+                request, media_id, width, height, method, m_type, max_timeout_ms, True
+            )
+        else:
+            await self.thumbnail_provider.respond_local_thumbnail(
+                request, media_id, width, height, method, m_type, max_timeout_ms, True
+            )
+        self.media_repo.mark_recently_accessed(None, media_id)
+
+
 FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
     FederationSendServlet,
     FederationEventServlet,
@@ -858,5 +912,6 @@ FEDERATION_SERVLET_CLASSES: Tuple[Type[BaseFederationServlet], ...] = (
     FederationV1SendKnockServlet,
     FederationMakeKnockServlet,
     FederationAccountStatusServlet,
-    FederationUnstableMediaDownloadServlet,
+    FederationMediaDownloadServlet,
+    FederationMediaThumbnailServlet,
 )
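
Aside (not part of the commit): the hunks above move the MSC3916 media endpoints from the unstable `/_matrix/federation/unstable/org.matrix.msc3916` prefix to the stable `/_matrix/federation/v1` prefix. A minimal, hypothetical probe of the stable path is sketched below; real federation requests must carry an `X-Matrix` signature, which this sketch omits, so against a live server it would be rejected. The server name and media ID are placeholders.

    import requests  # illustrative only; unauthenticated, so expect a 401 in practice

    # Stable path registered by FederationMediaDownloadServlet above.
    url = "https://example.org:8448/_matrix/federation/v1/media/download/someMediaId"
    resp = requests.get(url, params={"timeout_ms": "20000"}, timeout=30)

    # A successful response is multipart/mixed: a JSON object part, then either
    # the file bytes or a Location header pointing at a redirect URL.
    print(resp.status_code, resp.headers.get("Content-Type"))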
synapse/handlers/deactivate_account.py

@@ -283,6 +283,10 @@ class DeactivateAccountHandler:
                     ratelimit=False,
                     require_consent=False,
                 )
+
+                # Mark the room forgotten too, because they won't be able to do this
+                # for us. This may lead to the room being purged eventually.
+                await self._room_member_handler.forget(user, room_id)
             except Exception:
                 logger.exception(
                     "Failed to part user %r from room %r: ignoring and continuing",
File diff suppressed because it is too large
synapse/handlers/sync.py

@@ -1352,7 +1352,7 @@ class SyncHandler:
             await_full_state = True
             lazy_load_members = False

-        state_at_timeline_end = await self._state_storage_controller.get_state_at(
+        state_at_timeline_end = await self._state_storage_controller.get_state_ids_at(
             room_id,
             stream_position=end_token,
             state_filter=state_filter,
@@ -1480,11 +1480,13 @@ class SyncHandler:
             else:
                 # We can get here if the user has ignored the senders of all
                 # the recent events.
-                state_at_timeline_start = await self._state_storage_controller.get_state_at(
-                    room_id,
-                    stream_position=end_token,
-                    state_filter=state_filter,
-                    await_full_state=await_full_state,
+                state_at_timeline_start = (
+                    await self._state_storage_controller.get_state_ids_at(
+                        room_id,
+                        stream_position=end_token,
+                        state_filter=state_filter,
+                        await_full_state=await_full_state,
+                    )
                 )

         if batch.limited:
@@ -1502,14 +1504,14 @@ class SyncHandler:
             # about them).
             state_filter = StateFilter.all()

-        state_at_previous_sync = await self._state_storage_controller.get_state_at(
+        state_at_previous_sync = await self._state_storage_controller.get_state_ids_at(
             room_id,
             stream_position=since_token,
             state_filter=state_filter,
             await_full_state=await_full_state,
         )

-        state_at_timeline_end = await self._state_storage_controller.get_state_at(
+        state_at_timeline_end = await self._state_storage_controller.get_state_ids_at(
             room_id,
             stream_position=end_token,
             state_filter=state_filter,
@@ -2508,7 +2510,7 @@ class SyncHandler:
                 continue

             if room_id in sync_result_builder.joined_room_ids or has_join:
-                old_state_ids = await self._state_storage_controller.get_state_at(
+                old_state_ids = await self._state_storage_controller.get_state_ids_at(
                     room_id,
                     since_token,
                     state_filter=StateFilter.from_types([(EventTypes.Member, user_id)]),
@@ -2539,7 +2541,7 @@ class SyncHandler:
             else:
                 if not old_state_ids:
                     old_state_ids = (
-                        await self._state_storage_controller.get_state_at(
+                        await self._state_storage_controller.get_state_ids_at(
                             room_id,
                             since_token,
                             state_filter=StateFilter.from_types(
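
Aside (not part of the commit): the rename from `get_state_at` to `get_state_ids_at` reflects that these call sites only need a map of (event type, state key) to event ID, not full events. A toy illustration of filtering such a map, assuming the (type, state_key) keying used with `StateFilter.from_types` above:

    from typing import Dict, List, Tuple

    StateKey = Tuple[str, str]  # (event type, state key)

    def filter_state_ids(
        state_ids: Dict[StateKey, str], types: List[StateKey]
    ) -> Dict[StateKey, str]:
        """Keep only entries matching the given (type, state_key) pairs,
        loosely mirroring StateFilter.from_types from the hunks above."""
        wanted = set(types)
        return {k: v for k, v in state_ids.items() if k in wanted}

    state = {
        ("m.room.member", "@alice:example.org"): "$event1",
        ("m.room.topic", ""): "$event2",
    }
    assert filter_state_ids(state, [("m.room.member", "@alice:example.org")]) == {
        ("m.room.member", "@alice:example.org"): "$event1"
    }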
synapse/http/client.py

@@ -35,6 +35,8 @@ from typing import (
     Union,
 )

+import attr
+import multipart
 import treq
 from canonicaljson import encode_canonical_json
 from netaddr import AddrFormatError, IPAddress, IPSet
@@ -1006,6 +1008,130 @@ class _DiscardBodyWithMaxSizeProtocol(protocol.Protocol):
         self._maybe_fail()


+@attr.s(auto_attribs=True, slots=True)
+class MultipartResponse:
+    """
+    A small class to hold parsed values of a multipart response.
+    """
+
+    json: bytes = b"{}"
+    length: Optional[int] = None
+    content_type: Optional[bytes] = None
+    disposition: Optional[bytes] = None
+    url: Optional[bytes] = None
+
+
+class _MultipartParserProtocol(protocol.Protocol):
+    """
+    Protocol to read and parse a MSC3916 multipart/mixed response
+    """
+
+    transport: Optional[ITCPTransport] = None
+
+    def __init__(
+        self,
+        stream: ByteWriteable,
+        deferred: defer.Deferred,
+        boundary: str,
+        max_length: Optional[int],
+    ) -> None:
+        self.stream = stream
+        self.deferred = deferred
+        self.boundary = boundary
+        self.max_length = max_length
+        self.parser = None
+        self.multipart_response = MultipartResponse()
+        self.has_redirect = False
+        self.in_json = False
+        self.json_done = False
+        self.file_length = 0
+        self.total_length = 0
+        self.in_disposition = False
+        self.in_content_type = False
+
+    def dataReceived(self, incoming_data: bytes) -> None:
+        if self.deferred.called:
+            return
+
+        # we don't have a parser yet, instantiate it
+        if not self.parser:
+
+            def on_header_field(data: bytes, start: int, end: int) -> None:
+                if data[start:end] == b"Location":
+                    self.has_redirect = True
+                if data[start:end] == b"Content-Disposition":
+                    self.in_disposition = True
+                if data[start:end] == b"Content-Type":
+                    self.in_content_type = True
+
+            def on_header_value(data: bytes, start: int, end: int) -> None:
+                # the first header should be content-type for application/json
+                if not self.in_json and not self.json_done:
+                    assert data[start:end] == b"application/json"
+                    self.in_json = True
+                elif self.has_redirect:
+                    self.multipart_response.url = data[start:end]
+                elif self.in_content_type:
+                    self.multipart_response.content_type = data[start:end]
+                    self.in_content_type = False
+                elif self.in_disposition:
+                    self.multipart_response.disposition = data[start:end]
+                    self.in_disposition = False
+
+            def on_part_data(data: bytes, start: int, end: int) -> None:
+                # we've seen json header but haven't written the json data
+                if self.in_json and not self.json_done:
+                    self.multipart_response.json = data[start:end]
+                    self.json_done = True
+                # we have a redirect header rather than a file, and have already captured it
+                elif self.has_redirect:
+                    return
+                # otherwise we are in the file part
+                else:
+                    logger.info("Writing multipart file data to stream")
+                    try:
+                        self.stream.write(data[start:end])
+                    except Exception as e:
+                        logger.warning(
+                            f"Exception encountered writing file data to stream: {e}"
+                        )
+                        self.deferred.errback()
+                    self.file_length += end - start
+
+            callbacks = {
+                "on_header_field": on_header_field,
+                "on_header_value": on_header_value,
+                "on_part_data": on_part_data,
+            }
+            self.parser = multipart.MultipartParser(self.boundary, callbacks)
+
+        self.total_length += len(incoming_data)
+        if self.max_length is not None and self.total_length >= self.max_length:
+            self.deferred.errback(BodyExceededMaxSize())
+            # Close the connection (forcefully) since all the data will get
+            # discarded anyway.
+            assert self.transport is not None
+            self.transport.abortConnection()
+
+        try:
+            self.parser.write(incoming_data)  # type: ignore[attr-defined]
+        except Exception as e:
+            logger.warning(f"Exception writing to multipart parser: {e}")
+            self.deferred.errback()
+            return
+
+    def connectionLost(self, reason: Failure = connectionDone) -> None:
+        # If the maximum size was already exceeded, there's nothing to do.
+        if self.deferred.called:
+            return
+
+        if reason.check(ResponseDone):
+            self.multipart_response.length = self.file_length
+            self.deferred.callback(self.multipart_response)
+        else:
+            self.deferred.errback(reason)
+
+
 class _ReadBodyWithMaxSizeProtocol(protocol.Protocol):
     """A protocol which reads body to a stream, erroring if the body exceeds a maximum size."""

@@ -1091,6 +1217,32 @@ def read_body_with_max_size(
     return d


+def read_multipart_response(
+    response: IResponse, stream: ByteWriteable, boundary: str, max_length: Optional[int]
+) -> "defer.Deferred[MultipartResponse]":
+    """
+    Reads a MSC3916 multipart/mixed response and parses it, reading the file part (if it contains one) into
+    the stream passed in and returning a deferred resolving to a MultipartResponse
+
+    Args:
+        response: The HTTP response to read from.
+        stream: The file-object to write to.
+        boundary: the multipart/mixed boundary string
+        max_length: maximum allowable length of the response
+    """
+    d: defer.Deferred[MultipartResponse] = defer.Deferred()
+
+    # If the Content-Length header gives a size larger than the maximum allowed
+    # size, do not bother downloading the body.
+    if max_length is not None and response.length != UNKNOWN_LENGTH:
+        if response.length > max_length:
+            response.deliverBody(_DiscardBodyWithMaxSizeProtocol(d))
+            return d
+
+    response.deliverBody(_MultipartParserProtocol(stream, d, boundary, max_length))
+    return d
+
+
 def encode_query_args(args: Optional[QueryParams]) -> bytes:
     """
     Encodes a map of query arguments to bytes which can be appended to a URL.
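
Aside (not part of the commit): `_MultipartParserProtocol` above drives the callback-based parser from the `multipart` package. A standalone sketch of the same callback wiring, parsing an in-memory multipart/mixed body, is below; it assumes the same `multipart` package and callback signatures the diff uses, and the boundary and body bytes are made up for illustration.

    import multipart

    boundary = "abc123"
    body = (
        b"--abc123\r\n"
        b"Content-Type: application/json\r\n\r\n"
        b"{}\r\n"
        b"--abc123\r\n"
        b"Content-Type: text/plain\r\n\r\n"
        b"hello, world\r\n"
        b"--abc123--\r\n"
    )

    parts = []

    def on_part_data(data: bytes, start: int, end: int) -> None:
        # The parser hands back (buffer, start, end) slices, exactly as in
        # the protocol's callbacks above.
        parts.append(data[start:end])

    parser = multipart.MultipartParser(boundary, {"on_part_data": on_part_data})
    parser.write(body)

    # Note: on_part_data may fire more than once per part for large bodies;
    # a real consumer (like the protocol above) accumulates or streams slices.
    print(parts)  # expect the JSON part ({} here) followed by the file bytes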
synapse/http/matrixfederationclient.py

@@ -75,9 +75,11 @@ from synapse.http.client import (
     BlocklistingAgentWrapper,
     BodyExceededMaxSize,
     ByteWriteable,
+    SimpleHttpClient,
     _make_scheduler,
     encode_query_args,
     read_body_with_max_size,
+    read_multipart_response,
 )
 from synapse.http.connectproxyclient import BearerProxyCredentials
 from synapse.http.federation.matrix_federation_agent import MatrixFederationAgent
@@ -466,6 +468,13 @@ class MatrixFederationHttpClient:

         self._sleeper = AwakenableSleeper(self.reactor)

+        self._simple_http_client = SimpleHttpClient(
+            hs,
+            ip_blocklist=hs.config.server.federation_ip_range_blocklist,
+            ip_allowlist=hs.config.server.federation_ip_range_allowlist,
+            use_proxy=True,
+        )
+
     def wake_destination(self, destination: str) -> None:
         """Called when the remote server may have come back online."""

@@ -1553,6 +1562,189 @@ class MatrixFederationHttpClient:
         )
         return length, headers

+    async def federation_get_file(
+        self,
+        destination: str,
+        path: str,
+        output_stream: BinaryIO,
+        download_ratelimiter: Ratelimiter,
+        ip_address: str,
+        max_size: int,
+        args: Optional[QueryParams] = None,
+        retry_on_dns_fail: bool = True,
+        ignore_backoff: bool = False,
+    ) -> Tuple[int, Dict[bytes, List[bytes]], bytes]:
+        """GETs a file from a given homeserver over the federation /download endpoint
+        Args:
+            destination: The remote server to send the HTTP request to.
+            path: The HTTP path to GET.
+            output_stream: File to write the response body to.
+            download_ratelimiter: a ratelimiter to limit remote media downloads, keyed to
+                requester IP
+            ip_address: IP address of the requester
+            max_size: maximum allowable size in bytes of the file
+            args: Optional dictionary used to create the query string.
+            ignore_backoff: true to ignore the historical backoff data
+                and try the request anyway.
+
+        Returns:
+            Resolves to an (int, dict, bytes) tuple of
+            the file length, a dict of the response headers, and the file json
+
+        Raises:
+            HttpResponseException: If we get an HTTP response code >= 300
+                (except 429).
+            NotRetryingDestination: If we are not yet ready to retry this
+                server.
+            FederationDeniedError: If this destination is not on our
+                federation whitelist
+            RequestSendFailed: If there were problems connecting to the
+                remote, due to e.g. DNS failures, connection timeouts etc.
+            SynapseError: If the requested file exceeds ratelimits or the response from the
+                remote server is not a multipart response
+            AssertionError: if the resolved multipart response's length is None
+        """
+        request = MatrixFederationRequest(
+            method="GET", destination=destination, path=path, query=args
+        )
+
+        # check for a minimum balance of 1MiB in ratelimiter before initiating request
+        send_req, _ = await download_ratelimiter.can_do_action(
+            requester=None, key=ip_address, n_actions=1048576, update=False
+        )
+
+        if not send_req:
+            msg = "Requested file size exceeds ratelimits"
+            logger.warning(
+                "{%s} [%s] %s",
+                request.txn_id,
+                request.destination,
+                msg,
+            )
+            raise SynapseError(HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED)
+
+        response = await self._send_request(
+            request,
+            retry_on_dns_fail=retry_on_dns_fail,
+            ignore_backoff=ignore_backoff,
+        )
+
+        headers = dict(response.headers.getAllRawHeaders())
+
+        expected_size = response.length
+        # if we don't get an expected length then use the max length
+        if expected_size == UNKNOWN_LENGTH:
+            expected_size = max_size
+            logger.debug(
+                f"File size unknown, assuming file is max allowable size: {max_size}"
+            )
+
+        read_body, _ = await download_ratelimiter.can_do_action(
+            requester=None,
+            key=ip_address,
+            n_actions=expected_size,
+        )
+        if not read_body:
+            msg = "Requested file size exceeds ratelimits"
+            logger.warning(
+                "{%s} [%s] %s",
+                request.txn_id,
+                request.destination,
+                msg,
+            )
+            raise SynapseError(HTTPStatus.TOO_MANY_REQUESTS, msg, Codes.LIMIT_EXCEEDED)
+
+        # this should be a multipart/mixed response with the boundary string in the header
+        try:
+            raw_content_type = headers.get(b"Content-Type")
+            assert raw_content_type is not None
+            content_type = raw_content_type[0].decode("UTF-8")
+            content_type_parts = content_type.split("boundary=")
+            boundary = content_type_parts[1]
+        except Exception:
+            msg = "Remote response is malformed: expected Content-Type of multipart/mixed with a boundary present."
+            logger.warning(
+                "{%s} [%s] %s",
+                request.txn_id,
+                request.destination,
+                msg,
+            )
+            raise SynapseError(HTTPStatus.BAD_GATEWAY, msg)
+
+        try:
+            # add a byte of headroom to max size as `_MultipartParserProtocol.dataReceived` errs at >=
+            deferred = read_multipart_response(
+                response, output_stream, boundary, expected_size + 1
+            )
+            deferred.addTimeout(self.default_timeout_seconds, self.reactor)
+        except BodyExceededMaxSize:
+            msg = "Requested file is too large > %r bytes" % (expected_size,)
+            logger.warning(
+                "{%s} [%s] %s",
+                request.txn_id,
+                request.destination,
+                msg,
+            )
+            raise SynapseError(HTTPStatus.BAD_GATEWAY, msg, Codes.TOO_LARGE)
+        except defer.TimeoutError as e:
+            logger.warning(
+                "{%s} [%s] Timed out reading response - %s %s",
+                request.txn_id,
+                request.destination,
+                request.method,
+                request.uri.decode("ascii"),
+            )
+            raise RequestSendFailed(e, can_retry=True) from e
+        except ResponseFailed as e:
+            logger.warning(
+                "{%s} [%s] Failed to read response - %s %s",
+                request.txn_id,
+                request.destination,
+                request.method,
+                request.uri.decode("ascii"),
+            )
+            raise RequestSendFailed(e, can_retry=True) from e
+        except Exception as e:
+            logger.warning(
+                "{%s} [%s] Error reading response: %s",
+                request.txn_id,
+                request.destination,
+                e,
+            )
+            raise
+
+        multipart_response = await make_deferred_yieldable(deferred)
+        if not multipart_response.url:
+            assert multipart_response.length is not None
+            length = multipart_response.length
+            headers[b"Content-Type"] = [multipart_response.content_type]
+            headers[b"Content-Disposition"] = [multipart_response.disposition]
+
+        # the response contained a redirect url to download the file from
+        else:
+            str_url = multipart_response.url.decode("utf-8")
+            logger.info(
+                "{%s} [%s] File download redirected, now downloading from: %s",
+                request.txn_id,
+                request.destination,
+                str_url,
+            )
+            length, headers, _, _ = await self._simple_http_client.get_file(
+                str_url, output_stream, expected_size
+            )
+
+        logger.info(
+            "{%s} [%s] Completed: %d %s [%d bytes] %s %s",
+            request.txn_id,
+            request.destination,
+            response.code,
+            response.phrase.decode("ascii", errors="replace"),
+            length,
+            request.method,
+            request.uri.decode("ascii"),
+        )
+        return length, headers, multipart_response.json
+
+
 def _flatten_response_never_received(e: BaseException) -> str:
     if hasattr(e, "reasons"):
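
Aside (not part of the commit): `federation_get_file` charges the download ratelimiter in two phases: a non-mutating probe for a 1 MiB minimum balance (1048576 = 1024 * 1024 bytes) before sending the request, then a real charge for the expected size once the Content-Length is known. A self-contained token-bucket sketch of that pattern (this is not Synapse's `Ratelimiter`, just an illustration of the check-then-charge shape):

    class ByteBucket:
        """Toy per-key byte allowance, illustrating check-then-charge."""

        def __init__(self, allowance: int) -> None:
            self.allowance = allowance

        def can_do_action(self, n_actions: int, update: bool = True) -> bool:
            if n_actions > self.allowance:
                return False
            if update:
                self.allowance -= n_actions
            return True

    bucket = ByteBucket(allowance=10 * 1024 * 1024)  # 10 MiB

    # 1) probe: require at least 1 MiB of headroom, without spending it
    assert bucket.can_do_action(1024 * 1024, update=False)

    # 2) charge: once the response length is known, spend it for real
    expected_size = 5 * 1024 * 1024
    assert bucket.can_do_action(expected_size)
    assert bucket.allowance == 5 * 1024 * 1024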
synapse/media/_base.py

@@ -221,6 +221,7 @@ def add_file_headers(
         # select private. don't bother setting Expires as all our
         # clients are smart enough to be happy with Cache-Control
         request.setHeader(b"Cache-Control", b"public,max-age=86400,s-maxage=86400")
+
     if file_size is not None:
         request.setHeader(b"Content-Length", b"%d" % (file_size,))

@@ -302,12 +303,37 @@ async def respond_with_multipart_responder(
         )
         return

+    if media_info.media_type.lower().split(";", 1)[0] in INLINE_CONTENT_TYPES:
+        disposition = "inline"
+    else:
+        disposition = "attachment"
+
+    def _quote(x: str) -> str:
+        return urllib.parse.quote(x.encode("utf-8"))
+
+    if media_info.upload_name:
+        if _can_encode_filename_as_token(media_info.upload_name):
+            disposition = "%s; filename=%s" % (
+                disposition,
+                media_info.upload_name,
+            )
+        else:
+            disposition = "%s; filename*=utf-8''%s" % (
+                disposition,
+                _quote(media_info.upload_name),
+            )
+
     from synapse.media.media_storage import MultipartFileConsumer

     # note that currently the json_object is just {}, this will change when linked media
     # is implemented
     multipart_consumer = MultipartFileConsumer(
-        clock, request, media_info.media_type, {}, media_info.media_length
+        clock,
+        request,
+        media_info.media_type,
+        {},
+        disposition,
+        media_info.media_length,
     )

     logger.debug("Responding to media request with responder %s", responder)
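
Aside (not part of the commit): the hunk above picks `inline` versus `attachment` and then encodes the filename per RFC 6266: a plain `filename=` when the name is a safe HTTP token, otherwise the RFC 5987 `filename*=utf-8''...` percent-encoded form. A hedged sketch of that encoding; `_token_safe` is an illustrative stand-in for Synapse's `_can_encode_filename_as_token`, not its actual rule:

    import re
    import urllib.parse

    def _token_safe(name: str) -> bool:
        # illustrative stand-in for _can_encode_filename_as_token
        return bool(re.fullmatch(r"[A-Za-z0-9._-]+", name))

    def content_disposition(disposition: str, upload_name: str) -> str:
        if _token_safe(upload_name):
            return "%s; filename=%s" % (disposition, upload_name)
        return "%s; filename*=utf-8''%s" % (
            disposition,
            urllib.parse.quote(upload_name.encode("utf-8")),
        )

    print(content_disposition("inline", "cat.png"))
    # inline; filename=cat.png
    print(content_disposition("attachment", "猫.png"))
    # attachment; filename*=utf-8''%E7%8C%AB.png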
synapse/media/media_repository.py

@@ -480,6 +480,7 @@ class MediaRepository:
         name: Optional[str],
         max_timeout_ms: int,
         ip_address: str,
+        use_federation_endpoint: bool,
     ) -> None:
         """Respond to requests for remote media.

@@ -492,6 +493,8 @@ class MediaRepository:
             max_timeout_ms: the maximum number of milliseconds to wait for the
                 media to be uploaded.
             ip_address: the IP address of the requester
+            use_federation_endpoint: whether to request the remote media over the new
+                federation `/download` endpoint

         Returns:
             Resolves once a response has successfully been written to request
@@ -522,6 +525,7 @@ class MediaRepository:
                 max_timeout_ms,
                 self.download_ratelimiter,
                 ip_address,
+                use_federation_endpoint,
             )

         # We deliberately stream the file outside the lock
@@ -538,7 +542,12 @@ class MediaRepository:
             respond_404(request)

     async def get_remote_media_info(
-        self, server_name: str, media_id: str, max_timeout_ms: int, ip_address: str
+        self,
+        server_name: str,
+        media_id: str,
+        max_timeout_ms: int,
+        ip_address: str,
+        use_federation: bool,
     ) -> RemoteMedia:
         """Gets the media info associated with the remote file, downloading
         if necessary.
@@ -549,6 +558,8 @@ class MediaRepository:
             max_timeout_ms: the maximum number of milliseconds to wait for the
                 media to be uploaded.
             ip_address: IP address of the requester
+            use_federation: if a download is necessary, whether to request the remote file
+                over the federation `/download` endpoint

         Returns:
             The media info of the file
@@ -569,6 +580,7 @@ class MediaRepository:
             max_timeout_ms,
             self.download_ratelimiter,
             ip_address,
+            use_federation,
         )

         # Ensure we actually use the responder so that it releases resources
@@ -585,6 +597,7 @@ class MediaRepository:
         max_timeout_ms: int,
         download_ratelimiter: Ratelimiter,
         ip_address: str,
+        use_federation_endpoint: bool,
     ) -> Tuple[Optional[Responder], RemoteMedia]:
         """Looks for media in local cache, if not there then attempt to
         download from remote server.
@@ -598,6 +611,8 @@ class MediaRepository:
             download_ratelimiter: a ratelimiter limiting remote media downloads, keyed to
                 requester IP.
             ip_address: the IP address of the requester
+            use_federation_endpoint: whether to request the remote media over the new federation
+                /download endpoint

         Returns:
             A tuple of responder and the media info of the file.
@@ -629,9 +644,23 @@ class MediaRepository:
         # Failed to find the file anywhere, lets download it.

         try:
-            media_info = await self._download_remote_file(
-                server_name, media_id, max_timeout_ms, download_ratelimiter, ip_address
-            )
+            if not use_federation_endpoint:
+                media_info = await self._download_remote_file(
+                    server_name,
+                    media_id,
+                    max_timeout_ms,
+                    download_ratelimiter,
+                    ip_address,
+                )
+            else:
+                media_info = await self._federation_download_remote_file(
+                    server_name,
+                    media_id,
+                    max_timeout_ms,
+                    download_ratelimiter,
+                    ip_address,
+                )
+
         except SynapseError:
             raise
         except Exception as e:
@@ -775,6 +804,129 @@ class MediaRepository:
             quarantined_by=None,
         )

+    async def _federation_download_remote_file(
+        self,
+        server_name: str,
+        media_id: str,
+        max_timeout_ms: int,
+        download_ratelimiter: Ratelimiter,
+        ip_address: str,
+    ) -> RemoteMedia:
+        """Attempt to download the remote file from the given server name.
+        Uses the given file_id as the local id and downloads the file over the federation
+        v1 download endpoint
+
+        Args:
+            server_name: Originating server
+            media_id: The media ID of the content (as defined by the
+                remote server). This is different than the file_id, which is
+                locally generated.
+            max_timeout_ms: the maximum number of milliseconds to wait for the
+                media to be uploaded.
+            download_ratelimiter: a ratelimiter limiting remote media downloads, keyed to
+                requester IP
+            ip_address: the IP address of the requester
+
+        Returns:
+            The media info of the file.
+        """
+
+        file_id = random_string(24)
+
+        file_info = FileInfo(server_name=server_name, file_id=file_id)
+
+        async with self.media_storage.store_into_file(file_info) as (f, fname):
+            try:
+                res = await self.client.federation_download_media(
+                    server_name,
+                    media_id,
+                    output_stream=f,
+                    max_size=self.max_upload_size,
+                    max_timeout_ms=max_timeout_ms,
+                    download_ratelimiter=download_ratelimiter,
+                    ip_address=ip_address,
+                )
+                # if we had to fall back to the _matrix/media endpoint it will only return
+                # the headers and length, check the length of the tuple before unpacking
+                if len(res) == 3:
+                    length, headers, json = res
+                else:
+                    length, headers = res
+            except RequestSendFailed as e:
+                logger.warning(
+                    "Request failed fetching remote media %s/%s: %r",
+                    server_name,
+                    media_id,
+                    e,
+                )
+                raise SynapseError(502, "Failed to fetch remote media")
+
+            except HttpResponseException as e:
+                logger.warning(
+                    "HTTP error fetching remote media %s/%s: %s",
+                    server_name,
+                    media_id,
+                    e.response,
+                )
+                if e.code == twisted.web.http.NOT_FOUND:
+                    raise e.to_synapse_error()
+                raise SynapseError(502, "Failed to fetch remote media")
+
+            except SynapseError:
+                logger.warning(
+                    "Failed to fetch remote media %s/%s", server_name, media_id
+                )
+                raise
+            except NotRetryingDestination:
+                logger.warning("Not retrying destination %r", server_name)
+                raise SynapseError(502, "Failed to fetch remote media")
+            except Exception:
+                logger.exception(
+                    "Failed to fetch remote media %s/%s", server_name, media_id
+                )
+                raise SynapseError(502, "Failed to fetch remote media")
+
+            if b"Content-Type" in headers:
+                media_type = headers[b"Content-Type"][0].decode("ascii")
+            else:
+                media_type = "application/octet-stream"
+            upload_name = get_filename_from_headers(headers)
+            time_now_ms = self.clock.time_msec()
+
+            # Multiple remote media download requests can race (when using
+            # multiple media repos), so this may throw a violation constraint
+            # exception. If it does we'll delete the newly downloaded file from
+            # disk (as we're in the ctx manager).
+            #
+            # However: we've already called `finish()` so we may have also
+            # written to the storage providers. This is preferable to the
+            # alternative where we call `finish()` *after* this, where we could
+            # end up having an entry in the DB but fail to write the files to
+            # the storage providers.
+            await self.store.store_cached_remote_media(
+                origin=server_name,
+                media_id=media_id,
+                media_type=media_type,
+                time_now_ms=time_now_ms,
+                upload_name=upload_name,
+                media_length=length,
+                filesystem_id=file_id,
+            )
+
+        logger.debug("Stored remote media in file %r", fname)
+
+        return RemoteMedia(
+            media_origin=server_name,
+            media_id=media_id,
+            media_type=media_type,
+            media_length=length,
+            upload_name=upload_name,
+            created_ts=time_now_ms,
+            filesystem_id=file_id,
+            last_access_ts=time_now_ms,
+            quarantined_by=None,
+        )
+
     def _get_thumbnail_requirements(
         self, media_type: str
     ) -> Tuple[ThumbnailRequirement, ...]:
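
Aside (not part of the commit): `_federation_download_remote_file` has to cope with `federation_download_media` returning either a 3-tuple (length, headers, json) from the new endpoint or a 2-tuple (length, headers) when the transport fell back to the legacy `_matrix/media` path; checking `len(res)` before unpacking avoids a ValueError. The pattern in miniature, with a made-up default for the missing JSON:

    from typing import Dict, List, Tuple, Union

    TwoTuple = Tuple[int, Dict[bytes, List[bytes]]]
    ThreeTuple = Tuple[int, Dict[bytes, List[bytes]], bytes]

    def unpack(res: Union[TwoTuple, ThreeTuple]) -> ThreeTuple:
        # mirror the len() check from the hunk above
        if len(res) == 3:
            length, headers, json_bytes = res
        else:
            length, headers = res
            json_bytes = b"{}"  # assumption: empty JSON object when absent
        return length, headers, json_bytes

    assert unpack((10, {}))[2] == b"{}"
    assert unpack((10, {}, b'{"a":1}'))[2] == b'{"a":1}'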
synapse/media/media_storage.py

@@ -401,13 +401,14 @@ class MultipartFileConsumer:
         wrapped_consumer: interfaces.IConsumer,
         file_content_type: str,
         json_object: JsonDict,
-        content_length: Optional[int] = None,
+        disposition: str,
+        content_length: Optional[int],
     ) -> None:
         self.clock = clock
         self.wrapped_consumer = wrapped_consumer
         self.json_field = json_object
         self.json_field_written = False
-        self.content_type_written = False
+        self.file_headers_written = False
         self.file_content_type = file_content_type
         self.boundary = uuid4().hex.encode("ascii")

@@ -420,6 +421,7 @@ class MultipartFileConsumer:
         self.paused = False

         self.length = content_length
+        self.disposition = disposition

     ### IConsumer APIs ###

@@ -488,11 +490,13 @@ class MultipartFileConsumer:
             self.json_field_written = True

         # if we haven't written the content type yet, do so
-        if not self.content_type_written:
+        if not self.file_headers_written:
             type = self.file_content_type.encode("utf-8")
             content_type = Header(b"Content-Type", type)
-            self.wrapped_consumer.write(bytes(content_type) + CRLF + CRLF)
-            self.content_type_written = True
+            self.wrapped_consumer.write(bytes(content_type) + CRLF)
+            disp_header = Header(b"Content-Disposition", self.disposition)
+            self.wrapped_consumer.write(bytes(disp_header) + CRLF + CRLF)
+            self.file_headers_written = True

         self.wrapped_consumer.write(data)

@@ -506,7 +510,6 @@ class MultipartFileConsumer:
         producing data for good.
         """
-        assert self.producer is not None

         self.paused = True
         self.producer.stopProducing()

@@ -518,7 +521,6 @@ class MultipartFileConsumer:
         the time being, and to stop until C{resumeProducing()} is called.
         """
-        assert self.producer is not None

         self.paused = True

         if self.streaming:
@@ -549,7 +551,7 @@ class MultipartFileConsumer:
         """
         if not self.length:
             return None
-        # calculate length of json field and content-type header
+        # calculate length of json field and content-type, disposition headers
         json_field = json.dumps(self.json_field)
         json_bytes = json_field.encode("utf-8")
         json_length = len(json_bytes)
@@ -558,9 +560,13 @@ class MultipartFileConsumer:
         content_type = Header(b"Content-Type", type)
         type_length = len(bytes(content_type))

-        # 154 is the length of the elements that aren't variable, ie
+        disp = self.disposition.encode("utf-8")
+        disp_header = Header(b"Content-Disposition", disp)
+        disp_length = len(bytes(disp_header))
+
+        # 156 is the length of the elements that aren't variable, ie
         # CRLFs and boundary strings, etc
-        self.length += json_length + type_length + 154
+        self.length += json_length + type_length + disp_length + 156

         return self.length

@@ -569,7 +575,6 @@ class MultipartFileConsumer:
     async def _resumeProducingRepeatedly(self) -> None:
-        assert self.producer is not None
         assert not self.streaming

         producer = cast("interfaces.IPullProducer", self.producer)

         self.paused = False
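
Aside (not part of the commit): the advertised Content-Length here is the file length plus the JSON part, the per-part header blocks, and a fixed number of framing bytes. The constant grows from 154 to 156 because the Content-Disposition header, besides its own (separately counted) bytes, adds one more CRLF to the header block. A sketch of where such a constant comes from, computing framing overhead programmatically rather than hardcoding it; this is an illustration of the idea, not the exact byte-for-byte layout Synapse emits:

    CRLF = b"\r\n"

    def framing_overhead(boundary: bytes, n_parts: int) -> int:
        """Bytes used by multipart framing alone: one '--boundary' line per
        part, a CRLF after each body, and the closing '--boundary--' line."""
        per_part = len(b"--" + boundary + CRLF) + len(CRLF)
        closing = len(b"--" + boundary + b"--" + CRLF)
        return n_parts * per_part + closing

    # Synapse's boundary is uuid4().hex, i.e. 32 bytes.
    print(framing_overhead(b"a" * 32, n_parts=2))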
synapse/media/thumbnailer.py

@@ -36,9 +36,11 @@ from synapse.media._base import (
     ThumbnailInfo,
     respond_404,
     respond_with_file,
+    respond_with_multipart_responder,
     respond_with_responder,
 )
-from synapse.media.media_storage import MediaStorage
+from synapse.media.media_storage import FileResponder, MediaStorage
+from synapse.storage.databases.main.media_repository import LocalMedia

 if TYPE_CHECKING:
     from synapse.media.media_repository import MediaRepository
@@ -271,6 +273,7 @@ class ThumbnailProvider:
         method: str,
         m_type: str,
         max_timeout_ms: int,
+        for_federation: bool,
     ) -> None:
         media_info = await self.media_repo.get_local_media_info(
             request, media_id, max_timeout_ms
@@ -290,6 +293,8 @@ class ThumbnailProvider:
             media_id,
             url_cache=bool(media_info.url_cache),
             server_name=None,
+            for_federation=for_federation,
+            media_info=media_info,
         )

     async def select_or_generate_local_thumbnail(
@@ -301,6 +306,7 @@ class ThumbnailProvider:
         desired_method: str,
         desired_type: str,
         max_timeout_ms: int,
+        for_federation: bool,
     ) -> None:
         media_info = await self.media_repo.get_local_media_info(
             request, media_id, max_timeout_ms
@@ -326,10 +332,16 @@ class ThumbnailProvider:
                 responder = await self.media_storage.fetch_media(file_info)
                 if responder:
-                    await respond_with_responder(
-                        request, responder, info.type, info.length
-                    )
-                    return
+                    if for_federation:
+                        await respond_with_multipart_responder(
+                            self.hs.get_clock(), request, responder, media_info
+                        )
+                        return
+                    else:
+                        await respond_with_responder(
+                            request, responder, info.type, info.length
+                        )
+                        return

         logger.debug("We don't have a thumbnail of that size. Generating")

@@ -344,7 +356,15 @@ class ThumbnailProvider:
         )

         if file_path:
-            await respond_with_file(request, desired_type, file_path)
+            if for_federation:
+                await respond_with_multipart_responder(
+                    self.hs.get_clock(),
+                    request,
+                    FileResponder(open(file_path, "rb")),
+                    media_info,
+                )
+            else:
+                await respond_with_file(request, desired_type, file_path)
         else:
             logger.warning("Failed to generate thumbnail")
             raise SynapseError(400, "Failed to generate thumbnail.")
@@ -360,9 +380,10 @@ class ThumbnailProvider:
         desired_type: str,
         max_timeout_ms: int,
         ip_address: str,
+        use_federation: bool,
     ) -> None:
         media_info = await self.media_repo.get_remote_media_info(
-            server_name, media_id, max_timeout_ms, ip_address
+            server_name, media_id, max_timeout_ms, ip_address, use_federation
         )
         if not media_info:
             respond_404(request)
@@ -424,12 +445,13 @@ class ThumbnailProvider:
         m_type: str,
         max_timeout_ms: int,
         ip_address: str,
+        use_federation: bool,
     ) -> None:
         # TODO: Don't download the whole remote file
         # We should proxy the thumbnail from the remote server instead of
         # downloading the remote file and generating our own thumbnails.
         media_info = await self.media_repo.get_remote_media_info(
-            server_name, media_id, max_timeout_ms, ip_address
+            server_name, media_id, max_timeout_ms, ip_address, use_federation
         )
         if not media_info:
             return
@@ -448,6 +470,7 @@ class ThumbnailProvider:
             media_info.filesystem_id,
             url_cache=False,
             server_name=server_name,
+            for_federation=False,
         )

     async def _select_and_respond_with_thumbnail(
@@ -461,7 +484,9 @@ class ThumbnailProvider:
         media_id: str,
         file_id: str,
         url_cache: bool,
+        for_federation: bool,
         server_name: Optional[str] = None,
+        media_info: Optional[LocalMedia] = None,
     ) -> None:
         """
         Respond to a request with an appropriate thumbnail from the previously generated thumbnails.
@@ -476,6 +501,8 @@ class ThumbnailProvider:
             file_id: The ID of the media that a thumbnail is being requested for.
             url_cache: True if this is from a URL cache.
             server_name: The server name, if this is a remote thumbnail.
+            for_federation: whether the request is from the federation /thumbnail request
+            media_info: metadata about the media being requested.
         """
         logger.debug(
             "_select_and_respond_with_thumbnail: media_id=%s desired=%sx%s (%s) thumbnail_infos=%s",
@@ -511,13 +538,20 @@ class ThumbnailProvider:
             responder = await self.media_storage.fetch_media(file_info)
             if responder:
-                await respond_with_responder(
-                    request,
-                    responder,
-                    file_info.thumbnail.type,
-                    file_info.thumbnail.length,
-                )
-                return
+                if for_federation:
+                    assert media_info is not None
+                    await respond_with_multipart_responder(
+                        self.hs.get_clock(), request, responder, media_info
+                    )
+                    return
+                else:
+                    await respond_with_responder(
+                        request,
+                        responder,
+                        file_info.thumbnail.type,
+                        file_info.thumbnail.length,
+                    )
+                    return

             # If we can't find the thumbnail we regenerate it. This can happen
             # if e.g. we've deleted the thumbnails but still have the original
@@ -558,12 +592,18 @@ class ThumbnailProvider:
             )

             responder = await self.media_storage.fetch_media(file_info)
-            await respond_with_responder(
-                request,
-                responder,
-                file_info.thumbnail.type,
-                file_info.thumbnail.length,
-            )
+            if for_federation:
+                assert media_info is not None
+                await respond_with_multipart_responder(
+                    self.hs.get_clock(), request, responder, media_info
+                )
+            else:
+                await respond_with_responder(
+                    request,
+                    responder,
+                    file_info.thumbnail.type,
+                    file_info.thumbnail.length,
+                )
         else:
             # This might be because:
             # 1. We can't create thumbnails for the given media (corrupted or
synapse/rest/__init__.py

@@ -145,6 +145,10 @@ class ClientRestResource(JsonResource):
         password_policy.register_servlets(hs, client_resource)
         knock.register_servlets(hs, client_resource)
         appservice_ping.register_servlets(hs, client_resource)
+        if hs.config.server.enable_media_repo:
+            from synapse.rest.client import media
+
+            media.register_servlets(hs, client_resource)

         # moving to /_synapse/admin
         if is_main_process:
synapse/rest/admin/experimental_features.py

@@ -31,7 +31,9 @@ from synapse.rest.admin import admin_patterns, assert_requester_is_admin
 from synapse.types import JsonDict, UserID

 if TYPE_CHECKING:
-    from synapse.server import HomeServer
+    from typing_extensions import assert_never
+
+    from synapse.server import HomeServer, HomeServerConfig


 class ExperimentalFeature(str, Enum):
@@ -39,8 +41,16 @@ class ExperimentalFeature(str, Enum):
     Currently supported per-user features
     """

     MSC3026 = "msc3026"
     MSC3881 = "msc3881"
+    MSC3575 = "msc3575"
+
+    def is_globally_enabled(self, config: "HomeServerConfig") -> bool:
+        if self is ExperimentalFeature.MSC3881:
+            return config.experimental.msc3881_enabled
+        if self is ExperimentalFeature.MSC3575:
+            return config.experimental.msc3575_enabled
+
+        assert_never(self)


 class ExperimentalFeaturesRestServlet(RestServlet):
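
Aside (not part of the commit): `is_globally_enabled` uses the exhaustiveness idiom: handle every enum member explicitly and end with `assert_never`, so that adding a member without a branch is flagged by the type checker (and raises if somehow reached at runtime). A standalone sketch of the same idiom:

    from enum import Enum

    from typing_extensions import assert_never

    class Feature(str, Enum):
        A = "a"
        B = "b"

    def enabled(f: Feature) -> bool:
        if f is Feature.A:
            return True
        if f is Feature.B:
            return False
        # mypy reports this as reachable if a new member lacks a branch
        assert_never(f)

    assert enabled(Feature.A) is True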
synapse/rest/client/media.py

@@ -22,6 +22,7 @@
 import logging
 import re
+from typing import Optional

 from synapse.http.server import (
     HttpServer,
@@ -46,7 +47,7 @@ from synapse.util.stringutils import parse_and_validate_server_name
 logger = logging.getLogger(__name__)


-class UnstablePreviewURLServlet(RestServlet):
+class PreviewURLServlet(RestServlet):
     """
     Same as `GET /_matrix/media/r0/preview_url`, this endpoint provides a generic preview API
     for URLs which outputs Open Graph (https://ogp.me/) responses (with some Matrix
@@ -64,9 +65,7 @@ class UnstablePreviewURLServlet(RestServlet):
     * Matrix cannot be used to distribute the metadata between homeservers.
     """

-    PATTERNS = [
-        re.compile(r"^/_matrix/client/unstable/org.matrix.msc3916/media/preview_url$")
-    ]
+    PATTERNS = [re.compile(r"^/_matrix/client/v1/media/preview_url$")]

     def __init__(
         self,
@@ -94,10 +93,8 @@ class UnstablePreviewURLServlet(RestServlet):
         respond_with_json_bytes(request, 200, og, send_cors=True)


-class UnstableMediaConfigResource(RestServlet):
-    PATTERNS = [
-        re.compile(r"^/_matrix/client/unstable/org.matrix.msc3916/media/config$")
-    ]
+class MediaConfigResource(RestServlet):
+    PATTERNS = [re.compile(r"^/_matrix/client/v1/media/config$")]

     def __init__(self, hs: "HomeServer"):
         super().__init__()
@@ -111,10 +108,10 @@ class UnstableMediaConfigResource(RestServlet):
         respond_with_json(request, 200, self.limits_dict, send_cors=True)


-class UnstableThumbnailResource(RestServlet):
+class ThumbnailResource(RestServlet):
     PATTERNS = [
         re.compile(
-            "/_matrix/client/unstable/org.matrix.msc3916/media/thumbnail/(?P<server_name>[^/]*)/(?P<media_id>[^/]*)$"
+            "/_matrix/client/v1/media/thumbnail/(?P<server_name>[^/]*)/(?P<media_id>[^/]*)$"
         )
     ]

@@ -158,11 +155,25 @@ class UnstableThumbnailResource(RestServlet):
         if self._is_mine_server_name(server_name):
             if self.dynamic_thumbnails:
                 await self.thumbnailer.select_or_generate_local_thumbnail(
-                    request, media_id, width, height, method, m_type, max_timeout_ms
+                    request,
+                    media_id,
+                    width,
+                    height,
+                    method,
+                    m_type,
+                    max_timeout_ms,
+                    False,
                 )
             else:
                 await self.thumbnailer.respond_local_thumbnail(
-                    request, media_id, width, height, method, m_type, max_timeout_ms
+                    request,
+                    media_id,
+                    width,
+                    height,
+                    method,
+                    m_type,
+                    max_timeout_ms,
+                    False,
                 )
             self.media_repo.mark_recently_accessed(None, media_id)
         else:
@@ -190,18 +201,79 @@ class UnstableThumbnailResource(RestServlet):
                 m_type,
                 max_timeout_ms,
                 ip_address,
+                True,
             )
             self.media_repo.mark_recently_accessed(server_name, media_id)


+class DownloadResource(RestServlet):
+    PATTERNS = [
+        re.compile(
+            "/_matrix/client/v1/media/download/(?P<server_name>[^/]*)/(?P<media_id>[^/]*)(/(?P<file_name>[^/]*))?$"
+        )
+    ]
+
+    def __init__(self, hs: "HomeServer", media_repo: "MediaRepository"):
+        super().__init__()
+        self.media_repo = media_repo
+        self._is_mine_server_name = hs.is_mine_server_name
+        self.auth = hs.get_auth()
+
+    async def on_GET(
+        self,
+        request: SynapseRequest,
+        server_name: str,
+        media_id: str,
+        file_name: Optional[str] = None,
+    ) -> None:
+        # Validate the server name, raising if invalid
+        parse_and_validate_server_name(server_name)
+
+        await self.auth.get_user_by_req(request)
+
+        set_cors_headers(request)
+        set_corp_headers(request)
+        request.setHeader(
+            b"Content-Security-Policy",
+            b"sandbox;"
+            b" default-src 'none';"
+            b" script-src 'none';"
+            b" plugin-types application/pdf;"
+            b" style-src 'unsafe-inline';"
+            b" media-src 'self';"
+            b" object-src 'self';",
+        )
+        # Limited non-standard form of CSP for IE11
+        request.setHeader(b"X-Content-Security-Policy", b"sandbox;")
+        request.setHeader(b"Referrer-Policy", b"no-referrer")
+        max_timeout_ms = parse_integer(
+            request, "timeout_ms", default=DEFAULT_MAX_TIMEOUT_MS
+        )
+        max_timeout_ms = min(max_timeout_ms, MAXIMUM_ALLOWED_MAX_TIMEOUT_MS)
+
+        if self._is_mine_server_name(server_name):
+            await self.media_repo.get_local_media(
+                request, media_id, file_name, max_timeout_ms
+            )
+        else:
+            ip_address = request.getClientAddress().host
+            await self.media_repo.get_remote_media(
+                request,
+                server_name,
+                media_id,
+                file_name,
+                max_timeout_ms,
+                ip_address,
+                True,
+            )
+
+
 def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
-    if hs.config.experimental.msc3916_authenticated_media_enabled:
-        media_repo = hs.get_media_repository()
-        if hs.config.media.url_preview_enabled:
-            UnstablePreviewURLServlet(
-                hs, media_repo, media_repo.media_storage
-            ).register(http_server)
-        UnstableMediaConfigResource(hs).register(http_server)
-        UnstableThumbnailResource(hs, media_repo, media_repo.media_storage).register(
-            http_server
-        )
+    media_repo = hs.get_media_repository()
+    if hs.config.media.url_preview_enabled:
+        PreviewURLServlet(hs, media_repo, media_repo.media_storage).register(
+            http_server
+        )
+    MediaConfigResource(hs).register(http_server)
+    ThumbnailResource(hs, media_repo, media_repo.media_storage).register(http_server)
+    DownloadResource(hs, media_repo).register(http_server)
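
Aside (not part of the commit): with these servlets registered under `/_matrix/client/v1/media/...`, media downloads require client authentication (`get_user_by_req` above), unlike the legacy open `/_matrix/media` namespace. A hedged sketch of a client fetch; the homeserver URL, access token, and media IDs are placeholders:

    import requests

    HS = "https://matrix.example.org"
    TOKEN = "syt_..."  # hypothetical access token

    url = f"{HS}/_matrix/client/v1/media/download/example.org/someMediaId"
    resp = requests.get(
        url,
        headers={"Authorization": f"Bearer {TOKEN}"},
        params={"timeout_ms": "20000"},
        timeout=30,
    )
    resp.raise_for_status()
    with open("download.bin", "wb") as f:
        f.write(resp.content)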
synapse/rest/client/pusher.py

@@ -32,6 +32,7 @@ from synapse.http.servlet import (
 )
 from synapse.http.site import SynapseRequest
 from synapse.push import PusherConfigException
+from synapse.rest.admin.experimental_features import ExperimentalFeature
 from synapse.rest.client._base import client_patterns
 from synapse.rest.synapse.client.unsubscribe import UnsubscribeResource
 from synapse.types import JsonDict
@@ -49,20 +50,22 @@ class PushersRestServlet(RestServlet):
         super().__init__()
         self.hs = hs
         self.auth = hs.get_auth()
-        self._msc3881_enabled = self.hs.config.experimental.msc3881_enabled
+        self._store = hs.get_datastores().main

     async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
         requester = await self.auth.get_user_by_req(request)
-        user = requester.user
+        user_id = requester.user.to_string()

-        pushers = await self.hs.get_datastores().main.get_pushers_by_user_id(
-            user.to_string()
+        msc3881_enabled = await self._store.is_feature_enabled(
+            user_id, ExperimentalFeature.MSC3881
         )

+        pushers = await self.hs.get_datastores().main.get_pushers_by_user_id(user_id)
+
         pusher_dicts = [p.as_dict() for p in pushers]

         for pusher in pusher_dicts:
-            if self._msc3881_enabled:
+            if msc3881_enabled:
                 pusher["org.matrix.msc3881.enabled"] = pusher["enabled"]
                 pusher["org.matrix.msc3881.device_id"] = pusher["device_id"]
                 del pusher["enabled"]
@@ -80,11 +83,15 @@ class PushersSetRestServlet(RestServlet):
         self.auth = hs.get_auth()
         self.notifier = hs.get_notifier()
         self.pusher_pool = self.hs.get_pusherpool()
-        self._msc3881_enabled = self.hs.config.experimental.msc3881_enabled
+        self._store = hs.get_datastores().main

     async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
         requester = await self.auth.get_user_by_req(request)
-        user = requester.user
+        user_id = requester.user.to_string()
+
+        msc3881_enabled = await self._store.is_feature_enabled(
+            user_id, ExperimentalFeature.MSC3881
+        )

         content = parse_json_object_from_request(request)

@@ -95,7 +102,7 @@ class PushersSetRestServlet(RestServlet):
             and content["kind"] is None
         ):
             await self.pusher_pool.remove_pusher(
-                content["app_id"], content["pushkey"], user_id=user.to_string()
+                content["app_id"], content["pushkey"], user_id=user_id
             )
             return 200, {}

@@ -120,19 +127,19 @@ class PushersSetRestServlet(RestServlet):
             append = content["append"]

         enabled = True
-        if self._msc3881_enabled and "org.matrix.msc3881.enabled" in content:
+        if msc3881_enabled and "org.matrix.msc3881.enabled" in content:
             enabled = content["org.matrix.msc3881.enabled"]

         if not append:
             await self.pusher_pool.remove_pushers_by_app_id_and_pushkey_not_user(
                 app_id=content["app_id"],
                 pushkey=content["pushkey"],
-                not_user_id=user.to_string(),
+                not_user_id=user_id,
             )

         try:
             await self.pusher_pool.add_or_update_pusher(
-                user_id=user.to_string(),
+                user_id=user_id,
                 kind=content["kind"],
                 app_id=content["app_id"],
                 app_display_name=content["app_display_name"],
@ -53,6 +53,7 @@ from synapse.http.servlet import (
|
|||
)
|
||||
from synapse.http.site import SynapseRequest
|
||||
from synapse.logging.opentracing import trace_with_opname
|
||||
from synapse.rest.admin.experimental_features import ExperimentalFeature
|
||||
from synapse.types import JsonDict, Requester, StreamToken
|
||||
from synapse.types.rest.client import SlidingSyncBody
|
||||
from synapse.util import json_decoder
|
||||
|
@ -673,7 +674,9 @@ class SlidingSyncE2eeRestServlet(RestServlet):
|
|||
)
|
||||
|
||||
async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
|
||||
requester = await self.auth.get_user_by_req(request, allow_guest=True)
|
||||
requester = await self.auth.get_user_by_req_experimental_feature(
|
||||
request, allow_guest=True, feature=ExperimentalFeature.MSC3575
|
||||
)
|
||||
user = requester.user
|
||||
device_id = requester.device_id
|
||||
|
||||
|
@ -761,7 +764,6 @@ class SlidingSyncRestServlet(RestServlet):
|
|||
"lists": {
|
||||
"foo-list": {
|
||||
"ranges": [ [0, 99] ],
|
||||
"sort": [ "by_notification_level", "by_recency", "by_name" ],
|
||||
"required_state": [
|
||||
["m.room.join_rules", ""],
|
||||
["m.room.history_visibility", ""],
|
||||
|
@ -771,7 +773,6 @@ class SlidingSyncRestServlet(RestServlet):
|
|||
"filters": {
|
||||
"is_dm": true
|
||||
},
|
||||
"bump_event_types": [ "m.room.message", "m.room.encrypted" ],
|
||||
}
|
||||
},
|
||||
// Room Subscriptions API
|
||||
|
@ -779,10 +780,6 @@ class SlidingSyncRestServlet(RestServlet):
|
|||
"!sub1:bar": {
|
||||
"required_state": [ ["*","*"] ],
|
||||
"timeline_limit": 10,
|
||||
"include_old_rooms": {
|
||||
"timeline_limit": 1,
|
||||
"required_state": [ ["m.room.tombstone", ""], ["m.room.create", ""] ],
|
||||
}
|
||||
}
|
||||
},
|
||||
// Extensions API
|
||||
|
@ -791,7 +788,7 @@ class SlidingSyncRestServlet(RestServlet):
|
|||
|
||||
Response JSON::
|
||||
{
|
||||
"next_pos": "s58_224_0_13_10_1_1_16_0_1",
|
||||
"pos": "s58_224_0_13_10_1_1_16_0_1",
|
||||
"lists": {
|
||||
"foo-list": {
|
||||
"count": 1337,
|
||||
|
@ -830,7 +827,8 @@ class SlidingSyncRestServlet(RestServlet):
|
|||
"joined_count": 41,
|
||||
"invited_count": 1,
|
||||
"notification_count": 1,
|
||||
"highlight_count": 0
|
||||
"highlight_count": 0,
|
||||
"num_live": 2"
|
||||
},
|
||||
// rooms from list
|
||||
"!foo:bar": {
|
||||
|
@ -855,7 +853,8 @@ class SlidingSyncRestServlet(RestServlet):
|
|||
"joined_count": 4,
|
||||
"invited_count": 0,
|
||||
"notification_count": 54,
|
||||
"highlight_count": 3
|
||||
"highlight_count": 3,
|
||||
"num_live": 1,
|
||||
},
|
||||
// ... 99 more items
|
||||
},
|
||||
|
@ -871,12 +870,16 @@ class SlidingSyncRestServlet(RestServlet):
|
|||
super().__init__()
|
||||
self.auth = hs.get_auth()
|
||||
self.store = hs.get_datastores().main
|
||||
self.clock = hs.get_clock()
|
||||
self.filtering = hs.get_filtering()
|
||||
self.sliding_sync_handler = hs.get_sliding_sync_handler()
|
||||
self.event_serializer = hs.get_event_client_serializer()
|
||||
|
||||
# TODO: Update this to `on_GET` once we figure out how we want to handle params
|
||||
async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
|
||||
requester = await self.auth.get_user_by_req(request, allow_guest=True)
|
||||
requester = await self.auth.get_user_by_req_experimental_feature(
|
||||
request, allow_guest=True, feature=ExperimentalFeature.MSC3575
|
||||
)
|
||||
|
||||
user = requester.user
|
||||
device_id = requester.device_id
|
||||
|
||||
|
@ -920,22 +923,25 @@ class SlidingSyncRestServlet(RestServlet):
|
|||
logger.info("Client has disconnected; not serializing response.")
|
||||
return 200, {}
|
||||
|
||||
response_content = await self.encode_response(sliding_sync_results)
|
||||
response_content = await self.encode_response(requester, sliding_sync_results)
|
||||
|
||||
return 200, response_content
|
||||
|
||||
# TODO: Is there a better way to encode things?
|
||||
async def encode_response(
|
||||
self,
|
||||
requester: Requester,
|
||||
sliding_sync_result: SlidingSyncResult,
|
||||
) -> JsonDict:
|
||||
response: JsonDict = defaultdict(dict)
|
||||
|
||||
response["next_pos"] = await sliding_sync_result.next_pos.to_string(self.store)
|
||||
response["pos"] = await sliding_sync_result.next_pos.to_string(self.store)
|
||||
serialized_lists = self.encode_lists(sliding_sync_result.lists)
|
||||
if serialized_lists:
|
||||
response["lists"] = serialized_lists
|
||||
response["rooms"] = {} # TODO: sliding_sync_result.rooms
|
||||
response["rooms"] = await self.encode_rooms(
|
||||
requester, sliding_sync_result.rooms
|
||||
)
|
||||
response["extensions"] = {} # TODO: sliding_sync_result.extensions
|
||||
|
||||
return response
|
||||
|
@@ -961,10 +967,95 @@ class SlidingSyncRestServlet(RestServlet):

        return serialized_lists

    async def encode_rooms(
        self,
        requester: Requester,
        rooms: Dict[str, SlidingSyncResult.RoomResult],
    ) -> JsonDict:
        time_now = self.clock.time_msec()

        serialize_options = SerializeEventConfig(
            event_format=format_event_for_client_v2_without_room_id,
            requester=requester,
        )

        serialized_rooms: Dict[str, JsonDict] = {}
        for room_id, room_result in rooms.items():
            serialized_rooms[room_id] = {
                "joined_count": room_result.joined_count,
                "invited_count": room_result.invited_count,
                "notification_count": room_result.notification_count,
                "highlight_count": room_result.highlight_count,
            }

            if room_result.name:
                serialized_rooms[room_id]["name"] = room_result.name

            if room_result.avatar:
                serialized_rooms[room_id]["avatar"] = room_result.avatar

            if room_result.heroes:
                serialized_rooms[room_id]["heroes"] = room_result.heroes

            # We should only include the `initial` key if it's `True` to save bandwidth.
            # The absence of this flag means `False`.
            if room_result.initial:
                serialized_rooms[room_id]["initial"] = room_result.initial

            # This will be omitted for invite/knock rooms with `stripped_state`
            if room_result.required_state is not None:
                serialized_required_state = (
                    await self.event_serializer.serialize_events(
                        room_result.required_state,
                        time_now,
                        config=serialize_options,
                    )
                )
                serialized_rooms[room_id]["required_state"] = serialized_required_state

            # This will be omitted for invite/knock rooms with `stripped_state`
            if room_result.timeline_events is not None:
                serialized_timeline = await self.event_serializer.serialize_events(
                    room_result.timeline_events,
                    time_now,
                    config=serialize_options,
                    bundle_aggregations=room_result.bundled_aggregations,
                )
                serialized_rooms[room_id]["timeline"] = serialized_timeline

            # This will be omitted for invite/knock rooms with `stripped_state`
            if room_result.limited is not None:
                serialized_rooms[room_id]["limited"] = room_result.limited

            # This will be omitted for invite/knock rooms with `stripped_state`
            if room_result.prev_batch is not None:
                serialized_rooms[room_id]["prev_batch"] = (
                    await room_result.prev_batch.to_string(self.store)
                )

            # This will be omitted for invite/knock rooms with `stripped_state`
            if room_result.num_live is not None:
                serialized_rooms[room_id]["num_live"] = room_result.num_live

            # Field should be absent on non-DM rooms
            if room_result.is_dm:
                serialized_rooms[room_id]["is_dm"] = room_result.is_dm

            # Stripped state only applies to invite/knock rooms
            if room_result.stripped_state is not None:
                # TODO: `knocked_state` but that isn't specced yet.
                #
                # TODO: Instead of adding `knocked_state`, it would be good to rename
                # this to `stripped_state` so it can be shared between invite and knock
                # rooms, see
                # https://github.com/matrix-org/matrix-spec-proposals/pull/3575#discussion_r1117629919
                serialized_rooms[room_id]["invite_state"] = room_result.stripped_state

        return serialized_rooms

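The omit-optional-keys pattern used by `encode_rooms` above reduces to a few lines. A minimal sketch, assuming plain dicts in place of the servlet's real types (the helper name and fields are illustrative, not Synapse API):

```python
from typing import Any, Dict, Optional


def encode_optional_fields(
    base: Dict[str, Any], optional: Dict[str, Optional[Any]]
) -> Dict[str, Any]:
    # Copy the always-present counters, then add each optional field only
    # when it carries a value, so absent/`False` never wastes bandwidth.
    result = dict(base)
    for key, value in optional.items():
        if value is not None and value is not False:
            result[key] = value
    return result


# Example: `initial` and `name` are omitted entirely when falsy.
room = encode_optional_fields(
    {"joined_count": 2, "invited_count": 0},
    {"initial": False, "name": None, "is_dm": True},
)
assert room == {"joined_count": 2, "invited_count": 0, "is_dm": True}
```
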
def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
    SyncRestServlet(hs).register(http_server)

    if hs.config.experimental.msc3575_enabled:
        SlidingSyncRestServlet(hs).register(http_server)
        SlidingSyncE2eeRestServlet(hs).register(http_server)
    SlidingSyncRestServlet(hs).register(http_server)
    SlidingSyncE2eeRestServlet(hs).register(http_server)


@@ -25,11 +25,11 @@ import logging
import re
from typing import TYPE_CHECKING, Tuple

from twisted.web.server import Request

from synapse.api.constants import RoomCreationPreset
from synapse.http.server import HttpServer
from synapse.http.servlet import RestServlet
from synapse.http.site import SynapseRequest
from synapse.rest.admin.experimental_features import ExperimentalFeature
from synapse.types import JsonDict

if TYPE_CHECKING:

@@ -45,6 +45,8 @@ class VersionsRestServlet(RestServlet):
    def __init__(self, hs: "HomeServer"):
        super().__init__()
        self.config = hs.config
        self.auth = hs.get_auth()
        self.store = hs.get_datastores().main

        # Calculate these once since they shouldn't change after start-up.
        self.e2ee_forced_public = (

@@ -60,7 +62,17 @@ class VersionsRestServlet(RestServlet):
            in self.config.room.encryption_enabled_by_default_for_room_presets
        )

    def on_GET(self, request: Request) -> Tuple[int, JsonDict]:
    async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
        msc3881_enabled = self.config.experimental.msc3881_enabled

        if self.auth.has_access_token(request):
            requester = await self.auth.get_user_by_req(request)
            user_id = requester.user.to_string()

            msc3881_enabled = await self.store.is_feature_enabled(
                user_id, ExperimentalFeature.MSC3881
            )

        return (
            200,
            {

@@ -124,7 +136,7 @@ class VersionsRestServlet(RestServlet):
                    # TODO: this is no longer needed once unstable MSC3882 does not need to be supported:
                    "org.matrix.msc3882": self.config.auth.login_via_existing_enabled,
                    # Adds support for remotely enabling/disabling pushers, as per MSC3881
                    "org.matrix.msc3881": self.config.experimental.msc3881_enabled,
                    "org.matrix.msc3881": msc3881_enabled,
                    # Adds support for filtering /messages by event relation.
                    "org.matrix.msc3874": self.config.experimental.msc3874_enabled,
                    # Adds support for simple HTTP rendezvous as per MSC3886


@@ -105,4 +105,5 @@ class DownloadResource(RestServlet):
            file_name,
            max_timeout_ms,
            ip_address,
            False,
        )


@@ -88,11 +88,25 @@ class ThumbnailResource(RestServlet):
        if self._is_mine_server_name(server_name):
            if self.dynamic_thumbnails:
                await self.thumbnail_provider.select_or_generate_local_thumbnail(
                    request, media_id, width, height, method, m_type, max_timeout_ms
                    request,
                    media_id,
                    width,
                    height,
                    method,
                    m_type,
                    max_timeout_ms,
                    False,
                )
            else:
                await self.thumbnail_provider.respond_local_thumbnail(
                    request, media_id, width, height, method, m_type, max_timeout_ms
                    request,
                    media_id,
                    width,
                    height,
                    method,
                    m_type,
                    max_timeout_ms,
                    False,
                )
            self.media_repo.mark_recently_accessed(None, media_id)
        else:

@@ -120,5 +134,6 @@ class ThumbnailResource(RestServlet):
                m_type,
                max_timeout_ms,
                ip_address,
                False,
            )
            self.media_repo.mark_recently_accessed(server_name, media_id)


@@ -28,7 +28,7 @@
import abc
import functools
import logging
from typing import TYPE_CHECKING, Callable, Dict, List, Optional, TypeVar, cast
from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Type, TypeVar, cast

from typing_extensions import TypeAlias

@@ -161,6 +161,7 @@ if TYPE_CHECKING:
    from synapse.handlers.jwt import JwtHandler
    from synapse.handlers.oidc import OidcHandler
    from synapse.handlers.saml import SamlHandler
    from synapse.storage._base import SQLBaseStore


# The annotation for `cache_in_self` used to be

@@ -255,10 +256,13 @@ class HomeServer(metaclass=abc.ABCMeta):
        "stats",
    ]

    # This is overridden in derived application classes
    # (such as synapse.app.homeserver.SynapseHomeServer) and gives the class to be
    # instantiated during setup() for future return by get_datastores()
    DATASTORE_CLASS = abc.abstractproperty()
    @property
    @abc.abstractmethod
    def DATASTORE_CLASS(self) -> Type["SQLBaseStore"]:
        # This is overridden in derived application classes
        # (such as synapse.app.homeserver.SynapseHomeServer) and gives the class to be
        # instantiated during setup() for future return by get_datastores()
        pass

    def __init__(
        self,
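`abc.abstractproperty` has been deprecated since Python 3.3, which is what the hunk above addresses by stacking `@property` on `@abc.abstractmethod`. A minimal sketch of the same migration (the class names here are illustrative, not Synapse's):

```python
import abc


class Store:
    """Stand-in for a concrete datastore class."""


class OldServer(metaclass=abc.ABCMeta):
    # Deprecated spelling: opaque to type checkers.
    DATASTORE_CLASS = abc.abstractproperty()


class NewServer(metaclass=abc.ABCMeta):
    @property
    @abc.abstractmethod
    def DATASTORE_CLASS(self) -> type:
        """Subclasses must override this with the concrete store class."""


class ConcreteServer(NewServer):
    # A plain class attribute satisfies the abstract property.
    DATASTORE_CLASS = Store


assert ConcreteServer().DATASTORE_CLASS is Store
```
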
@@ -409,7 +409,7 @@ class StateStorageController:

        return state_ids

    async def get_state_at(
    async def get_state_ids_at(
        self,
        room_id: str,
        stream_position: StreamToken,

@@ -436,6 +436,9 @@ class StateStorageController:
            )
        )

        # FIXME: This will return incorrect results when there are timeline gaps. For
        # example, when you try to get a point in the room we haven't backfilled before.

        if last_event_id:
            state = await self.get_state_after_event(
                last_event_id,

@@ -457,6 +460,30 @@ class StateStorageController:
            )
        return state

    @trace
    @tag_args
    async def get_state_at(
        self,
        room_id: str,
        stream_position: StreamToken,
        state_filter: Optional[StateFilter] = None,
        await_full_state: bool = True,
    ) -> StateMap[EventBase]:
        """Same as `get_state_ids_at` but also fetches the events"""
        state_map_ids = await self.get_state_ids_at(
            room_id, stream_position, state_filter, await_full_state
        )

        event_map = await self.stores.main.get_events(list(state_map_ids.values()))

        state_map = {}
        for key, event_id in state_map_ids.items():
            event = event_map.get(event_id)
            if event:
                state_map[key] = event

        return state_map

    @trace
    @tag_args
    async def get_state_for_groups(


@@ -825,14 +825,13 @@ class DeviceInboxWorkerStore(SQLBaseStore):
            # Check if we've already inserted a matching message_id for that
            # origin. This can happen if the origin doesn't receive our
            # acknowledgement from the first time we received the message.
            already_inserted = self.db_pool.simple_select_one_txn(
            already_inserted = self.db_pool.simple_select_list_txn(
                txn,
                table="device_federation_inbox",
                keyvalues={"origin": origin, "message_id": message_id},
                retcols=("message_id",),
                allow_none=True,
            )
            if already_inserted is not None:
            if already_inserted:
                return

            # Add an entry for this message_id so that we know we've processed


@@ -55,7 +55,7 @@ from synapse.api.room_versions import (
)
from synapse.events import EventBase, make_event_from_dict
from synapse.events.snapshot import EventContext
from synapse.events.utils import prune_event
from synapse.events.utils import prune_event, strip_event
from synapse.logging.context import (
    PreserveLoggingContext,
    current_context,

@@ -1025,15 +1025,7 @@ class EventsWorkerStore(SQLBaseStore):

        state_to_include = await self.get_events(selected_state_ids.values())

        return [
            {
                "type": e.type,
                "state_key": e.state_key,
                "content": e.content,
                "sender": e.sender,
            }
            for e in state_to_include.values()
        ]
        return [strip_event(e) for e in state_to_include.values()]

    def _maybe_start_fetch_thread(self) -> None:
        """Starts an event fetch thread if we are not yet at the maximum number."""

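Judging from the inline dict it replaces, `strip_event` reduces an event to the four stripped-state fields. A sketch of an equivalent helper, assuming that behaviour (inferred from the removed code, not the actual `synapse.events.utils` implementation):

```python
from typing import Any, Dict


def strip_event(event: Any) -> Dict[str, Any]:
    # Keep only the fields that the stripped-state format exposes,
    # mirroring the literal dict this refactor removed.
    return {
        "type": event.type,
        "state_key": event.state_key,
        "content": event.content,
        "sender": event.sender,
    }
```
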
@@ -21,7 +21,11 @@

from typing import TYPE_CHECKING, Dict, FrozenSet, List, Tuple, cast

from synapse.storage.database import DatabasePool, LoggingDatabaseConnection
from synapse.storage.database import (
    DatabasePool,
    LoggingDatabaseConnection,
    LoggingTransaction,
)
from synapse.storage.databases.main import CacheInvalidationWorkerStore
from synapse.util.caches.descriptors import cached

@@ -73,12 +77,54 @@ class ExperimentalFeaturesStore(CacheInvalidationWorkerStore):
            features:
                pairs of features and True/False for whether the feature should be enabled
        """
        for feature, enabled in features.items():
            await self.db_pool.simple_upsert(
                table="per_user_experimental_features",
                keyvalues={"feature": feature, "user_id": user},
                values={"enabled": enabled},
                insertion_values={"user_id": user, "feature": feature},
            )

            await self.invalidate_cache_and_stream("list_enabled_features", (user,))

        def set_features_for_user_txn(txn: LoggingTransaction) -> None:
            for feature, enabled in features.items():
                self.db_pool.simple_upsert_txn(
                    txn,
                    table="per_user_experimental_features",
                    keyvalues={"feature": feature, "user_id": user},
                    values={"enabled": enabled},
                    insertion_values={"user_id": user, "feature": feature},
                )

                self._invalidate_cache_and_stream(
                    txn, self.is_feature_enabled, (user, feature)
                )

            self._invalidate_cache_and_stream(txn, self.list_enabled_features, (user,))

        return await self.db_pool.runInteraction(
            "set_features_for_user", set_features_for_user_txn
        )

    @cached()
    async def is_feature_enabled(
        self, user_id: str, feature: "ExperimentalFeature"
    ) -> bool:
        """
        Checks to see if a given feature is enabled for the user
        Args:
            user_id: the user to be queried on
            feature: the feature in question
        Returns:
            True if the feature is enabled, False if it is not or if the feature was
            not found.
        """

        if feature.is_globally_enabled(self.hs.config):
            return True

        # if it's not enabled globally, check if it is enabled per-user
        res = await self.db_pool.simple_select_one_onecol(
            table="per_user_experimental_features",
            keyvalues={"user_id": user_id, "feature": feature},
            retcol="enabled",
            allow_none=True,
            desc="get_feature_enabled",
        )

        # None and false are treated the same
        db_enabled = bool(res)

        return db_enabled
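The lookup above resolves in two layers: a global config check first, then a per-user database row, with a missing row treated as disabled. A condensed, standalone sketch of that resolution order (a toy in-memory store, not the real `db_pool` plumbing):

```python
from typing import Dict, Tuple


class FeatureStoreSketch:
    """Simplified model of the two-layer feature lookup above."""

    def __init__(self, globally_enabled: Dict[str, bool]) -> None:
        self._global = globally_enabled
        self._per_user: Dict[Tuple[str, str], bool] = {}

    def set_feature_for_user(self, user_id: str, feature: str, enabled: bool) -> None:
        # Mirrors the upsert: later writes replace earlier ones.
        self._per_user[(user_id, feature)] = enabled

    def is_feature_enabled(self, user_id: str, feature: str) -> bool:
        # 1. A globally enabled feature wins outright.
        if self._global.get(feature, False):
            return True
        # 2. Otherwise fall back to the per-user row; a missing row and an
        #    explicit False are treated the same.
        return bool(self._per_user.get((user_id, feature)))


store = FeatureStoreSketch(globally_enabled={"msc3575": False})
store.set_feature_for_user("@alice:example.org", "msc3575", True)
assert store.is_feature_enabled("@alice:example.org", "msc3575")
assert not store.is_feature_enabled("@bob:example.org", "msc3575")
```
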
@@ -44,6 +44,7 @@ what sort order was used:
import logging
from typing import (
    TYPE_CHECKING,
    AbstractSet,
    Any,
    Collection,
    Dict,

@@ -62,7 +63,7 @@ from typing_extensions import Literal

from twisted.internet import defer

from synapse.api.constants import Direction
from synapse.api.constants import Direction, EventTypes, Membership
from synapse.api.filtering import Filter
from synapse.events import EventBase
from synapse.logging.context import make_deferred_yieldable, run_in_background

@@ -111,6 +112,32 @@ class _EventsAround:
    end: RoomStreamToken


@attr.s(slots=True, frozen=True, auto_attribs=True)
class CurrentStateDeltaMembership:
    """
    Attributes:
        event_id: The "current" membership event ID in this room.
        event_pos: The position of the "current" membership event in the event stream.
        prev_event_id: The previous membership event in this room that was replaced by
            the "current" one. May be `None` if there was no previous membership event.
        room_id: The room ID of the membership event.
        membership: The membership state of the user in the room
        sender: The person who sent the membership event
    """

    room_id: str
    # Event
    event_id: Optional[str]
    event_pos: PersistedEventPosition
    membership: str
    sender: Optional[str]
    # Prev event
    prev_event_id: Optional[str]
    prev_event_pos: Optional[PersistedEventPosition]
    prev_membership: Optional[str]
    prev_sender: Optional[str]


def generate_pagination_where_clause(
    direction: Direction,
    column_names: Tuple[str, str],

@@ -390,6 +417,43 @@ def _filter_results(
    return True


def _filter_results_by_stream(
    lower_token: Optional[RoomStreamToken],
    upper_token: Optional[RoomStreamToken],
    instance_name: str,
    stream_ordering: int,
) -> bool:
    """
    This function only works with "live" tokens with `stream_ordering` only. See
    `_filter_results(...)` if you want to work with all tokens.

    Returns True if the event persisted by the given instance at the given
    stream_ordering falls between the two tokens (taking a None
    token to mean unbounded).

    Used to filter results from fetching events in the DB against the given
    tokens. This is necessary to handle the case where the tokens include
    position maps, which we handle by fetching more than necessary from the DB
    and then filtering (rather than attempting to construct a complicated SQL
    query).
    """
    if lower_token:
        assert lower_token.topological is None

        # If these are live tokens we compare the stream ordering against the
        # writers' stream position.
        if stream_ordering <= lower_token.get_stream_pos_for_instance(instance_name):
            return False

    if upper_token:
        assert upper_token.topological is None

        if upper_token.get_stream_pos_for_instance(instance_name) < stream_ordering:
            return False

    return True


def filter_to_clause(event_filter: Optional[Filter]) -> Tuple[str, List[str]]:
    # NB: This may create SQL clauses that don't optimise well (and we don't
    # have indices on all possible clauses). E.g. it may create

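The bounds above implement a half-open range: an event passes only when `lower < stream_ordering <= upper` for its writer instance, with `None` meaning unbounded. A tiny standalone check of that semantics, using plain integers in place of the real token objects:

```python
from typing import Optional


def in_stream_range(
    lower: Optional[int], upper: Optional[int], stream_ordering: int
) -> bool:
    # `None` means unbounded on that side, matching _filter_results_by_stream.
    if lower is not None and stream_ordering <= lower:
        return False
    if upper is not None and upper < stream_ordering:
        return False
    return True


assert not in_stream_range(lower=5, upper=10, stream_ordering=5)  # excluded: must be > lower
assert in_stream_range(lower=5, upper=10, stream_ordering=10)  # included: <= upper
assert in_stream_range(lower=None, upper=None, stream_ordering=99)
```
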
@@ -734,6 +798,191 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):

        return ret, key

    async def get_current_state_delta_membership_changes_for_user(
        self,
        user_id: str,
        from_key: RoomStreamToken,
        to_key: RoomStreamToken,
        excluded_room_ids: Optional[List[str]] = None,
    ) -> List[CurrentStateDeltaMembership]:
        """
        Fetch membership events (and the previous event that was replaced by that one)
        for a given user.

        Note: This function only works with "live" tokens with `stream_ordering` only.

        We're looking for membership changes in the token range (> `from_key` and <=
        `to_key`).

        Please be mindful to only use this with `from_key` and `to_key` tokens that are
        recent enough to be after when the first local user joined the room. Otherwise,
        the results may be incomplete or too greedy. For example, if you use a token
        range before the first local user joined the room, you will see 0 events since
        `current_state_delta_stream` tracks what the server thinks is the current state
        of the room as time goes. It does not track how state progresses from the
        beginning of the room. So for example, when you remotely join a room, the first
        rows will just be the state when you joined and progress from there.

        You can probably reasonably use this with `/sync` because the `to_key` passed in
        will be the "current" now token and the range will cover when the user joined
        the room.

        Args:
            user_id: The user ID to fetch membership events for.
            from_key: The point in the stream to sync from (fetching events > this point).
            to_key: The token to fetch rooms up to (fetching events <= this point).
            excluded_room_ids: Optional list of room IDs to exclude from the results.

        Returns:
            All membership changes to the current state in the token range. Events are
            sorted by `stream_ordering` ascending.
        """
        # Start by ruling out cases where a DB query is not necessary.
        if from_key == to_key:
            return []

        if from_key:
            has_changed = self._membership_stream_cache.has_entity_changed(
                user_id, int(from_key.stream)
            )
            if not has_changed:
                return []

        def f(txn: LoggingTransaction) -> List[CurrentStateDeltaMembership]:
            # To handle tokens with a non-empty instance_map we fetch more
            # results than necessary and then filter down
            min_from_id = from_key.stream
            max_to_id = to_key.get_max_stream_pos()

            args: List[Any] = [min_from_id, max_to_id, EventTypes.Member, user_id]

            # TODO: It would be good to assert that the `from_token`/`to_token` is >=
            # the first row in `current_state_delta_stream` for the rooms we're
            # interested in. Otherwise, we will end up with empty results and not know
            # it.

            # We could `COALESCE(e.stream_ordering, s.stream_id)` to get more accurate
            # stream positioning when available but given our usages, we can avoid the
            # complexity. Between two (valid) stream tokens, we will still get all of
            # the state changes. Since those events are persisted in a batch, valid
            # tokens will either be before or after the batch of events.
            #
            # `stream_ordering` from the `events` table is more accurate when available
            # since the `current_state_delta_stream` table only tracks that the current
            # state is at this stream position (not what stream position the state event
            # was added) and uses the *minimum* stream position for batches of events.
            sql = """
                SELECT
                    s.room_id,
                    e.event_id,
                    s.instance_name,
                    s.stream_id,
                    m.membership,
                    e.sender,
                    s.prev_event_id,
                    e_prev.instance_name AS prev_instance_name,
                    e_prev.stream_ordering AS prev_stream_ordering,
                    m_prev.membership AS prev_membership,
                    e_prev.sender AS prev_sender
                FROM current_state_delta_stream AS s
                    LEFT JOIN events AS e ON e.event_id = s.event_id
                    LEFT JOIN room_memberships AS m ON m.event_id = s.event_id
                    LEFT JOIN events AS e_prev ON e_prev.event_id = s.prev_event_id
                    LEFT JOIN room_memberships AS m_prev ON m_prev.event_id = s.prev_event_id
                WHERE s.stream_id > ? AND s.stream_id <= ?
                    AND s.type = ?
                    AND s.state_key = ?
                ORDER BY s.stream_id ASC
            """

            txn.execute(sql, args)

            membership_changes: List[CurrentStateDeltaMembership] = []
            for (
                room_id,
                event_id,
                instance_name,
                stream_ordering,
                membership,
                sender,
                prev_event_id,
                prev_instance_name,
                prev_stream_ordering,
                prev_membership,
                prev_sender,
            ) in txn:
                assert room_id is not None
                assert instance_name is not None
                assert stream_ordering is not None

                if _filter_results_by_stream(
                    from_key,
                    to_key,
                    instance_name,
                    stream_ordering,
                ):
                    # When the server leaves a room, it will insert new rows into the
                    # `current_state_delta_stream` table with `event_id = null` for all
                    # current state. This means we might already have a row for the
                    # leave event and then another for the same leave where the
                    # `event_id=null` but the `prev_event_id` is pointing back at the
                    # earlier leave event. We don't want to report the leave if we
                    # already have a leave event.
                    if event_id is None and prev_membership == Membership.LEAVE:
                        continue

                    membership_change = CurrentStateDeltaMembership(
                        room_id=room_id,
                        # Event
                        event_id=event_id,
                        event_pos=PersistedEventPosition(
                            instance_name=instance_name,
                            stream=stream_ordering,
                        ),
                        # When `s.event_id = null`, we won't be able to get the
                        # respective `room_membership` but can assume the user has left
                        # the room because this only happens when the server leaves a
                        # room (meaning everyone locally left) or a state reset which
                        # removed the person from the room.
                        membership=(
                            membership if membership is not None else Membership.LEAVE
                        ),
                        sender=sender,
                        # Prev event
                        prev_event_id=prev_event_id,
                        prev_event_pos=(
                            PersistedEventPosition(
                                instance_name=prev_instance_name,
                                stream=prev_stream_ordering,
                            )
                            if (
                                prev_instance_name is not None
                                and prev_stream_ordering is not None
                            )
                            else None
                        ),
                        prev_membership=prev_membership,
                        prev_sender=prev_sender,
                    )

                    membership_changes.append(membership_change)

            return membership_changes

        membership_changes = await self.db_pool.runInteraction(
            "get_current_state_delta_membership_changes_for_user", f
        )

        room_ids_to_exclude: AbstractSet[str] = set()
        if excluded_room_ids is not None:
            room_ids_to_exclude = set(excluded_room_ids)

        return [
            membership_change
            for membership_change in membership_changes
            if membership_change.room_id not in room_ids_to_exclude
        ]
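A usage sketch of the new query, as a sync-style caller might invoke it; the store handle and tokens are assumed to come from the surrounding sync machinery, which is not shown:

```python
from typing import List

from synapse.storage.databases.main.stream import StreamWorkerStore
from synapse.types import RoomStreamToken


async def recent_membership_flips(
    store: StreamWorkerStore,
    user_id: str,
    since: RoomStreamToken,
    now: RoomStreamToken,
) -> List[str]:
    # Fetch membership deltas in the half-open range (since, now].
    changes = await store.get_current_state_delta_membership_changes_for_user(
        user_id, from_key=since, to_key=now
    )
    # Summarise each transition, e.g. "!room:x: join -> leave".
    return [
        f"{c.room_id}: {c.prev_membership} -> {c.membership}" for c in changes
    ]
```
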

    @cancellable
    async def get_membership_changes_for_user(
        self,

@@ -769,10 +1018,11 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):

            ignore_room_clause = ""
            if excluded_rooms is not None and len(excluded_rooms) > 0:
                ignore_room_clause = "AND e.room_id NOT IN (%s)" % ",".join(
                    "?" for _ in excluded_rooms
                ignore_room_clause, ignore_room_args = make_in_list_sql_clause(
                    txn.database_engine, "e.room_id", excluded_rooms, negative=True
                )
                args = args + excluded_rooms
                ignore_room_clause = f"AND {ignore_room_clause}"
                args += ignore_room_args

            sql = """
                SELECT m.event_id, instance_name, topological_ordering, stream_ordering
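The helper swapped in above builds the placeholder list and argument list in one step. A sketch of the equivalent behaviour, mirroring the removed hand-rolled code (the real helper may emit different SQL per database engine):

```python
from typing import Iterable, List, Tuple


def in_list_sql_clause(
    column: str, values: Iterable[str], negative: bool = False
) -> Tuple[str, List[str]]:
    # One "?" placeholder per value, e.g. "e.room_id NOT IN (?,?)".
    vals = list(values)
    op = "NOT IN" if negative else "IN"
    clause = "%s %s (%s)" % (column, op, ",".join("?" for _ in vals))
    return clause, vals


clause, args = in_list_sql_clause("e.room_id", ["!a:x", "!b:x"], negative=True)
assert clause == "e.room_id NOT IN (?,?)"
assert args == ["!a:x", "!b:x"]
```
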
@@ -1554,6 +1804,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
    ) -> Tuple[List[EventBase], RoomStreamToken]:
        """Returns list of events before or after a given token.

        When Direction.FORWARDS: from_key < x <= to_key
        When Direction.BACKWARDS: from_key >= x > to_key

        Args:
            room_id
            from_key: The token used to stream from

@@ -1570,6 +1823,27 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
            and `to_key`).
        """

        # We can bail early if we're looking forwards, and our `to_key` is already
        # before our `from_key`.
        if (
            direction == Direction.FORWARDS
            and to_key is not None
            and to_key.is_before_or_eq(from_key)
        ):
            # Token selection matches what we do in `_paginate_room_events_txn` if there
            # are no rows
            return [], to_key if to_key else from_key
        # Or vice-versa, if we're looking backwards and our `from_key` is already before
        # our `to_key`.
        elif (
            direction == Direction.BACKWARDS
            and to_key is not None
            and from_key.is_before_or_eq(to_key)
        ):
            # Token selection matches what we do in `_paginate_room_events_txn` if there
            # are no rows
            return [], to_key if to_key else from_key

        rows, token = await self.db_pool.runInteraction(
            "paginate_room_events",
            self._paginate_room_events_txn,


@@ -32,7 +32,10 @@
 * limitations under the License.
 */


-- Tracks what the server thinks is the current state of the room as time goes. It does
-- not track how state progresses from the beginning of the room. So for example, when
-- you remotely join a room, the first rows will just be the state when you joined and
-- progress from there.
CREATE TABLE current_state_delta_stream (
    stream_id BIGINT NOT NULL,
    room_id TEXT NOT NULL,


@@ -1096,6 +1096,9 @@ class PersistedPosition:
    stream: int

    def persisted_after(self, token: AbstractMultiWriterStreamToken) -> bool:
        """
        Checks whether this position happened after the token
        """
        return token.get_stream_pos_for_instance(self.instance_name) < self.stream
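The comparison reads: a position is "after" a token exactly when the token's recorded stream position for the same writer is strictly smaller. A one-line worked check, with plain integers standing in for the real token type:

```python
# If the token has seen stream position 5 for writer "worker1", then a
# position persisted at stream 7 by that writer happened after the token,
# while stream 5 itself did not (strict inequality).
token_pos_for_worker1 = 5
assert token_pos_for_worker1 < 7  # persisted_after -> True
assert not (token_pos_for_worker1 < 5)  # persisted_after -> False
```
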
@@ -31,9 +31,12 @@ else:
    from pydantic import Extra

from synapse.events import EventBase
from synapse.types import JsonMapping, StreamToken, UserID
from synapse.types import JsonDict, JsonMapping, StreamToken, UserID
from synapse.types.rest.client import SlidingSyncBody

if TYPE_CHECKING:
    from synapse.handlers.relations import BundledAggregations


class ShutdownRoomParams(TypedDict):
    """

@@ -153,21 +156,33 @@ class SlidingSyncResult:
        avatar: Room avatar
        heroes: List of stripped membership events (containing `user_id` and optionally
            `avatar_url` and `displayname`) for the users used to calculate the room name.
        is_dm: Flag to specify whether the room is a direct-message room (most likely
            between two people).
        initial: Flag which is set when this is the first time the server is sending this
            data on this connection. Clients can use this flag to replace or update
            their local state. When there is an update, servers MUST omit this flag
            entirely and NOT send "initial":false as this is wasteful on bandwidth. The
            absence of this flag means 'false'.
        required_state: The current state of the room
        timeline: Latest events in the room. The last event is the most recent
        is_dm: Flag to specify whether the room is a direct-message room (most likely
            between two people).
        invite_state: Stripped state events. Same as `rooms.invite.$room_id.invite_state`
            in sync v2, absent on joined/left rooms
        timeline: Latest events in the room. The last event is the most recent.
        bundled_aggregations: A mapping of event ID to the bundled aggregations for
            the timeline events above. This allows clients to show accurate reaction
            counts (or edits, threads), even if some of the reaction events were skipped
            over in a gappy sync.
        stripped_state: Stripped state events (for rooms where the user is
            invited/knocked). Same as `rooms.invite.$room_id.invite_state` in sync v2,
            absent on joined/left rooms
        prev_batch: A token that can be passed as a start parameter to the
            `/rooms/<room_id>/messages` API to retrieve earlier messages.
        limited: True if there are more events than fit between the given position and now.
            Sync again to get more.
        num_live: The number of timeline events which have just occurred and are not historical.
            The last N events are 'live' and should be treated as such. This is mostly
            useful to determine whether a given @mention event should make a noise or not.
            Clients cannot rely solely on the absence of `initial: true` to determine live
            events because if a room not in the sliding window bumps into the window because
            of an @mention it will have `initial: true` yet contain a single live event
            (with potentially other old events in the timeline).
        joined_count: The number of users with membership of join, including the client's
            own user ID. (same as sync `v2 m.joined_member_count`)
        invited_count: The number of users with membership of invite. (same as sync v2

@@ -176,30 +191,30 @@ class SlidingSyncResult:
            as sync v2)
        highlight_count: The number of unread notifications for this room with the highlight
            flag set. (same as sync v2)
        num_live: The number of timeline events which have just occurred and are not historical.
            The last N events are 'live' and should be treated as such. This is mostly
            useful to determine whether a given @mention event should make a noise or not.
            Clients cannot rely solely on the absence of `initial: true` to determine live
            events because if a room not in the sliding window bumps into the window because
            of an @mention it will have `initial: true` yet contain a single live event
            (with potentially other old events in the timeline).
        """

        name: str
        name: Optional[str]
        avatar: Optional[str]
        heroes: Optional[List[EventBase]]
        initial: bool
        required_state: List[EventBase]
        timeline: List[EventBase]
        is_dm: bool
        invite_state: List[EventBase]
        prev_batch: StreamToken
        limited: bool
        initial: bool
        # Only optional because it won't be included for invite/knock rooms with `stripped_state`
        required_state: Optional[List[EventBase]]
        # Only optional because it won't be included for invite/knock rooms with `stripped_state`
        timeline_events: Optional[List[EventBase]]
        bundled_aggregations: Optional[Dict[str, "BundledAggregations"]]
        # Optional because it's only relevant to invite/knock rooms
        stripped_state: Optional[List[JsonDict]]
        # Only optional because it won't be included for invite/knock rooms with `stripped_state`
        prev_batch: Optional[StreamToken]
        # Only optional because it won't be included for invite/knock rooms with `stripped_state`
        limited: Optional[bool]
        # Only optional because it won't be included for invite/knock rooms with `stripped_state`
        num_live: Optional[int]
        joined_count: int
        invited_count: int
        notification_count: int
        highlight_count: int
        num_live: int

    @attr.s(slots=True, frozen=True, auto_attribs=True)
    class SlidingWindowList:


@@ -152,22 +152,14 @@ class SlidingSyncBody(RequestBodyModel):
            anyway.
        timeline_limit: The maximum number of timeline events to return per response.
            (Max 1000 messages)
        include_old_rooms: Determines if `predecessor` rooms are included in the
            `rooms` response. The user MUST be joined to old rooms for them to show up
            in the response.
        """

        class IncludeOldRooms(RequestBodyModel):
            timeline_limit: StrictInt
            required_state: List[Tuple[StrictStr, StrictStr]]

        required_state: List[Tuple[StrictStr, StrictStr]]
        # mypy workaround via https://github.com/pydantic/pydantic/issues/156#issuecomment-1130883884
        if TYPE_CHECKING:
            timeline_limit: int
        else:
            timeline_limit: conint(le=1000, strict=True)  # type: ignore[valid-type]
        include_old_rooms: Optional[IncludeOldRooms] = None

    class SlidingSyncList(CommonRoomParameters):
        """
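The `TYPE_CHECKING` split above is a standard workaround for mypy rejecting pydantic's `conint` as an annotation. A minimal standalone sketch of the pattern, assuming pydantic v1-style `conint` as the surrounding code does:

```python
from typing import TYPE_CHECKING

from pydantic import BaseModel, conint


class Body(BaseModel):
    if TYPE_CHECKING:
        # What mypy sees: a plain int.
        timeline_limit: int
    else:
        # What pydantic enforces at runtime: a strict int <= 1000.
        timeline_limit: conint(le=1000, strict=True)  # type: ignore[valid-type]


Body(timeline_limit=10)  # ok
# Body(timeline_limit=5000)  # would raise a pydantic ValidationError
```
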
@@ -208,9 +200,6 @@ class SlidingSyncBody(RequestBodyModel):
            }

        timeline_limit: The maximum number of timeline events to return per response.
        include_old_rooms: Determines if `predecessor` rooms are included in the
            `rooms` response. The user MUST be joined to old rooms for them to show up
            in the response.
        include_heroes: Return a stripped variant of membership events (containing
            `user_id` and optionally `avatar_url` and `displayname`) for the users used
            to calculate the room name.

@@ -270,7 +259,7 @@ class SlidingSyncBody(RequestBodyModel):
        is_encrypted: Optional[StrictBool] = None
        is_invite: Optional[StrictBool] = None
        room_types: Optional[List[Union[StrictStr, None]]] = None
        not_room_types: Optional[List[StrictStr]] = None
        not_room_types: Optional[List[Union[StrictStr, None]]] = None
        room_name_like: Optional[StrictStr] = None
        tags: Optional[List[StrictStr]] = None
        not_tags: Optional[List[StrictStr]] = None


@@ -35,11 +35,11 @@ from synapse.types import UserID
from synapse.util import Clock

from tests import unittest
from tests.media.test_media_storage import small_png
from tests.test_utils import SMALL_PNG
from tests.unittest import override_config


class FederationUnstableMediaDownloadsTest(unittest.FederatingHomeserverTestCase):
class FederationMediaDownloadsTest(unittest.FederatingHomeserverTestCase):

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        super().prepare(reactor, clock, hs)

@@ -65,9 +65,6 @@ class FederationUnstableMediaDownloadsTest(unittest.FederatingHomeserverTestCase
        )
        self.media_repo = hs.get_media_repository()

    @override_config(
        {"experimental_features": {"msc3916_authenticated_media_enabled": True}}
    )
    def test_file_download(self) -> None:
        content = io.BytesIO(b"file_to_stream")
        content_uri = self.get_success(

@@ -82,7 +79,7 @@ class FederationUnstableMediaDownloadsTest(unittest.FederatingHomeserverTestCase
        # test with a text file
        channel = self.make_signed_federation_request(
            "GET",
            f"/_matrix/federation/unstable/org.matrix.msc3916/media/download/{content_uri.media_id}",
            f"/_matrix/federation/v1/media/download/{content_uri.media_id}",
        )
        self.pump()
        self.assertEqual(200, channel.code)

@@ -106,7 +103,8 @@ class FederationUnstableMediaDownloadsTest(unittest.FederatingHomeserverTestCase

        # check that the text file and expected value exist
        found_file = any(
            "\r\nContent-Type: text/plain\r\n\r\nfile_to_stream" in field
            "\r\nContent-Type: text/plain\r\nContent-Disposition: inline; filename=test_upload\r\n\r\nfile_to_stream"
            in field
            for field in stripped
        )
        self.assertTrue(found_file)

@@ -124,7 +122,7 @@ class FederationUnstableMediaDownloadsTest(unittest.FederatingHomeserverTestCase
        # test with an image file
        channel = self.make_signed_federation_request(
            "GET",
            f"/_matrix/federation/unstable/org.matrix.msc3916/media/download/{content_uri.media_id}",
            f"/_matrix/federation/v1/media/download/{content_uri.media_id}",
        )
        self.pump()
        self.assertEqual(200, channel.code)

@@ -150,24 +148,111 @@ class FederationUnstableMediaDownloadsTest(unittest.FederatingHomeserverTestCase
        found_file = any(SMALL_PNG in field for field in stripped_bytes)
        self.assertTrue(found_file)

    @override_config(
        {"experimental_features": {"msc3916_authenticated_media_enabled": False}}
    )
    def test_disable_config(self) -> None:
        content = io.BytesIO(b"file_to_stream")

class FederationThumbnailTest(unittest.FederatingHomeserverTestCase):

    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
        super().prepare(reactor, clock, hs)
        self.test_dir = tempfile.mkdtemp(prefix="synapse-tests-")
        self.addCleanup(shutil.rmtree, self.test_dir)
        self.primary_base_path = os.path.join(self.test_dir, "primary")
        self.secondary_base_path = os.path.join(self.test_dir, "secondary")

        hs.config.media.media_store_path = self.primary_base_path

        storage_providers = [
            StorageProviderWrapper(
                FileStorageProviderBackend(hs, self.secondary_base_path),
                store_local=True,
                store_remote=False,
                store_synchronous=True,
            )
        ]

        self.filepaths = MediaFilePaths(self.primary_base_path)
        self.media_storage = MediaStorage(
            hs, self.primary_base_path, self.filepaths, storage_providers
        )
        self.media_repo = hs.get_media_repository()

    def test_thumbnail_download_scaled(self) -> None:
        content = io.BytesIO(small_png.data)
        content_uri = self.get_success(
            self.media_repo.create_content(
                "text/plain",
                "test_upload",
                "image/png",
                "test_png_thumbnail",
                content,
                46,
                67,
                UserID.from_string("@user_id:whatever.org"),
            )
        )
        # test with an image file
        channel = self.make_signed_federation_request(
            "GET",
            f"/_matrix/federation/unstable/org.matrix.msc3916/media/download/{content_uri.media_id}",
            f"/_matrix/federation/v1/media/thumbnail/{content_uri.media_id}?width=32&height=32&method=scale",
        )
        self.pump()
        self.assertEqual(404, channel.code)
        self.assertEqual(channel.json_body.get("errcode"), "M_UNRECOGNIZED")
        self.assertEqual(200, channel.code)

        content_type = channel.headers.getRawHeaders("content-type")
        assert content_type is not None
        assert "multipart/mixed" in content_type[0]
        assert "boundary" in content_type[0]

        # extract boundary
        boundary = content_type[0].split("boundary=")[1]
        # split on boundary and check that json field and expected value exist
        body = channel.result.get("body")
        assert body is not None
        stripped_bytes = body.split(b"\r\n" + b"--" + boundary.encode("utf-8"))
        found_json = any(
            b"\r\nContent-Type: application/json\r\n\r\n{}" in field
            for field in stripped_bytes
        )
        self.assertTrue(found_json)

        # check that the png file exists and matches the expected scaled bytes
        found_file = any(small_png.expected_scaled in field for field in stripped_bytes)
        self.assertTrue(found_file)

    def test_thumbnail_download_cropped(self) -> None:
        content = io.BytesIO(small_png.data)
        content_uri = self.get_success(
            self.media_repo.create_content(
                "image/png",
                "test_png_thumbnail",
                content,
                67,
                UserID.from_string("@user_id:whatever.org"),
            )
        )
        # test with an image file
        channel = self.make_signed_federation_request(
            "GET",
            f"/_matrix/federation/v1/media/thumbnail/{content_uri.media_id}?width=32&height=32&method=crop",
        )
        self.pump()
        self.assertEqual(200, channel.code)

        content_type = channel.headers.getRawHeaders("content-type")
        assert content_type is not None
        assert "multipart/mixed" in content_type[0]
        assert "boundary" in content_type[0]

        # extract boundary
        boundary = content_type[0].split("boundary=")[1]
        # split on boundary and check that json field and expected value exist
        body = channel.result.get("body")
        assert body is not None
        stripped_bytes = body.split(b"\r\n" + b"--" + boundary.encode("utf-8"))
        found_json = any(
            b"\r\nContent-Type: application/json\r\n\r\n{}" in field
            for field in stripped_bytes
        )
        self.assertTrue(found_json)

        # check that the png file exists and matches the expected cropped bytes
        found_file = any(
            small_png.expected_cropped in field for field in stripped_bytes
        )
        self.assertTrue(found_file)


@@ -461,3 +461,25 @@ class DeactivateAccountTestCase(HomeserverTestCase):
        # Validate that there is no displayname in any of the events
        for event in events:
            self.assertTrue("displayname" not in event.content)

    def test_rooms_forgotten_upon_deactivation(self) -> None:
        """
        Tests that the user 'forgets' the rooms they left upon deactivation.
        """
        # Create a room
        room_id = self.helper.create_room_as(
            self.user,
            is_public=True,
            tok=self.token,
        )

        # Deactivate the account
        self._deactivate_my_account()

        # Get all of the user's forgotten rooms
        forgotten_rooms = self.get_success(
            self._store.get_forgotten_rooms_for_user(self.user)
        )

        # Validate that the created room is forgotten
        self.assertTrue(room_id in forgotten_rooms)

File diff suppressed because it is too large

@@ -37,18 +37,155 @@ from synapse.http.client import (
    BlocklistingAgentWrapper,
    BlocklistingReactorWrapper,
    BodyExceededMaxSize,
    MultipartResponse,
    _DiscardBodyWithMaxSizeProtocol,
    _MultipartParserProtocol,
    read_body_with_max_size,
    read_multipart_response,
)

from tests.server import FakeTransport, get_clock
from tests.unittest import TestCase


class ReadMultipartResponseTests(TestCase):
    data1 = b"\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-Type: application/json\r\n\r\n{}\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-Type: text/plain\r\nContent-Disposition: inline; filename=test_upload\r\n\r\nfile_"
    data2 = b"to_stream\r\n--6067d4698f8d40a0a794ea7d7379d53a--\r\n\r\n"

    redirect_data = b"\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-Type: application/json\r\n\r\n{}\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nLocation: https://cdn.example.org/ab/c1/2345.txt\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a--\r\n\r\n"

    def _build_multipart_response(
        self, response_length: Union[int, str], max_length: int
    ) -> Tuple[
        BytesIO,
        "Deferred[MultipartResponse]",
        _MultipartParserProtocol,
    ]:
        """Start reading the body, returns the response, result and proto"""
        response = Mock(length=response_length)
        result = BytesIO()
        boundary = "6067d4698f8d40a0a794ea7d7379d53a"
        deferred = read_multipart_response(response, result, boundary, max_length)

        # Fish the protocol out of the response.
        protocol = response.deliverBody.call_args[0][0]
        protocol.transport = Mock()

        return result, deferred, protocol

    def _assert_error(
        self,
        deferred: "Deferred[MultipartResponse]",
        protocol: _MultipartParserProtocol,
    ) -> None:
        """Ensure that the expected error is received."""
        assert isinstance(deferred.result, Failure)
        self.assertIsInstance(deferred.result.value, BodyExceededMaxSize)
        assert protocol.transport is not None
        # type-ignore: presumably abortConnection has been replaced with a Mock.
        protocol.transport.abortConnection.assert_called_once()  # type: ignore[attr-defined]

    def _cleanup_error(self, deferred: "Deferred[MultipartResponse]") -> None:
        """Ensure that the error in the Deferred is handled gracefully."""
        called = [False]

        def errback(f: Failure) -> None:
            called[0] = True

        deferred.addErrback(errback)
        self.assertTrue(called[0])

    def test_parse_file(self) -> None:
        """
        Check that a multipart response containing a file is properly parsed
        into the json/file parts, and the json and file are properly captured
        """
        result, deferred, protocol = self._build_multipart_response(249, 250)

        # Start sending data.
        protocol.dataReceived(self.data1)
        protocol.dataReceived(self.data2)
        # Close the connection.
        protocol.connectionLost(Failure(ResponseDone()))

        multipart_response: MultipartResponse = deferred.result  # type: ignore[assignment]

        self.assertEqual(multipart_response.json, b"{}")
        self.assertEqual(result.getvalue(), b"file_to_stream")
        self.assertEqual(multipart_response.length, len(b"file_to_stream"))
        self.assertEqual(multipart_response.content_type, b"text/plain")
        self.assertEqual(
            multipart_response.disposition, b"inline; filename=test_upload"
        )

    def test_parse_redirect(self) -> None:
        """
        Check that a multipart response containing a redirect is properly parsed and the
        redirect URL is returned
        """
        result, deferred, protocol = self._build_multipart_response(249, 250)

        # Start sending data.
        protocol.dataReceived(self.redirect_data)
        # Close the connection.
        protocol.connectionLost(Failure(ResponseDone()))

        multipart_response: MultipartResponse = deferred.result  # type: ignore[assignment]

        self.assertEqual(multipart_response.json, b"{}")
        self.assertEqual(result.getvalue(), b"")
        self.assertEqual(
            multipart_response.url, b"https://cdn.example.org/ab/c1/2345.txt"
        )

    def test_too_large(self) -> None:
        """A response which is too large raises an exception."""
        result, deferred, protocol = self._build_multipart_response(UNKNOWN_LENGTH, 180)

        # Start sending data.
        protocol.dataReceived(self.data1)

        self.assertEqual(result.getvalue(), b"file_")
        self._assert_error(deferred, protocol)
        self._cleanup_error(deferred)

    def test_additional_data(self) -> None:
        """A connection can receive data after being closed."""
        result, deferred, protocol = self._build_multipart_response(UNKNOWN_LENGTH, 180)

        # Start sending data.
        protocol.dataReceived(self.data1)
        self._assert_error(deferred, protocol)

        # More data might have come in.
        protocol.dataReceived(self.data2)

        self.assertEqual(result.getvalue(), b"file_")
        self._assert_error(deferred, protocol)
        self._cleanup_error(deferred)

    def test_content_length(self) -> None:
        """The body shouldn't be read (at all) if the Content-Length header is too large."""
        result, deferred, protocol = self._build_multipart_response(250, 1)

        # Deferred shouldn't be called yet.
        self.assertFalse(deferred.called)

        # Start sending data.
        protocol.dataReceived(self.data1)
        self._assert_error(deferred, protocol)
        self._cleanup_error(deferred)

        # The data is never consumed.
        self.assertEqual(result.getvalue(), b"")
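The tests above pin down the multipart wire format: a JSON metadata part followed by a file part, delimited by `--boundary` markers. A standalone sketch of the boundary-splitting check the assertions rely on:

```python
boundary = b"6067d4698f8d40a0a794ea7d7379d53a"
body = (
    b"\r\n\r\n--" + boundary + b"\r\nContent-Type: application/json\r\n\r\n{}"
    b"\r\n--" + boundary + b"\r\nContent-Type: text/plain\r\n\r\nfile_to_stream"
    b"\r\n--" + boundary + b"--\r\n\r\n"
)

# Splitting on CRLF + "--boundary" yields one chunk per part.
parts = body.split(b"\r\n--" + boundary)
assert any(b"application/json" in p for p in parts)
assert any(b"file_to_stream" in p for p in parts)
```
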

class ReadBodyWithMaxSizeTests(TestCase):
    def _build_response(
        self, length: Union[int, str] = UNKNOWN_LENGTH
    ) -> Tuple[BytesIO, "Deferred[int]", _DiscardBodyWithMaxSizeProtocol]:
    def _build_response(self, length: Union[int, str] = UNKNOWN_LENGTH) -> Tuple[
        BytesIO,
        "Deferred[int]",
        _DiscardBodyWithMaxSizeProtocol,
    ]:
        """Start reading the body, returns the response, result and proto"""
        response = Mock(length=length)
        result = BytesIO()


@@ -18,7 +18,6 @@
# [This file includes modifications made by New Vector Limited]
#
#
import itertools
import os
import shutil
import tempfile

@@ -129,7 +128,7 @@ class MediaStorageTests(unittest.HomeserverTestCase):


@attr.s(auto_attribs=True, slots=True, frozen=True)
class _TestImage:
class TestImage:
    """An image for testing thumbnailing with the expected results

    Attributes:

@@ -158,7 +157,7 @@ class _TestImage:
    is_inline: bool = True


small_png = _TestImage(
small_png = TestImage(
    SMALL_PNG,
    b"image/png",
    b".png",

@@ -175,7 +174,7 @@ small_png = _TestImage(
    ),
)

small_png_with_transparency = _TestImage(
small_png_with_transparency = TestImage(
    unhexlify(
        b"89504e470d0a1a0a0000000d49484452000000010000000101000"
        b"00000376ef9240000000274524e5300010194fdae0000000a4944"

@@ -188,7 +187,7 @@ small_png_with_transparency = _TestImage(
    # different versions of Pillow.
)

small_lossless_webp = _TestImage(
small_lossless_webp = TestImage(
    unhexlify(
        b"524946461a000000574542505650384c0d0000002f0000001007" b"1011118888fe0700"
    ),

@@ -196,7 +195,7 @@ small_lossless_webp = _TestImage(
    b".webp",
)

empty_file = _TestImage(
empty_file = TestImage(
    b"",
    b"image/gif",
    b".gif",

@@ -204,7 +203,7 @@ empty_file = _TestImage(
    unable_to_thumbnail=True,
)

SVG = _TestImage(
SVG = TestImage(
    b"""<?xml version="1.0"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN"
  "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">

@@ -227,19 +226,15 @@ test_images = [
    empty_file,
    SVG,
]
urls = [
    "_matrix/media/r0/thumbnail",
    "_matrix/client/unstable/org.matrix.msc3916/media/thumbnail",
]
input_values = [(x,) for x in test_images]


@parameterized_class(("test_image", "url"), itertools.product(test_images, urls))
@parameterized_class(("test_image",), input_values)
class MediaRepoTests(unittest.HomeserverTestCase):
    servlets = [media.register_servlets]
    test_image: ClassVar[_TestImage]
    test_image: ClassVar[TestImage]
    hijack_auth = True
    user_id = "@test:user"
    url: ClassVar[str]

    def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
        self.fetches: List[
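`parameterized_class` expands one test class into a subclass per input tuple; the diff narrows the matrix from (image × url) back to images only. A minimal sketch of the mechanism with a toy test class (not the media tests themselves):

```python
from unittest import TestCase

from parameterized import parameterized_class


@parameterized_class(("value",), [(1,), (2,)])
class ToyTests(TestCase):
    value: int

    def test_positive(self) -> None:
        # Runs twice: once with value=1, once with value=2.
        self.assertGreater(self.value, 0)
```
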
@@ -304,7 +299,6 @@ class MediaRepoTests(unittest.HomeserverTestCase):
            "config": {"directory": self.storage_path},
        }
        config["media_storage_providers"] = [provider_config]
        config["experimental_features"] = {"msc3916_authenticated_media_enabled": True}

        hs = self.setup_test_homeserver(config=config, federation_http_client=client)

@@ -509,7 +503,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
        params = "?width=32&height=32&method=scale"
        channel = self.make_request(
            "GET",
            f"/{self.url}/{self.media_id}{params}",
            f"/_matrix/media/r0/thumbnail/{self.media_id}{params}",
            shorthand=False,
            await_result=False,
        )

@@ -537,7 +531,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):

        channel = self.make_request(
            "GET",
            f"/{self.url}/{self.media_id}{params}",
            f"/_matrix/media/r0/thumbnail/{self.media_id}{params}",
            shorthand=False,
            await_result=False,
        )

@@ -573,7 +567,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
        params = "?width=32&height=32&method=" + method
        channel = self.make_request(
            "GET",
            f"/{self.url}/{self.media_id}{params}",
            f"/_matrix/media/r0/thumbnail/{self.media_id}{params}",
            shorthand=False,
            await_result=False,
        )

@@ -608,7 +602,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
                channel.json_body,
                {
                    "errcode": "M_UNKNOWN",
                    "error": f"Cannot find any thumbnails for the requested media ('/{self.url}/example.com/12345'). This might mean the media is not a supported_media_format=(image/jpeg, image/jpg, image/webp, image/gif, image/png) or that thumbnailing failed for some other reason. (Dynamic thumbnails are disabled on this server.)",
                    "error": "Cannot find any thumbnails for the requested media ('/_matrix/media/r0/thumbnail/example.com/12345'). This might mean the media is not a supported_media_format=(image/jpeg, image/jpg, image/webp, image/gif, image/png) or that thumbnailing failed for some other reason. (Dynamic thumbnails are disabled on this server.)",
                },
            )
        else:

@@ -618,7 +612,7 @@ class MediaRepoTests(unittest.HomeserverTestCase):
                channel.json_body,
                {
                    "errcode": "M_NOT_FOUND",
                    "error": f"Not found '/{self.url}/example.com/12345'",
                    "error": "Not found '/_matrix/media/r0/thumbnail/example.com/12345'",
                },
            )


@@ -26,7 +26,8 @@ from twisted.test.proto_helpers import MemoryReactor
 import synapse.rest.admin
 from synapse.logging.context import make_deferred_yieldable
 from synapse.push import PusherConfig, PusherConfigException
-from synapse.rest.client import login, push_rule, pusher, receipts, room
+from synapse.rest.admin.experimental_features import ExperimentalFeature
+from synapse.rest.client import login, push_rule, pusher, receipts, room, versions
 from synapse.server import HomeServer
 from synapse.types import JsonDict
 from synapse.util import Clock

@@ -42,6 +43,7 @@ class HTTPPusherTests(HomeserverTestCase):
         receipts.register_servlets,
         push_rule.register_servlets,
         pusher.register_servlets,
+        versions.register_servlets,
     ]
     user_id = True
     hijack_auth = False

@@ -969,6 +971,84 @@ class HTTPPusherTests(HomeserverTestCase):
             lookup_result.device_id,
         )

+    def test_device_id_feature_flag(self) -> None:
+        """Tests that a pusher created with a given device ID shows that device ID in
+        GET /pushers requests when feature is enabled for the user
+        """
+        user_id = self.register_user("user", "pass")
+        access_token = self.login("user", "pass")
+
+        # We create the pusher with an HTTP request rather than with
+        # _make_user_with_pusher so that we can test the device ID is correctly set when
+        # creating a pusher via an API call.
+        self.make_request(
+            method="POST",
+            path="/pushers/set",
+            content={
+                "kind": "http",
+                "app_id": "m.http",
+                "app_display_name": "HTTP Push Notifications",
+                "device_display_name": "pushy push",
+                "pushkey": "a@example.com",
+                "lang": "en",
+                "data": {"url": "http://example.com/_matrix/push/v1/notify"},
+            },
+            access_token=access_token,
+        )
+
+        # Look up the user info for the access token so we can compare the device ID.
+        store = self.hs.get_datastores().main
+        lookup_result = self.get_success(store.get_user_by_access_token(access_token))
+        assert lookup_result is not None
+
+        # Check field is not there before we enable the feature flag
+        channel = self.make_request("GET", "/pushers", access_token=access_token)
+        self.assertEqual(channel.code, 200)
+        self.assertEqual(len(channel.json_body["pushers"]), 1)
+        self.assertNotIn(
+            "org.matrix.msc3881.device_id", channel.json_body["pushers"][0]
+        )
+
+        self.get_success(
+            store.set_features_for_user(user_id, {ExperimentalFeature.MSC3881: True})
+        )
+
+        # Get the user's devices and check it has the correct device ID.
+        channel = self.make_request("GET", "/pushers", access_token=access_token)
+        self.assertEqual(channel.code, 200)
+        self.assertEqual(len(channel.json_body["pushers"]), 1)
+        self.assertEqual(
+            channel.json_body["pushers"][0]["org.matrix.msc3881.device_id"],
+            lookup_result.device_id,
+        )
+
+    def test_msc3881_client_versions_flag(self) -> None:
+        """Tests that MSC3881 only appears in /versions if user has it enabled."""
+
+        user_id = self.register_user("user", "pass")
+        access_token = self.login("user", "pass")
+
+        # Check feature is disabled in /versions
+        channel = self.make_request(
+            "GET", "/_matrix/client/versions", access_token=access_token
+        )
+        self.assertEqual(channel.code, 200)
+        self.assertFalse(channel.json_body["unstable_features"]["org.matrix.msc3881"])
+
+        # Enable feature for user
+        self.get_success(
+            self.hs.get_datastores().main.set_features_for_user(
+                user_id, {ExperimentalFeature.MSC3881: True}
+            )
+        )
+
+        # Check feature is now enabled in /versions for user
+        channel = self.make_request(
+            "GET", "/_matrix/client/versions", access_token=access_token
+        )
+        self.assertEqual(channel.code, 200)
+        self.assertTrue(channel.json_body["unstable_features"]["org.matrix.msc3881"])
+
     @override_config({"push": {"jitter_delay": "10s"}})
     def test_jitter(self) -> None:
         """Tests that enabling jitter actually delays sending push."""
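Since the new tests above hinge on `/_matrix/client/versions` advertising `org.matrix.msc3881` per user, a client-side sketch of the same gate may be useful context. This is a hypothetical helper, not part of the change: the endpoint and flag name come from the tests above, while the `requests` usage, `homeserver_url`, and `access_token` parameters are assumptions.

```python
import requests


def supports_msc3881(homeserver_url: str, access_token: str) -> bool:
    """Check whether this server advertises MSC3881 for the calling user.

    Hypothetical client helper: the endpoint and the unstable feature flag
    name are taken from the tests above; everything else is an assumption.
    """
    response = requests.get(
        f"{homeserver_url}/_matrix/client/versions",
        headers={"Authorization": f"Bearer {access_token}"},
        timeout=10,
    )
    response.raise_for_status()
    # `unstable_features` may be missing entirely on servers that predate it.
    return response.json().get("unstable_features", {}).get("org.matrix.msc3881", False)
```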
@@ -28,7 +28,7 @@ from twisted.web.http import HTTPChannel
 from twisted.web.server import Request

 from synapse.rest import admin
-from synapse.rest.client import login
+from synapse.rest.client import login, media
 from synapse.server import HomeServer
 from synapse.util import Clock

@@ -255,6 +255,238 @@ class MediaRepoShardTestCase(BaseMultiWorkerStreamTestCase):
         return sum(len(files) for _, _, files in os.walk(path))


+class AuthenticatedMediaRepoShardTestCase(BaseMultiWorkerStreamTestCase):
+    """Checks that running multiple media repos works correctly using authenticated media paths"""
+
+    servlets = [
+        admin.register_servlets_for_client_rest_resource,
+        login.register_servlets,
+        media.register_servlets,
+    ]
+
+    file_data = b"\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-Type: application/json\r\n\r\n{}\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-Type: text/plain\r\nContent-Disposition: inline; filename=test_upload\r\n\r\nfile_to_stream\r\n--6067d4698f8d40a0a794ea7d7379d53a--\r\n\r\n"
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.user_id = self.register_user("user", "pass")
+        self.access_token = self.login("user", "pass")
+
+        self.reactor.lookups["example.com"] = "1.2.3.4"
+
+    def default_config(self) -> dict:
+        conf = super().default_config()
+        conf["federation_custom_ca_list"] = [get_test_ca_cert_file()]
+        return conf
+
+    def make_worker_hs(
+        self, worker_app: str, extra_config: Optional[dict] = None, **kwargs: Any
+    ) -> HomeServer:
+        worker_hs = super().make_worker_hs(worker_app, extra_config, **kwargs)
+        # Force the media paths onto the replication resource.
+        worker_hs.get_media_repository_resource().register_servlets(
+            self._hs_to_site[worker_hs].resource, worker_hs
+        )
+        return worker_hs
+
+    def _get_media_req(
+        self, hs: HomeServer, target: str, media_id: str
+    ) -> Tuple[FakeChannel, Request]:
+        """Request some remote media from the given HS by calling the download
+        API.
+
+        This then triggers an outbound request from the HS to the target.
+
+        Returns:
+            The channel for the *client* request and the *outbound* request for
+            the media which the caller should respond to.
+        """
+        channel = make_request(
+            self.reactor,
+            self._hs_to_site[hs],
+            "GET",
+            f"/_matrix/client/v1/media/download/{target}/{media_id}",
+            shorthand=False,
+            access_token=self.access_token,
+            await_result=False,
+        )
+        self.pump()
+
+        clients = self.reactor.tcpClients
+        self.assertGreaterEqual(len(clients), 1)
+        (host, port, client_factory, _timeout, _bindAddress) = clients.pop()
+
+        # build the test server
+        server_factory = Factory.forProtocol(HTTPChannel)
+        # Request.finish expects the factory to have a 'log' method.
+        server_factory.log = _log_request
+
+        server_tls_protocol = wrap_server_factory_for_tls(
+            server_factory, self.reactor, sanlist=[b"DNS:example.com"]
+        ).buildProtocol(None)
+
+        # now, tell the client protocol factory to build the client protocol (it will be a
+        # _WrappingProtocol, around a TLSMemoryBIOProtocol, around an
+        # HTTP11ClientProtocol) and wire the output of said protocol up to the server via
+        # a FakeTransport.
+        #
+        # Normally this would be done by the TCP socket code in Twisted, but we are
+        # stubbing that out here.
+        client_protocol = client_factory.buildProtocol(None)
+        client_protocol.makeConnection(
+            FakeTransport(server_tls_protocol, self.reactor, client_protocol)
+        )
+
+        # tell the server tls protocol to send its stuff back to the client, too
+        server_tls_protocol.makeConnection(
+            FakeTransport(client_protocol, self.reactor, server_tls_protocol)
+        )
+
+        # fish the test server back out of the server-side TLS protocol.
+        http_server: HTTPChannel = server_tls_protocol.wrappedProtocol
+
+        # give the reactor a pump to get the TLS juices flowing.
+        self.reactor.pump((0.1,))
+
+        self.assertEqual(len(http_server.requests), 1)
+        request = http_server.requests[0]
+
+        self.assertEqual(request.method, b"GET")
+        self.assertEqual(
+            request.path,
+            f"/_matrix/federation/v1/media/download/{media_id}".encode(),
+        )
+        self.assertEqual(
+            request.requestHeaders.getRawHeaders(b"host"), [target.encode("utf-8")]
+        )
+
+        return channel, request
+
+    def test_basic(self) -> None:
+        """Test basic fetching of remote media from a single worker."""
+        hs1 = self.make_worker_hs("synapse.app.generic_worker")
+
+        channel, request = self._get_media_req(hs1, "example.com:443", "ABC123")
+
+        request.setResponseCode(200)
+        request.responseHeaders.setRawHeaders(
+            b"Content-Type",
+            ["multipart/mixed; boundary=6067d4698f8d40a0a794ea7d7379d53a"],
+        )
+        request.write(self.file_data)
+        request.finish()
+
+        self.pump(0.1)
+
+        self.assertEqual(channel.code, 200)
+        self.assertEqual(channel.result["body"], b"file_to_stream")
+
+    def test_download_simple_file_race(self) -> None:
+        """Test that fetching remote media from two different processes at the
+        same time works.
+        """
+        hs1 = self.make_worker_hs("synapse.app.generic_worker")
+        hs2 = self.make_worker_hs("synapse.app.generic_worker")
+
+        start_count = self._count_remote_media()
+
+        # Make two requests without responding to the outbound media requests.
+        channel1, request1 = self._get_media_req(hs1, "example.com:443", "ABC123")
+        channel2, request2 = self._get_media_req(hs2, "example.com:443", "ABC123")
+
+        # Respond to the first outbound media request and check that the client
+        # request is successful
+        request1.setResponseCode(200)
+        request1.responseHeaders.setRawHeaders(
+            b"Content-Type",
+            ["multipart/mixed; boundary=6067d4698f8d40a0a794ea7d7379d53a"],
+        )
+        request1.write(self.file_data)
+        request1.finish()
+
+        self.pump(0.1)
+
+        self.assertEqual(channel1.code, 200, channel1.result["body"])
+        self.assertEqual(channel1.result["body"], b"file_to_stream")
+
+        # Now respond to the second with the same content.
+        request2.setResponseCode(200)
+        request2.responseHeaders.setRawHeaders(
+            b"Content-Type",
+            ["multipart/mixed; boundary=6067d4698f8d40a0a794ea7d7379d53a"],
+        )
+        request2.write(self.file_data)
+        request2.finish()
+
+        self.pump(0.1)
+
+        self.assertEqual(channel2.code, 200, channel2.result["body"])
+        self.assertEqual(channel2.result["body"], b"file_to_stream")
+
+        # We expect only one new file to have been persisted.
+        self.assertEqual(start_count + 1, self._count_remote_media())
+
+    def test_download_image_race(self) -> None:
+        """Test that fetching remote *images* from two different processes at
+        the same time works.
+
+        This checks that races generating thumbnails are handled correctly.
+        """
+        hs1 = self.make_worker_hs("synapse.app.generic_worker")
+        hs2 = self.make_worker_hs("synapse.app.generic_worker")
+
+        start_count = self._count_remote_thumbnails()
+
+        channel1, request1 = self._get_media_req(hs1, "example.com:443", "PIC1")
+        channel2, request2 = self._get_media_req(hs2, "example.com:443", "PIC1")
+
+        request1.setResponseCode(200)
+        request1.responseHeaders.setRawHeaders(
+            b"Content-Type",
+            ["multipart/mixed; boundary=6067d4698f8d40a0a794ea7d7379d53a"],
+        )
+        img_data = b"\r\n\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-Type: application/json\r\n\r\n{}\r\n--6067d4698f8d40a0a794ea7d7379d53a\r\nContent-Type: image/png\r\nContent-Disposition: inline; filename=test_img\r\n\r\n"
+        request1.write(img_data)
+        request1.write(SMALL_PNG)
+        request1.write(b"\r\n--6067d4698f8d40a0a794ea7d7379d53a--\r\n\r\n")
+        request1.finish()
+
+        self.pump(0.1)
+
+        self.assertEqual(channel1.code, 200, channel1.result["body"])
+        self.assertEqual(channel1.result["body"], SMALL_PNG)
+
+        request2.setResponseCode(200)
+        request2.responseHeaders.setRawHeaders(
+            b"Content-Type",
+            ["multipart/mixed; boundary=6067d4698f8d40a0a794ea7d7379d53a"],
+        )
+        request2.write(img_data)
+        request2.write(SMALL_PNG)
+        request2.write(b"\r\n--6067d4698f8d40a0a794ea7d7379d53a--\r\n\r\n")
+        request2.finish()
+
+        self.pump(0.1)
+
+        self.assertEqual(channel2.code, 200, channel2.result["body"])
+        self.assertEqual(channel2.result["body"], SMALL_PNG)
+
+        # We expect only three new thumbnails to have been persisted.
+        self.assertEqual(start_count + 3, self._count_remote_thumbnails())
+
+    def _count_remote_media(self) -> int:
+        """Count the number of files in our remote media directory."""
+        path = os.path.join(
+            self.hs.get_media_repository().primary_base_path, "remote_content"
+        )
+        return sum(len(files) for _, _, files in os.walk(path))
+
+    def _count_remote_thumbnails(self) -> int:
+        """Count the number of files in our remote thumbnails directory."""
+        path = os.path.join(
+            self.hs.get_media_repository().primary_base_path, "remote_thumbnail"
+        )
+        return sum(len(files) for _, _, files in os.walk(path))
+

 def _log_request(request: Request) -> None:
     """Implements Factory.log, which is expected by Request.finish"""
     logger.info("Completed request %s", request)
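The multipart/mixed fixture above (a JSON metadata part followed by the file itself) can be decoded with the standard library alone. The sketch below is illustrative only, assuming exactly two parts in the order the fixture uses; the boundary string matches the test data, but the helper name and everything else are assumptions, not Synapse code.

```python
from email.parser import BytesParser
from email.policy import default

BOUNDARY = "6067d4698f8d40a0a794ea7d7379d53a"  # matches the fixture above


def split_federation_media(body: bytes, boundary: str = BOUNDARY) -> tuple[bytes, bytes]:
    """Split a multipart/mixed media response into (metadata, file) bytes.

    A minimal stdlib sketch: prepend a Content-Type header carrying the
    boundary so the email parser can walk the parts.
    """
    prefix = b"Content-Type: multipart/mixed; boundary=" + boundary.encode() + b"\r\n\r\n"
    message = BytesParser(policy=default).parsebytes(prefix + body)
    parts = list(message.iter_parts())
    metadata = parts[0].get_payload(decode=True)  # b"{}" in the fixture
    file_bytes = parts[1].get_payload(decode=True)  # b"file_to_stream"
    return metadata, file_bytes
```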
@@ -384,7 +384,7 @@ class ExperimentalFeaturesTestCase(unittest.HomeserverTestCase):
             "PUT",
             url,
             content={
-                "features": {"msc3026": True, "msc3881": True},
+                "features": {"msc3881": True},
             },
             access_token=self.admin_user_tok,
         )

@@ -399,10 +399,6 @@ class ExperimentalFeaturesTestCase(unittest.HomeserverTestCase):
             access_token=self.admin_user_tok,
         )
         self.assertEqual(channel.code, 200)
-        self.assertEqual(
-            True,
-            channel.json_body["features"]["msc3026"],
-        )
         self.assertEqual(
             True,
             channel.json_body["features"]["msc3881"],

@@ -413,7 +409,7 @@ class ExperimentalFeaturesTestCase(unittest.HomeserverTestCase):
         channel = self.make_request(
             "PUT",
             url,
-            content={"features": {"msc3026": False}},
+            content={"features": {"msc3881": False}},
            access_token=self.admin_user_tok,
         )
         self.assertEqual(channel.code, 200)

@@ -429,10 +425,6 @@ class ExperimentalFeaturesTestCase(unittest.HomeserverTestCase):
         self.assertEqual(channel.code, 200)
-        self.assertEqual(
-            False,
-            channel.json_body["features"]["msc3026"],
-        )
         self.assertEqual(
             True,
             channel.json_body["features"]["msc3881"],
         )

@@ -441,7 +433,7 @@ class ExperimentalFeaturesTestCase(unittest.HomeserverTestCase):
         channel = self.make_request(
             "PUT",
             url,
-            content={"features": {"msc3026": False}},
+            content={"features": {"msc3881": False}},
             access_token=self.admin_user_tok,
         )
         self.assertEqual(channel.code, 200)
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -261,9 +261,9 @@ class RestHelper:
         targ: str,
         expect_code: int = HTTPStatus.OK,
         tok: Optional[str] = None,
-    ) -> None:
+    ) -> JsonDict:
         """A convenience helper: `change_membership` with `membership` preset to "ban"."""
-        self.change_membership(
+        return self.change_membership(
             room=room,
             src=src,
             targ=targ,
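A hypothetical usage of the updated helper, now that the ban response body is returned instead of discarded: the body carries the ban event's ID, which a test could resolve to a stream position just as the join responses are used elsewhere in this commit. The variable names here are illustrative assumptions.

```python
# Hypothetical test fragment (assumes the usual HomeserverTestCase context):
ban_body = self.helper.ban(room_id, src=admin_user_id, targ=target_user_id, tok=admin_tok)
ban_pos = self.get_success(self.store.get_position_for_event(ban_body["event_id"]))
```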
@@ -946,7 +946,7 @@ def connect_client(


 class TestHomeServer(HomeServer):
-    DATASTORE_CLASS = DataStore  # type: ignore[assignment]
+    DATASTORE_CLASS = DataStore


 def setup_test_homeserver(
@@ -21,20 +21,32 @@

 import logging
 from typing import List, Tuple
+from unittest.mock import AsyncMock, patch

 from immutabledict import immutabledict

 from twisted.test.proto_helpers import MemoryReactor

-from synapse.api.constants import Direction, EventTypes, RelationTypes
+from synapse.api.constants import Direction, EventTypes, Membership, RelationTypes
 from synapse.api.filtering import Filter
+from synapse.crypto.event_signing import add_hashes_and_signatures
+from synapse.events import FrozenEventV3
+from synapse.federation.federation_client import SendJoinResult
 from synapse.rest import admin
 from synapse.rest.client import login, room
 from synapse.server import HomeServer
-from synapse.types import JsonDict, PersistedEventPosition, RoomStreamToken
+from synapse.storage.databases.main.stream import CurrentStateDeltaMembership
+from synapse.types import (
+    JsonDict,
+    PersistedEventPosition,
+    RoomStreamToken,
+    UserID,
+    create_requester,
+)
 from synapse.util import Clock

-from tests.unittest import HomeserverTestCase
+from tests.test_utils.event_injection import create_event
+from tests.unittest import FederatingHomeserverTestCase, HomeserverTestCase

 logger = logging.getLogger(__name__)

@@ -543,3 +555,859 @@ class GetLastEventInRoomBeforeStreamOrderingTestCase(HomeserverTestCase):
                 }
             ),
         )
+
+
+class GetCurrentStateDeltaMembershipChangesForUserTestCase(HomeserverTestCase):
+    """
+    Test `get_current_state_delta_membership_changes_for_user(...)`
+    """
+
+    servlets = [
+        admin.register_servlets,
+        room.register_servlets,
+        login.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+        self.event_sources = hs.get_event_sources()
+        self.state_handler = self.hs.get_state_handler()
+        persistence = hs.get_storage_controllers().persistence
+        assert persistence is not None
+        self.persistence = persistence
+
+    def test_returns_membership_events(self) -> None:
+        """
+        A basic test that a membership event in the token range is returned for the user.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        before_room1_token = self.event_sources.get_current_token()
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        join_response = self.helper.join(room_id1, user1_id, tok=user1_tok)
+        join_pos = self.get_success(
+            self.store.get_position_for_event(join_response["event_id"])
+        )
+
+        after_room1_token = self.event_sources.get_current_token()
+
+        membership_changes = self.get_success(
+            self.store.get_current_state_delta_membership_changes_for_user(
+                user1_id,
+                from_key=before_room1_token.room_key,
+                to_key=after_room1_token.room_key,
+            )
+        )
+
+        # Let the whole diff show on failure
+        self.maxDiff = None
+        self.assertEqual(
+            membership_changes,
+            [
+                CurrentStateDeltaMembership(
+                    room_id=room_id1,
+                    event_id=join_response["event_id"],
+                    event_pos=join_pos,
+                    membership="join",
+                    sender=user1_id,
+                    prev_event_id=None,
+                    prev_event_pos=None,
+                    prev_membership=None,
+                    prev_sender=None,
+                )
+            ],
+        )
+
+    def test_server_left_room_after_us(self) -> None:
+        """
+        Test that when probing over part of the DAG where the server left the room *after
+        us*, we still see the join and leave changes.
+
+        This is to make sure we play nicely with this behavior: When the server leaves a
+        room, it will insert new rows with `event_id = null` into the
+        `current_state_delta_stream` table for all current state.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        before_room1_token = self.event_sources.get_current_token()
+
+        room_id1 = self.helper.create_room_as(
+            user2_id,
+            tok=user2_tok,
+            extra_content={
+                "power_level_content_override": {
+                    "users": {
+                        user2_id: 100,
+                        # Allow user1 to send state in the room
+                        user1_id: 100,
+                    }
+                }
+            },
+        )
+        join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
+        join_pos1 = self.get_success(
+            self.store.get_position_for_event(join_response1["event_id"])
+        )
+        # Make sure that random other non-member state that happens to have a `state_key`
+        # matching the user ID doesn't mess with things.
+        self.helper.send_state(
+            room_id1,
+            event_type="foobarbazdummy",
+            state_key=user1_id,
+            body={"foo": "bar"},
+            tok=user1_tok,
+        )
+        # User1 should leave the room first
+        leave_response1 = self.helper.leave(room_id1, user1_id, tok=user1_tok)
+        leave_pos1 = self.get_success(
+            self.store.get_position_for_event(leave_response1["event_id"])
+        )
+
+        # User2 should also leave the room (everyone has left the room which means the
+        # server is no longer in the room).
+        self.helper.leave(room_id1, user2_id, tok=user2_tok)
+
+        after_room1_token = self.event_sources.get_current_token()
+
+        # Get the membership changes for the user.
+        #
+        # At this point, the `current_state_delta_stream` table should look like the
+        # following. When the server leaves a room, it will insert new rows with
+        # `event_id = null` for all current state.
+        #
+        # | stream_id | room_id | type                        | state_key     | event_id | prev_event_id |
+        # |-----------|---------|-----------------------------|---------------|----------|---------------|
+        # | 2         | !x:test | 'm.room.create'             | ''            | $xxx     | None          |
+        # | 3         | !x:test | 'm.room.member'             | '@user2:test' | $aaa     | None          |
+        # | 4         | !x:test | 'm.room.history_visibility' | ''            | $xxx     | None          |
+        # | 4         | !x:test | 'm.room.join_rules'         | ''            | $xxx     | None          |
+        # | 4         | !x:test | 'm.room.power_levels'       | ''            | $xxx     | None          |
+        # | 7         | !x:test | 'm.room.member'             | '@user1:test' | $ooo     | None          |
+        # | 8         | !x:test | 'foobarbazdummy'            | '@user1:test' | $xxx     | None          |
+        # | 9         | !x:test | 'm.room.member'             | '@user1:test' | $ppp     | $ooo          |
+        # | 10        | !x:test | 'foobarbazdummy'            | '@user1:test' | None     | $xxx          |
+        # | 10        | !x:test | 'm.room.create'             | ''            | None     | $xxx          |
+        # | 10        | !x:test | 'm.room.history_visibility' | ''            | None     | $xxx          |
+        # | 10        | !x:test | 'm.room.join_rules'         | ''            | None     | $xxx          |
+        # | 10        | !x:test | 'm.room.member'             | '@user1:test' | None     | $ppp          |
+        # | 10        | !x:test | 'm.room.member'             | '@user2:test' | None     | $aaa          |
+        # | 10        | !x:test | 'm.room.power_levels'       | ''            | None     | $xxx          |
+        membership_changes = self.get_success(
+            self.store.get_current_state_delta_membership_changes_for_user(
+                user1_id,
+                from_key=before_room1_token.room_key,
+                to_key=after_room1_token.room_key,
+            )
+        )
+
+        # Let the whole diff show on failure
+        self.maxDiff = None
+        self.assertEqual(
+            membership_changes,
+            [
+                CurrentStateDeltaMembership(
+                    room_id=room_id1,
+                    event_id=join_response1["event_id"],
+                    event_pos=join_pos1,
+                    membership="join",
+                    sender=user1_id,
+                    prev_event_id=None,
+                    prev_event_pos=None,
+                    prev_membership=None,
+                    prev_sender=None,
+                ),
+                CurrentStateDeltaMembership(
+                    room_id=room_id1,
+                    event_id=leave_response1["event_id"],
+                    event_pos=leave_pos1,
+                    membership="leave",
+                    sender=user1_id,
+                    prev_event_id=join_response1["event_id"],
+                    prev_event_pos=join_pos1,
+                    prev_membership="join",
+                    prev_sender=user1_id,
+                ),
+            ],
+        )
+
+    def test_server_left_room_after_us_later(self) -> None:
+        """
+        Test that when the user leaves the room, then sometime later everyone else
+        leaves the room (causing the server to leave the room), we shouldn't see any
+        membership changes.
+
+        This is to make sure we play nicely with this behavior: When the server leaves a
+        room, it will insert new rows with `event_id = null` into the
+        `current_state_delta_stream` table for all current state.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id1, user1_id, tok=user1_tok)
+        # User1 should leave the room first
+        self.helper.leave(room_id1, user1_id, tok=user1_tok)
+
+        after_user1_leave_token = self.event_sources.get_current_token()
+
+        # User2 should also leave the room (everyone has left the room which means the
+        # server is no longer in the room).
+        self.helper.leave(room_id1, user2_id, tok=user2_tok)
+
+        after_server_leave_token = self.event_sources.get_current_token()
+
+        # Join another room as user1 just to advance the stream_ordering and bust
+        # `_membership_stream_cache`
+        room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        self.helper.join(room_id2, user1_id, tok=user1_tok)
+
+        # Get the membership changes for the user.
+        #
+        # At this point, the `current_state_delta_stream` table should look like the
+        # following. When the server leaves a room, it will insert new rows with
+        # `event_id = null` for all current state.
+        #
+        # TODO: Add DB rows to better see what's going on.
+        membership_changes = self.get_success(
+            self.store.get_current_state_delta_membership_changes_for_user(
+                user1_id,
+                from_key=after_user1_leave_token.room_key,
+                to_key=after_server_leave_token.room_key,
+            )
+        )
+
+        # Let the whole diff show on failure
+        self.maxDiff = None
+        self.assertEqual(
+            membership_changes,
+            [],
+        )
+
+    def test_we_cause_server_left_room(self) -> None:
+        """
+        Test that when probing over part of the DAG where the user leaves the room
+        causing the server to leave the room (because we were the last local user in the
+        room), we still see the join and leave changes.
+
+        This is to make sure we play nicely with this behavior: When the server leaves a
+        room, it will insert new rows with `event_id = null` into the
+        `current_state_delta_stream` table for all current state.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        before_room1_token = self.event_sources.get_current_token()
+
+        room_id1 = self.helper.create_room_as(
+            user2_id,
+            tok=user2_tok,
+            extra_content={
+                "power_level_content_override": {
+                    "users": {
+                        user2_id: 100,
+                        # Allow user1 to send state in the room
+                        user1_id: 100,
+                    }
+                }
+            },
+        )
+        join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
+        join_pos1 = self.get_success(
+            self.store.get_position_for_event(join_response1["event_id"])
+        )
+        # Make sure that random other non-member state that happens to have a `state_key`
+        # matching the user ID doesn't mess with things.
+        self.helper.send_state(
+            room_id1,
+            event_type="foobarbazdummy",
+            state_key=user1_id,
+            body={"foo": "bar"},
+            tok=user1_tok,
+        )
+
+        # User2 should leave the room first.
+        self.helper.leave(room_id1, user2_id, tok=user2_tok)
+
+        # User1 (the person we're testing with) should also leave the room (everyone has
+        # left the room which means the server is no longer in the room).
+        leave_response1 = self.helper.leave(room_id1, user1_id, tok=user1_tok)
+        leave_pos1 = self.get_success(
+            self.store.get_position_for_event(leave_response1["event_id"])
+        )
+
+        after_room1_token = self.event_sources.get_current_token()
+
+        # Get the membership changes for the user.
+        #
+        # At this point, the `current_state_delta_stream` table should look like the
+        # following. When the server leaves a room, it will insert new rows with
+        # `event_id = null` for all current state.
+        #
+        # | stream_id | room_id   | type                        | state_key     | event_id | prev_event_id |
+        # |-----------|-----------|-----------------------------|---------------|----------|---------------|
+        # | 2         | '!x:test' | 'm.room.create'             | ''            | '$xxx'   | None          |
+        # | 3         | '!x:test' | 'm.room.member'             | '@user2:test' | '$aaa'   | None          |
+        # | 4         | '!x:test' | 'm.room.history_visibility' | ''            | '$xxx'   | None          |
+        # | 4         | '!x:test' | 'm.room.join_rules'         | ''            | '$xxx'   | None          |
+        # | 4         | '!x:test' | 'm.room.power_levels'       | ''            | '$xxx'   | None          |
+        # | 7         | '!x:test' | 'm.room.member'             | '@user1:test' | '$ooo'   | None          |
+        # | 8         | '!x:test' | 'foobarbazdummy'            | '@user1:test' | '$xxx'   | None          |
+        # | 9         | '!x:test' | 'm.room.member'             | '@user2:test' | '$bbb'   | '$aaa'        |
+        # | 10        | '!x:test' | 'foobarbazdummy'            | '@user1:test' | None     | '$xxx'        |
+        # | 10        | '!x:test' | 'm.room.create'             | ''            | None     | '$xxx'        |
+        # | 10        | '!x:test' | 'm.room.history_visibility' | ''            | None     | '$xxx'        |
+        # | 10        | '!x:test' | 'm.room.join_rules'         | ''            | None     | '$xxx'        |
+        # | 10        | '!x:test' | 'm.room.member'             | '@user1:test' | None     | '$ooo'        |
+        # | 10        | '!x:test' | 'm.room.member'             | '@user2:test' | None     | '$bbb'        |
+        # | 10        | '!x:test' | 'm.room.power_levels'       | ''            | None     | '$xxx'        |
+        membership_changes = self.get_success(
+            self.store.get_current_state_delta_membership_changes_for_user(
+                user1_id,
+                from_key=before_room1_token.room_key,
+                to_key=after_room1_token.room_key,
+            )
+        )
+
+        # Let the whole diff show on failure
+        self.maxDiff = None
+        self.assertEqual(
+            membership_changes,
+            [
+                CurrentStateDeltaMembership(
+                    room_id=room_id1,
+                    event_id=join_response1["event_id"],
+                    event_pos=join_pos1,
+                    membership="join",
+                    sender=user1_id,
+                    prev_event_id=None,
+                    prev_event_pos=None,
+                    prev_membership=None,
+                    prev_sender=None,
+                ),
+                CurrentStateDeltaMembership(
+                    room_id=room_id1,
+                    event_id=None,  # leave_response1["event_id"],
+                    event_pos=leave_pos1,
+                    membership="leave",
+                    sender=None,  # user1_id,
+                    prev_event_id=join_response1["event_id"],
+                    prev_event_pos=join_pos1,
+                    prev_membership="join",
+                    prev_sender=user1_id,
+                ),
+            ],
+        )
+
+    def test_different_user_membership_persisted_in_same_batch(self) -> None:
+        """
+        Test batch of membership events from different users being processed at once.
+        This will result in all of the memberships being stored in the
+        `current_state_delta_stream` table with the same `stream_ordering` even though
+        the individual events have different `stream_ordering`s.
+        """
+        user1_id = self.register_user("user1", "pass")
+        _user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+        user3_id = self.register_user("user3", "pass")
+        _user3_tok = self.login(user3_id, "pass")
+        user4_id = self.register_user("user4", "pass")
+        _user4_tok = self.login(user4_id, "pass")
+
+        before_room1_token = self.event_sources.get_current_token()
+
+        # User2 is just the designated person to create the room (we do this across the
+        # tests to be consistent)
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+
+        # Persist the user1, user3, and user4 join events in the same batch so they all
+        # end up in the `current_state_delta_stream` table with the same
+        # stream_ordering.
+        join_event3, join_event_context3 = self.get_success(
+            create_event(
+                self.hs,
+                sender=user3_id,
+                type=EventTypes.Member,
+                state_key=user3_id,
+                content={"membership": "join"},
+                room_id=room_id1,
+            )
+        )
+        # We want to put user1 in the middle of the batch. This way, regardless of the
+        # implementation that inserts rows into `current_state_delta_stream` (whether it
+        # be minimum/maximum of stream position of the batch), we will still catch bugs.
+        join_event1, join_event_context1 = self.get_success(
+            create_event(
+                self.hs,
+                sender=user1_id,
+                type=EventTypes.Member,
+                state_key=user1_id,
+                content={"membership": "join"},
+                room_id=room_id1,
+            )
+        )
+        join_event4, join_event_context4 = self.get_success(
+            create_event(
+                self.hs,
+                sender=user4_id,
+                type=EventTypes.Member,
+                state_key=user4_id,
+                content={"membership": "join"},
+                room_id=room_id1,
+            )
+        )
+        self.get_success(
+            self.persistence.persist_events(
+                [
+                    (join_event3, join_event_context3),
+                    (join_event1, join_event_context1),
+                    (join_event4, join_event_context4),
+                ]
+            )
+        )
+
+        after_room1_token = self.event_sources.get_current_token()
+
+        # Get the membership changes for the user.
+        #
+        # At this point, the `current_state_delta_stream` table should look like (notice
+        # those three memberships at the end with `stream_id=7` because we persisted
+        # them in the same batch):
+        #
+        # | stream_id | room_id   | type                        | state_key     | event_id | prev_event_id |
+        # |-----------|-----------|-----------------------------|---------------|----------|---------------|
+        # | 2         | '!x:test' | 'm.room.create'             | ''            | '$xxx'   | None          |
+        # | 3         | '!x:test' | 'm.room.member'             | '@user2:test' | '$xxx'   | None          |
+        # | 4         | '!x:test' | 'm.room.history_visibility' | ''            | '$xxx'   | None          |
+        # | 4         | '!x:test' | 'm.room.join_rules'         | ''            | '$xxx'   | None          |
+        # | 4         | '!x:test' | 'm.room.power_levels'       | ''            | '$xxx'   | None          |
+        # | 7         | '!x:test' | 'm.room.member'             | '@user3:test' | '$xxx'   | None          |
+        # | 7         | '!x:test' | 'm.room.member'             | '@user1:test' | '$xxx'   | None          |
+        # | 7         | '!x:test' | 'm.room.member'             | '@user4:test' | '$xxx'   | None          |
+        membership_changes = self.get_success(
+            self.store.get_current_state_delta_membership_changes_for_user(
+                user1_id,
+                from_key=before_room1_token.room_key,
+                to_key=after_room1_token.room_key,
+            )
+        )
+
+        join_pos3 = self.get_success(
+            self.store.get_position_for_event(join_event3.event_id)
+        )
+
+        # Let the whole diff show on failure
+        self.maxDiff = None
+        self.assertEqual(
+            membership_changes,
+            [
+                CurrentStateDeltaMembership(
+                    room_id=room_id1,
+                    event_id=join_event1.event_id,
+                    # Ideally, this would be `join_pos1` (to match the `event_id`) but
+                    # when events are persisted in a batch, they are all stored in the
+                    # `current_state_delta_stream` table with the minimum
+                    # `stream_ordering` from the batch.
+                    event_pos=join_pos3,
+                    membership="join",
+                    sender=user1_id,
+                    prev_event_id=None,
+                    prev_event_pos=None,
+                    prev_membership=None,
+                    prev_sender=None,
+                ),
+            ],
+        )
+
+    def test_state_reset(self) -> None:
+        """
+        Test a state reset scenario where the user gets removed from the room (when
+        there is no corresponding leave event)
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
+        join_pos1 = self.get_success(
+            self.store.get_position_for_event(join_response1["event_id"])
+        )
+
+        before_reset_token = self.event_sources.get_current_token()
+
+        # Send another state event to make a position for the state reset to happen at
+        dummy_state_response = self.helper.send_state(
+            room_id1,
+            event_type="foobarbaz",
+            state_key="",
+            body={"foo": "bar"},
+            tok=user2_tok,
+        )
+        dummy_state_pos = self.get_success(
+            self.store.get_position_for_event(dummy_state_response["event_id"])
+        )
+
+        # Mock a state reset removing the membership for user1 in the current state
+        self.get_success(
+            self.store.db_pool.simple_delete(
+                table="current_state_events",
+                keyvalues={
+                    "room_id": room_id1,
+                    "type": EventTypes.Member,
+                    "state_key": user1_id,
+                },
+                desc="state reset user in current_state_delta_stream",
+            )
+        )
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                table="current_state_delta_stream",
+                values={
+                    "stream_id": dummy_state_pos.stream,
+                    "room_id": room_id1,
+                    "type": EventTypes.Member,
+                    "state_key": user1_id,
+                    "event_id": None,
+                    "prev_event_id": join_response1["event_id"],
+                    "instance_name": dummy_state_pos.instance_name,
+                },
+                desc="state reset user in current_state_delta_stream",
+            )
+        )
+
+        # Manually bust the cache since we're just manually messing with the database
+        # and not causing an actual state reset.
+        self.store._membership_stream_cache.entity_has_changed(
+            user1_id, dummy_state_pos.stream
+        )
+
+        after_reset_token = self.event_sources.get_current_token()
+
+        membership_changes = self.get_success(
+            self.store.get_current_state_delta_membership_changes_for_user(
+                user1_id,
+                from_key=before_reset_token.room_key,
+                to_key=after_reset_token.room_key,
+            )
+        )
+
+        # Let the whole diff show on failure
+        self.maxDiff = None
+        self.assertEqual(
+            membership_changes,
+            [
+                CurrentStateDeltaMembership(
+                    room_id=room_id1,
+                    event_id=None,
+                    event_pos=dummy_state_pos,
+                    membership="leave",
+                    sender=None,  # user1_id,
+                    prev_event_id=join_response1["event_id"],
+                    prev_event_pos=join_pos1,
+                    prev_membership="join",
+                    prev_sender=user1_id,
+                ),
+            ],
+        )
+
+    def test_excluded_room_ids(self) -> None:
+        """
+        Test that the `excluded_room_ids` option excludes changes from the specified rooms.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        before_room1_token = self.event_sources.get_current_token()
+
+        room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        join_response1 = self.helper.join(room_id1, user1_id, tok=user1_tok)
+        join_pos1 = self.get_success(
+            self.store.get_position_for_event(join_response1["event_id"])
+        )
+
+        room_id2 = self.helper.create_room_as(user2_id, tok=user2_tok)
+        join_response2 = self.helper.join(room_id2, user1_id, tok=user1_tok)
+        join_pos2 = self.get_success(
+            self.store.get_position_for_event(join_response2["event_id"])
+        )
+
+        after_room1_token = self.event_sources.get_current_token()
+
+        # First test that both rooms are returned without the `excluded_room_ids` option
+        membership_changes = self.get_success(
+            self.store.get_current_state_delta_membership_changes_for_user(
+                user1_id,
+                from_key=before_room1_token.room_key,
+                to_key=after_room1_token.room_key,
+            )
+        )
+
+        # Let the whole diff show on failure
+        self.maxDiff = None
+        self.assertEqual(
+            membership_changes,
+            [
+                CurrentStateDeltaMembership(
+                    room_id=room_id1,
+                    event_id=join_response1["event_id"],
+                    event_pos=join_pos1,
+                    membership="join",
+                    sender=user1_id,
+                    prev_event_id=None,
+                    prev_event_pos=None,
+                    prev_membership=None,
+                    prev_sender=None,
+                ),
+                CurrentStateDeltaMembership(
+                    room_id=room_id2,
+                    event_id=join_response2["event_id"],
+                    event_pos=join_pos2,
+                    membership="join",
+                    sender=user1_id,
+                    prev_event_id=None,
+                    prev_event_pos=None,
+                    prev_membership=None,
+                    prev_sender=None,
+                ),
+            ],
+        )
+
+        # Then test that `excluded_room_ids` excludes room2 as expected
+        membership_changes = self.get_success(
+            self.store.get_current_state_delta_membership_changes_for_user(
+                user1_id,
+                from_key=before_room1_token.room_key,
+                to_key=after_room1_token.room_key,
+                excluded_room_ids=[room_id2],
+            )
+        )
+
+        # Let the whole diff show on failure
+        self.maxDiff = None
+        self.assertEqual(
+            membership_changes,
+            [
+                CurrentStateDeltaMembership(
+                    room_id=room_id1,
+                    event_id=join_response1["event_id"],
+                    event_pos=join_pos1,
+                    membership="join",
+                    sender=user1_id,
+                    prev_event_id=None,
+                    prev_event_pos=None,
+                    prev_membership=None,
+                    prev_sender=None,
+                )
+            ],
+        )
+
+
+class GetCurrentStateDeltaMembershipChangesForUserFederationTestCase(
+    FederatingHomeserverTestCase
+):
+    """
+    Test `get_current_state_delta_membership_changes_for_user(...)` when joining remote federated rooms.
+    """
+
+    servlets = [
+        admin.register_servlets_for_client_rest_resource,
+        room.register_servlets,
+        login.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.sliding_sync_handler = self.hs.get_sliding_sync_handler()
+        self.store = self.hs.get_datastores().main
+        self.event_sources = hs.get_event_sources()
+        self.room_member_handler = hs.get_room_member_handler()
+
+    def test_remote_join(self) -> None:
+        """
+        Test remote join where the first rows in `current_state_delta_stream` will just
+        be the state when you joined the remote room.
+        """
+        user1_id = self.register_user("user1", "pass")
+        _user1_tok = self.login(user1_id, "pass")
+
+        before_join_token = self.event_sources.get_current_token()
+
+        intially_unjoined_room_id = f"!example:{self.OTHER_SERVER_NAME}"
+
+        # Remotely join a room on another homeserver.
+        #
+        # To do this we have to mock the responses from the remote homeserver. We also
+        # patch out a bunch of event checks on our end.
+        create_event_source = {
+            "auth_events": [],
+            "content": {
+                "creator": f"@creator:{self.OTHER_SERVER_NAME}",
+                "room_version": self.hs.config.server.default_room_version.identifier,
+            },
+            "depth": 0,
+            "origin_server_ts": 0,
+            "prev_events": [],
+            "room_id": intially_unjoined_room_id,
+            "sender": f"@creator:{self.OTHER_SERVER_NAME}",
+            "state_key": "",
+            "type": EventTypes.Create,
+        }
+        self.add_hashes_and_signatures_from_other_server(
+            create_event_source,
+            self.hs.config.server.default_room_version,
+        )
+        create_event = FrozenEventV3(
+            create_event_source,
+            self.hs.config.server.default_room_version,
+            {},
+            None,
+        )
+        creator_join_event_source = {
+            "auth_events": [create_event.event_id],
+            "content": {
+                "membership": "join",
+            },
+            "depth": 1,
+            "origin_server_ts": 1,
+            "prev_events": [],
+            "room_id": intially_unjoined_room_id,
+            "sender": f"@creator:{self.OTHER_SERVER_NAME}",
+            "state_key": f"@creator:{self.OTHER_SERVER_NAME}",
+            "type": EventTypes.Member,
+        }
+        self.add_hashes_and_signatures_from_other_server(
+            creator_join_event_source,
+            self.hs.config.server.default_room_version,
+        )
+        creator_join_event = FrozenEventV3(
+            creator_join_event_source,
+            self.hs.config.server.default_room_version,
+            {},
+            None,
+        )
+
+        # Our local user is going to remote join the room
+        join_event_source = {
+            "auth_events": [create_event.event_id],
+            "content": {"membership": "join"},
+            "depth": 1,
+            "origin_server_ts": 100,
+            "prev_events": [creator_join_event.event_id],
+            "sender": user1_id,
+            "state_key": user1_id,
+            "room_id": intially_unjoined_room_id,
+            "type": EventTypes.Member,
+        }
+        add_hashes_and_signatures(
+            self.hs.config.server.default_room_version,
+            join_event_source,
+            self.hs.hostname,
+            self.hs.signing_key,
+        )
+        join_event = FrozenEventV3(
+            join_event_source,
+            self.hs.config.server.default_room_version,
+            {},
+            None,
+        )
+
+        mock_make_membership_event = AsyncMock(
+            return_value=(
+                self.OTHER_SERVER_NAME,
+                join_event,
+                self.hs.config.server.default_room_version,
+            )
+        )
+        mock_send_join = AsyncMock(
+            return_value=SendJoinResult(
+                join_event,
+                self.OTHER_SERVER_NAME,
+                state=[create_event, creator_join_event],
+                auth_chain=[create_event, creator_join_event],
+                partial_state=False,
+                servers_in_room=frozenset(),
+            )
+        )
+
+        with patch.object(
+            self.room_member_handler.federation_handler.federation_client,
+            "make_membership_event",
+            mock_make_membership_event,
+        ), patch.object(
+            self.room_member_handler.federation_handler.federation_client,
+            "send_join",
+            mock_send_join,
+        ), patch(
+            "synapse.event_auth._is_membership_change_allowed",
+            return_value=None,
+        ), patch(
+            "synapse.handlers.federation_event.check_state_dependent_auth_rules",
+            return_value=None,
+        ):
+            self.get_success(
+                self.room_member_handler.update_membership(
+                    requester=create_requester(user1_id),
+                    target=UserID.from_string(user1_id),
+                    room_id=intially_unjoined_room_id,
+                    action=Membership.JOIN,
+                    remote_room_hosts=[self.OTHER_SERVER_NAME],
+                )
+            )
+
+        after_join_token = self.event_sources.get_current_token()
+
+        # Get the membership changes for the user.
+        #
+        # At this point, the `current_state_delta_stream` table should look like the
+        # following. Notice that all of the events are at the same `stream_id` because
+        # the current state starts out where we remotely joined:
+        #
+        # | stream_id | room_id                      | type            | state_key                    | event_id | prev_event_id |
+        # |-----------|------------------------------|-----------------|------------------------------|----------|---------------|
+        # | 2         | '!example:other.example.com' | 'm.room.member' | '@user1:test'                | '$xxx'   | None          |
+        # | 2         | '!example:other.example.com' | 'm.room.create' | ''                           | '$xxx'   | None          |
+        # | 2         | '!example:other.example.com' | 'm.room.member' | '@creator:other.example.com' | '$xxx'   | None          |
+        membership_changes = self.get_success(
+            self.store.get_current_state_delta_membership_changes_for_user(
+                user1_id,
+                from_key=before_join_token.room_key,
+                to_key=after_join_token.room_key,
+            )
+        )
+
+        join_pos = self.get_success(
+            self.store.get_position_for_event(join_event.event_id)
+        )
+
+        # Let the whole diff show on failure
+        self.maxDiff = None
+        self.assertEqual(
+            membership_changes,
+            [
+                CurrentStateDeltaMembership(
+                    room_id=intially_unjoined_room_id,
+                    event_id=join_event.event_id,
+                    event_pos=join_pos,
+                    membership="join",
+                    sender=user1_id,
+                    prev_event_id=None,
+                    prev_event_pos=None,
+                    prev_membership=None,
+                    prev_sender=None,
+                ),
+            ],
+        )
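The deltas asserted throughout the tests above all share one shape. As a rough illustration of how a consumer might fold such a list into the set of currently-joined rooms, here is a minimal sketch; the `MembershipDelta` stand-in and its fields are inferred from the assertions above and are assumptions, not Synapse's actual class.

```python
from dataclasses import dataclass
from typing import Dict, List, Optional, Set


@dataclass(frozen=True)
class MembershipDelta:
    """Stand-in for the fields the assertions above rely on."""

    room_id: str
    event_id: Optional[str]  # None for state resets / rows written when the server left
    membership: str  # "join", "leave", ...
    prev_event_id: Optional[str]
    prev_membership: Optional[str]


def joined_rooms(deltas: List[MembershipDelta]) -> Set[str]:
    """Rooms whose most recent delta in the window is a join.

    Assumes `deltas` is ordered by stream position, as the storage function
    exercised above appears to return them.
    """
    latest: Dict[str, MembershipDelta] = {}
    for delta in deltas:
        latest[delta.room_id] = delta  # later deltas overwrite earlier ones
    return {room_id for room_id, delta in latest.items() if delta.membership == "join"}
```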
@@ -125,13 +125,15 @@ async def mark_event_as_partial_state(
     in this table).
     """
     store = hs.get_datastores().main
-    await store.db_pool.simple_upsert(
-        table="partial_state_rooms",
-        keyvalues={"room_id": room_id},
-        values={},
-        insertion_values={"room_id": room_id},
+    # Use the store helper to insert into the database so the caches are busted
+    await store.store_partial_state_room(
+        room_id=room_id,
+        servers={hs.hostname},
+        device_lists_stream_id=0,
+        joined_via=hs.hostname,
     )

+    # FIXME: Bust the cache
     await store.db_pool.simple_insert(
         table="partial_state_events",
         values={
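The substitution above trades a raw `simple_upsert` for a store helper precisely so that in-memory caches are invalidated together with the database write. A generic sketch of that write-through pattern follows; all names here are illustrative toy stand-ins, not Synapse's actual store API.

```python
from typing import Any, Dict


class RoomStore:
    """Toy store illustrating cache invalidation alongside a DB write."""

    def __init__(self) -> None:
        self._rows: Dict[str, Dict[str, Any]] = {}  # stands in for the DB table
        self._partial_state_cache: Dict[str, bool] = {}

    async def store_partial_state_room(self, room_id: str) -> None:
        # Writing through the helper keeps the cache coherent with the write,
        # which poking the table directly would not.
        self._rows[room_id] = {"room_id": room_id}
        self._partial_state_cache[room_id] = True

    async def is_partial_state_room(self, room_id: str) -> bool:
        if room_id not in self._partial_state_cache:
            self._partial_state_cache[room_id] = room_id in self._rows
        return self._partial_state_cache[room_id]
```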