Mirror of https://github.com/element-hq/synapse.git (synced 2024-12-19 09:31:35 +03:00)

Commit d9db944600: Merge remote-tracking branch 'origin/develop' into matrix-org-hotfixes

30 changed files with 210 additions and 63 deletions

.github/workflows/docs-pr-netlify.yaml (vendored, 2 changes)

@@ -14,7 +14,7 @@ jobs:
       # There's a 'download artifact' action, but it hasn't been updated for the workflow_run action
       # (https://github.com/actions/download-artifact/issues/60) so instead we get this mess:
       - name: 📥 Download artifact
-        uses: dawidd6/action-download-artifact@e7466d1a7587ed14867642c2ca74b5bcc1e19a2d # v3.0.0
+        uses: dawidd6/action-download-artifact@72aaadce3bc708349fc665eee3785cbb1b6e51d0 # v3.1.1
         with:
           workflow: docs-pr.yaml
           run_id: ${{ github.event.workflow_run.id }}

.github/workflows/latest_deps.yml (vendored, 2 changes)

@@ -226,7 +226,7 @@ jobs:

     steps:
       - uses: actions/checkout@v4
-      - uses: JasonEtco/create-an-issue@e27dddc79c92bc6e4562f268fffa5ed752639abd # v2.9.1
+      - uses: JasonEtco/create-an-issue@1b14a70e4d8dc185e5cc76d3bec9eab20257b2c5 # v2.9.2
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:

.github/workflows/twisted_trunk.yml (vendored, 2 changes)

@@ -207,7 +207,7 @@ jobs:

     steps:
       - uses: actions/checkout@v4
-      - uses: JasonEtco/create-an-issue@e27dddc79c92bc6e4562f268fffa5ed752639abd # v2.9.1
+      - uses: JasonEtco/create-an-issue@1b14a70e4d8dc185e5cc76d3bec9eab20257b2c5 # v2.9.2
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:

@@ -1,3 +1,10 @@
+# Synapse 1.102.0 (2024-03-05)
+
+### Bugfixes
+
+- Revert https://github.com/element-hq/synapse/pull/16756, which caused incorrect notification counts on mobile clients since v1.100.0. ([\#16979](https://github.com/element-hq/synapse/issues/16979))
+
+
 # Synapse 1.102.0rc1 (2024-02-20)
 
 ### Features

Cargo.lock (generated, 4 changes)

@@ -13,9 +13,9 @@ dependencies = [
 
 [[package]]
 name = "anyhow"
-version = "1.0.79"
+version = "1.0.80"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca"
+checksum = "5ad32ce52e4161730f7098c077cd2ed6229b5804ccf99e5366be1ab72a98b4e1"
 
 [[package]]
 name = "arc-swap"

changelog.d/16768.doc (new file, 1 change)

@@ -0,0 +1 @@
+Add HAProxy example for single port operation to reverse proxy documentation. Contributed by Georg Pfuetzenreuter (@tacerus).

changelog.d/16874.feature (new file, 1 change)

@@ -0,0 +1 @@
+Add a new [List Accounts v3](https://element-hq.github.io/synapse/v1.103/admin_api/user_admin_api.html#list-accounts-v3) Admin API with improved deactivated user filtering capabilities.

changelog.d/16946.doc (new file, 1 change)

@@ -0,0 +1 @@
+Improve the documentation around running Complement tests with new configuration parameters.

changelog.d/16947.feature (new file, 1 change)

@@ -0,0 +1 @@
+Include `Retry-After` header by default per [MSC4041](https://github.com/matrix-org/matrix-spec-proposals/pull/4041). Contributed by @clokep.

changelog.d/16951.doc (new file, 1 change)

@@ -0,0 +1 @@
+Add docs on upgrading from a very old version.

changelog.d/16973.bugfix (new file, 1 change)

@@ -0,0 +1 @@
+Fix joining remote rooms when a module uses the `on_new_event` callback. This callback may now pass partial state events instead of the full state for remote rooms.

debian/changelog (vendored, 6 changes)

@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.102.0) stable; urgency=medium
+
+  * New Synapse release 1.102.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 05 Mar 2024 14:47:03 +0000
+
 matrix-synapse-py3 (1.102.0~rc1) stable; urgency=medium
 
   * New Synapse release 1.102.0rc1.

@@ -30,3 +30,14 @@ Consult `scripts-dev/complement.sh` in the repository root for a real example.
 
 [complement]: https://github.com/matrix-org/complement
 [complementEnv]: https://github.com/matrix-org/complement/pull/382
+
+## How to modify homeserver.yaml for Complement tests
+
+It's common for MSCs to be gated behind a feature flag like this:
+```yaml
+experimental_features:
+  faster_joins: true
+```
+To modify this for the Complement image, modify `./conf/workers-shared-extra.yaml.j2`. Despite the name,
+this will affect non-worker mode as well. Remember to _rebuild_ the image (so don't use `-e` if using
+`complement.sh`).

@@ -164,6 +164,7 @@ Body parameters:
 Other allowed options are: `bot` and `support`.
 
 ## List Accounts
+### List Accounts (V2)
 
 This API returns all local user accounts.
 By default, the response is ordered by ascending user ID.

@@ -287,6 +288,19 @@ The following fields are returned in the JSON response body:
 
 *Added in Synapse 1.93:* the `locked` query parameter and response field.
 
+### List Accounts (V3)
+
+This API returns all local user accounts (see v2). In contrast to v2, the query parameter `deactivated` is handled differently.
+
+```
+GET /_synapse/admin/v3/users
+```
+
+**Parameters**
+- `deactivated` - Optional flag to filter deactivated users. If `true`, only deactivated users are returned.
+  If `false`, deactivated users are excluded from the query. When the flag is absent (the default),
+  users are not filtered by deactivation status.
+
 ## Query current sessions for a user
 
 This API returns information about the active sessions for a specific user.

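For reference, a minimal sketch of querying the v3 endpoint described above, written against the documented request and response shape. The homeserver URL and admin access token are placeholders, not values from this diff.

```python
# Sketch: list only deactivated accounts via the List Accounts (V3) Admin API.
# BASE_URL and ADMIN_TOKEN are placeholders.
import requests

BASE_URL = "http://localhost:8008"      # assumed locally reachable homeserver
ADMIN_TOKEN = "<admin access token>"    # placeholder

resp = requests.get(
    f"{BASE_URL}/_synapse/admin/v3/users",
    params={"deactivated": "true"},     # "false" excludes them; omit to not filter
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
)
resp.raise_for_status()
body = resp.json()
print(f"{body['total']} deactivated users")
for user in body["users"]:
    print(user["name"])
```
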
@@ -142,6 +142,10 @@ Called after sending an event into a room. The module is passed the event, as well
 as the state of the room _after_ the event. This means that if the event is a state event,
 it will be included in this state.
 
+The state map may not be complete if Synapse hasn't yet loaded the full state
+of the room. This can happen for events in rooms that were just joined from
+a remote server.
+
 Note that this callback is called when the event has already been processed and stored
 into the room, which means this callback cannot be used to deny persisting the event. To
 deny an incoming event, see [`check_event_for_spam`](spam_checker_callbacks.md#check_event_for_spam) instead.

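To illustrate the note above, here is a sketch of an `on_new_event` module callback that tolerates a partial state map. The signature follows the module API documentation; the room-name lookup and logging are illustrative assumptions, not part of this diff.

```python
# Sketch of an `on_new_event` third-party rules callback that copes with a
# possibly partial state map.
import logging

from synapse.events import EventBase
from synapse.types import StateMap

logger = logging.getLogger(__name__)


async def on_new_event(event: EventBase, state_events: StateMap[EventBase]) -> None:
    # The ("m.room.name", "") entry may be missing either because the room has
    # no name or because Synapse has not loaded the full state yet.
    name_event = state_events.get(("m.room.name", ""))
    room_name = name_event.content.get("name") if name_event else "<unknown>"
    logger.info("event %s in room %s (%s)", event.event_id, event.room_id, room_name)
```
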
@@ -186,6 +186,25 @@ Example configuration, if using a UNIX socket. The configuration lines regarding
 backend matrix
   server matrix unix@/run/synapse/main_public.sock
 ```
+Example configuration when using a single port for both client and federation traffic.
+```
+frontend https
+  bind *:443,[::]:443 ssl crt /etc/ssl/haproxy/ strict-sni alpn h2,http/1.1
+  http-request set-header X-Forwarded-Proto https if { ssl_fc }
+  http-request set-header X-Forwarded-Proto http if !{ ssl_fc }
+  http-request set-header X-Forwarded-For %[src]
+
+  acl matrix-host hdr(host) -i matrix.example.com matrix.example.com:443
+  acl matrix-sni ssl_fc_sni matrix.example.com
+  acl matrix-path path_beg /_matrix
+  acl matrix-path path_beg /_synapse/client
+
+  use_backend matrix if matrix-host matrix-path
+  use_backend matrix if matrix-sni
+
+backend matrix
+  server matrix 127.0.0.1:8008
+```
 
 [Delegation](delegate.md) example:
 ```

@@ -97,6 +97,26 @@ v1.61.0.
 
 <!-- REPLACE_WITH_SCHEMA_VERSIONS -->
 
+## Upgrading from a very old version
+
+You need to read all of the upgrade notes for each version between your current
+version and the latest so that you can update your dependencies, environment,
+config files, etc. if necessary. But you do not need to perform an
+upgrade to each individual version that was missed.
+
+We do not have a list of which versions must be installed. Instead, we recommend
+that you upgrade through each incompatible database schema version, which would
+give you the ability to roll back the maximum number of versions should anything
+go wrong. See [Rolling back to older versions](#rolling-back-to-older-versions)
+above.
+
+Additionally, new versions of Synapse will occasionally run database migrations
+and background updates to update the database. Synapse will not start until
+database migrations are complete. You should wait until background updates from
+each upgrade are complete before moving on to the next upgrade, to avoid
+stacking them up. You can monitor the currently running background updates with
+[the Admin API](usage/administration/admin_api/background_updates.html#status).
+
 # Upgrading to v1.100.0
 
 ## Minimum supported Rust version

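As a practical companion to the advice above, a small sketch that polls the background-updates status Admin API before moving on to the next upgrade step. The base URL, token, and 30-second polling interval are assumptions.

```python
# Sketch: wait until no background updates are running before continuing an
# incremental upgrade. BASE_URL and ADMIN_TOKEN are placeholders.
import time

import requests

BASE_URL = "http://localhost:8008"
ADMIN_TOKEN = "<admin access token>"


def background_updates_done() -> bool:
    resp = requests.get(
        f"{BASE_URL}/_synapse/admin/v1/background_updates/status",
        headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    )
    resp.raise_for_status()
    # An empty `current_updates` map means nothing is running right now.
    return not resp.json().get("current_updates")


while not background_updates_done():
    time.sleep(30)
print("Background updates complete; safe to continue upgrading.")
```
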
@@ -120,6 +120,11 @@ for file in $source_directory/*; do
 done
 ```
 
+How do I upgrade from a very old version of Synapse to the latest?
+---
+See [this](../../upgrade.html#upgrading-from-a-very-old-version) section in the
+upgrade docs.
+
 Manually resetting passwords
 ---
 Users can reset their password through their client. Alternatively, a server admin

poetry.lock (generated, 22 changes)

@@ -559,13 +559,13 @@ dev = ["Sphinx", "coverage", "flake8", "lxml", "lxml-stubs", "memory-profiler",
 
 [[package]]
 name = "furo"
-version = "2023.9.10"
+version = "2024.1.29"
 description = "A clean customisable Sphinx documentation theme."
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "furo-2023.9.10-py3-none-any.whl", hash = "sha256:513092538537dc5c596691da06e3c370714ec99bc438680edc1debffb73e5bfc"},
-    {file = "furo-2023.9.10.tar.gz", hash = "sha256:5707530a476d2a63b8cad83b4f961f3739a69f4b058bcf38a03a39fa537195b2"},
+    {file = "furo-2024.1.29-py3-none-any.whl", hash = "sha256:3548be2cef45a32f8cdc0272d415fcb3e5fa6a0eb4ddfe21df3ecf1fe45a13cf"},
+    {file = "furo-2024.1.29.tar.gz", hash = "sha256:4d6b2fe3f10a6e36eb9cc24c1e7beb38d7a23fc7b3c382867503b7fcac8a1e02"},
 ]
 
 [package.dependencies]

@@ -2081,17 +2081,17 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"]
 
 [[package]]
 name = "pyopenssl"
-version = "23.3.0"
+version = "24.0.0"
 description = "Python wrapper module around the OpenSSL library"
 optional = false
 python-versions = ">=3.7"
 files = [
-    {file = "pyOpenSSL-23.3.0-py3-none-any.whl", hash = "sha256:6756834481d9ed5470f4a9393455154bc92fe7a64b7bc6ee2c804e78c52099b2"},
-    {file = "pyOpenSSL-23.3.0.tar.gz", hash = "sha256:6b2cba5cc46e822750ec3e5a81ee12819850b11303630d575e98108a079c2b12"},
+    {file = "pyOpenSSL-24.0.0-py3-none-any.whl", hash = "sha256:ba07553fb6fd6a7a2259adb9b84e12302a9a8a75c44046e8bb5d3e5ee887e3c3"},
+    {file = "pyOpenSSL-24.0.0.tar.gz", hash = "sha256:6aa33039a93fffa4563e655b61d11364d01264be8ccb49906101e02a334530bf"},
 ]
 
 [package.dependencies]
-cryptography = ">=41.0.5,<42"
+cryptography = ">=41.0.5,<43"
 
 [package.extras]
 docs = ["sphinx (!=5.2.0,!=5.2.0.post0,!=7.2.5)", "sphinx-rtd-theme"]

@@ -3070,13 +3070,13 @@ referencing = "*"
 
 [[package]]
 name = "types-netaddr"
-version = "0.10.0.20240106"
+version = "1.2.0.20240219"
 description = "Typing stubs for netaddr"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "types-netaddr-0.10.0.20240106.tar.gz", hash = "sha256:7cc6c16bc76f57faf4a042184f748a05e9642b189caf7fe7e36c07cb87c057b3"},
-    {file = "types_netaddr-0.10.0.20240106-py3-none-any.whl", hash = "sha256:0acd8116293b06abe66484cf033c2d597f039326c28e3df83b8abd5709f6c65d"},
+    {file = "types-netaddr-1.2.0.20240219.tar.gz", hash = "sha256:984e70ad838218d3032f37f05a7e294f7b007fe274ec9d774265c8c06698395f"},
+    {file = "types_netaddr-1.2.0.20240219-py3-none-any.whl", hash = "sha256:b26144e878acb8a1a9008e6997863714db04f8029a0f7f6bfe483c977d21b522"},
 ]
 
 [[package]]

@@ -3434,4 +3434,4 @@ user-search = ["pyicu"]
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.8.0"
-content-hash = "053bda662e95c273f4eda41d7ece8de0e404783ac66d54cdbedc396e196fb63a"
+content-hash = "e4ca55af1dcb6b28b8064b7551008fd16f6cdfa9cb9bf90d18c6b47766b56ae6"

@@ -96,7 +96,7 @@ module-name = "synapse.synapse_rust"
 
 [tool.poetry]
 name = "matrix-synapse"
-version = "1.102.0rc1"
+version = "1.102.0"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "AGPL-3.0-or-later"

@@ -372,7 +372,7 @@ optional = true
 sphinx = {version = "^6.1", python = "^3.8"}
 sphinx-autodoc2 = {version = ">=0.4.2,<0.6.0", python = "^3.8"}
 myst-parser = {version = "^1.0.0", python = "^3.8"}
-furo = ">=2022.12.7,<2024.0.0"
+furo = ">=2022.12.7,<2025.0.0"
 
 
 [build-system]

@@ -517,8 +517,6 @@ class InvalidCaptchaError(SynapseError):
 class LimitExceededError(SynapseError):
     """A client has sent too many requests and is being throttled."""
 
-    include_retry_after_header = False
-
     def __init__(
         self,
         limiter_name: str,

@@ -526,9 +524,10 @@ class LimitExceededError(SynapseError):
         retry_after_ms: Optional[int] = None,
         errcode: str = Codes.LIMIT_EXCEEDED,
     ):
+        # Use HTTP header Retry-After to enable library-assisted retry handling.
         headers = (
             {"Retry-After": str(math.ceil(retry_after_ms / 1000))}
-            if self.include_retry_after_header and retry_after_ms is not None
+            if retry_after_ms is not None
             else None
         )
         super().__init__(code, "Too Many Requests", errcode, headers=headers)

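The comment added above refers to library-assisted retry handling on the client side. A hedged sketch of what that might look like for a generic HTTP client follows; the URL, token, and retry budget are assumptions, and `retry_after_ms` in the JSON body is the Matrix-level fallback when the header is absent.

```python
# Client-side sketch: on HTTP 429, sleep for the advertised Retry-After
# (seconds) and try again, falling back to the body's retry_after_ms.
import time

import requests


def get_with_retry(url: str, token: str, max_attempts: int = 5) -> requests.Response:
    headers = {"Authorization": f"Bearer {token}"}
    resp = requests.get(url, headers=headers)
    for _ in range(max_attempts - 1):
        if resp.status_code != 429:
            break
        retry_after = resp.headers.get("Retry-After")
        if retry_after is not None:
            delay = float(retry_after)
        else:
            delay = resp.json().get("retry_after_ms", 1000) / 1000
        time.sleep(delay)
        resp = requests.get(url, headers=headers)
    return resp
```
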
@@ -25,7 +25,6 @@ from typing import TYPE_CHECKING, Any, Optional
 import attr
 import attr.validators
 
-from synapse.api.errors import LimitExceededError
 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
 from synapse.config import ConfigError
 from synapse.config._base import Config, RootConfig

@@ -415,14 +414,6 @@
             "msc4010_push_rules_account_data", False
         )
 
-        # MSC4041: Use HTTP header Retry-After to enable library-assisted retry handling
-        #
-        # This is a bit hacky, but the most reasonable way to *alway* include the
-        # headers.
-        LimitExceededError.include_retry_after_header = experimental.get(
-            "msc4041_enabled", False
-        )
-
         self.msc4028_push_encrypted_events = experimental.get(
             "msc4028_push_encrypted_events", False
         )

@@ -366,7 +366,7 @@ class ThirdPartyEventRulesModuleApiCallbacks:
         if len(self._check_threepid_can_be_invited_callbacks) == 0:
             return True
 
-        state_events = await self._get_state_map_for_room(room_id)
+        state_events = await self._storage_controllers.state.get_current_state(room_id)
 
         for callback in self._check_threepid_can_be_invited_callbacks:
             try:

@@ -399,7 +399,7 @@ class ThirdPartyEventRulesModuleApiCallbacks:
         if len(self._check_visibility_can_be_modified_callbacks) == 0:
             return True
 
-        state_events = await self._get_state_map_for_room(room_id)
+        state_events = await self._storage_controllers.state.get_current_state(room_id)
 
         for callback in self._check_visibility_can_be_modified_callbacks:
             try:

@@ -427,7 +427,13 @@ class ThirdPartyEventRulesModuleApiCallbacks:
             return
 
         event = await self.store.get_event(event_id)
-        state_events = await self._get_state_map_for_room(event.room_id)
+
+        # We *don't* want to wait for the full state here, because waiting for full
+        # state will persist event, which in turn will call this method.
+        # This would end up in a deadlock.
+        state_events = await self._storage_controllers.state.get_current_state(
+            event.room_id, await_full_state=False
+        )
 
         for callback in self._on_new_event_callbacks:
             try:

@@ -490,17 +496,6 @@ class ThirdPartyEventRulesModuleApiCallbacks:
         )
         return True
 
-    async def _get_state_map_for_room(self, room_id: str) -> StateMap[EventBase]:
-        """Given a room ID, return the state events of that room.
-
-        Args:
-            room_id: The ID of the room.
-
-        Returns:
-            A dict mapping (event type, state key) to state event.
-        """
-        return await self._storage_controllers.state.get_current_state(room_id)
-
     async def on_profile_update(
         self, user_id: str, new_profile: ProfileInfo, by_admin: bool, deactivation: bool
     ) -> None:

@@ -109,6 +109,7 @@ from synapse.rest.admin.users import (
     UserReplaceMasterCrossSigningKeyRestServlet,
     UserRestServletV2,
     UsersRestServletV2,
+    UsersRestServletV3,
     UserTokenRestServlet,
     WhoisRestServlet,
 )

@@ -289,6 +290,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
     UserTokenRestServlet(hs).register(http_server)
     UserRestServletV2(hs).register(http_server)
     UsersRestServletV2(hs).register(http_server)
+    UsersRestServletV3(hs).register(http_server)
     UserMediaStatisticsRestServlet(hs).register(http_server)
     LargestRoomsStatistics(hs).register(http_server)
     EventReportDetailRestServlet(hs).register(http_server)

@@ -23,7 +23,7 @@ import hmac
 import logging
 import secrets
 from http import HTTPStatus
-from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
+from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
 
 import attr
 

@@ -118,7 +118,8 @@ class UsersRestServletV2(RestServlet):
                 errcode=Codes.INVALID_PARAM,
             )
 
-        deactivated = parse_boolean(request, "deactivated", default=False)
+        deactivated = self._parse_parameter_deactivated(request)
+
         locked = parse_boolean(request, "locked", default=False)
         admins = parse_boolean(request, "admins")
 

@@ -182,6 +183,22 @@
 
         return HTTPStatus.OK, ret
 
+    def _parse_parameter_deactivated(self, request: SynapseRequest) -> Optional[bool]:
+        """
+        Return None (no filtering) if `deactivated` is `true`, otherwise return `False`
+        (exclude deactivated users from the results).
+        """
+        return None if parse_boolean(request, "deactivated") else False
+
+
+class UsersRestServletV3(UsersRestServletV2):
+    PATTERNS = admin_patterns("/users$", "v3")
+
+    def _parse_parameter_deactivated(
+        self, request: SynapseRequest
+    ) -> Union[bool, None]:
+        return parse_boolean(request, "deactivated")
+
 
 class UserRestServletV2(RestServlet):
     PATTERNS = admin_patterns("/users/(?P<user_id>[^/]*)$", "v2")

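A standalone illustration (not code from this diff) of how the v2 and v3 servlets interpret the `deactivated` query parameter once it has been parsed to a boolean, or to None when the parameter is absent:

```python
# Illustration of the v2 vs v3 semantics of ?deactivated=... after parsing.
from typing import Optional


def v2_deactivated_filter(param: Optional[bool]) -> Optional[bool]:
    # v2: `deactivated=true` means "do not filter" (include deactivated users);
    # anything else excludes deactivated users from the results.
    return None if param else False


def v3_deactivated_filter(param: Optional[bool]) -> Optional[bool]:
    # v3: the flag is passed through unchanged. True -> only deactivated users,
    # False -> only active users, None (absent) -> no filtering at all.
    return param


assert v2_deactivated_filter(True) is None
assert v2_deactivated_filter(None) is False
assert v3_deactivated_filter(True) is True
assert v3_deactivated_filter(None) is None
```
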
@@ -562,10 +562,15 @@ class StateStorageController:
     @trace
     @tag_args
     async def get_current_state(
-        self, room_id: str, state_filter: Optional[StateFilter] = None
+        self,
+        room_id: str,
+        state_filter: Optional[StateFilter] = None,
+        await_full_state: bool = True,
     ) -> StateMap[EventBase]:
         """Same as `get_current_state_ids` but also fetches the events"""
-        state_map_ids = await self.get_current_state_ids(room_id, state_filter)
+        state_map_ids = await self.get_current_state_ids(
+            room_id, state_filter, await_full_state
+        )
 
         event_map = await self.stores.main.get_events(list(state_map_ids.values()))
 

@@ -176,7 +176,7 @@ class DataStore(
         user_id: Optional[str] = None,
         name: Optional[str] = None,
         guests: bool = True,
-        deactivated: bool = False,
+        deactivated: Optional[bool] = None,
         admins: Optional[bool] = None,
         order_by: str = UserSortOrder.NAME.value,
         direction: Direction = Direction.FORWARDS,

@@ -232,8 +232,11 @@
             if not guests:
                 filters.append("is_guest = 0")
 
-            if not deactivated:
-                filters.append("deactivated = 0")
+            if deactivated is not None:
+                if deactivated:
+                    filters.append("deactivated = 1")
+                else:
+                    filters.append("deactivated = 0")
 
             if not locked:
                 filters.append("locked IS FALSE")

@@ -33,18 +33,14 @@ class LimitExceededErrorTestCase(unittest.TestCase):
         self.assertIn("needle", err.debug_context)
         self.assertNotIn("needle", serialised)
 
-    # Create a sub-class to avoid mutating the class-level property.
-    class LimitExceededErrorHeaders(LimitExceededError):
-        include_retry_after_header = True
-
     def test_limit_exceeded_header(self) -> None:
-        err = self.LimitExceededErrorHeaders(limiter_name="test", retry_after_ms=100)
+        err = LimitExceededError(limiter_name="test", retry_after_ms=100)
         self.assertEqual(err.error_dict(None).get("retry_after_ms"), 100)
         assert err.headers is not None
         self.assertEqual(err.headers.get("Retry-After"), "1")
 
     def test_limit_exceeded_rounding(self) -> None:
-        err = self.LimitExceededErrorHeaders(limiter_name="test", retry_after_ms=3001)
+        err = LimitExceededError(limiter_name="test", retry_after_ms=3001)
         self.assertEqual(err.error_dict(None).get("retry_after_ms"), 3001)
         assert err.headers is not None
         self.assertEqual(err.headers.get("Retry-After"), "4")

@@ -503,7 +503,7 @@ class UsersListTestCase(unittest.HomeserverTestCase):
 
         channel = self.make_request(
             "GET",
-            self.url + "?deactivated=true",
+            f"{self.url}?deactivated=true",
             {},
             access_token=self.admin_user_tok,
         )

@@ -982,6 +982,56 @@
         self.assertEqual(1, channel.json_body["total"])
         self.assertFalse(channel.json_body["users"][0]["admin"])
 
+    def test_filter_deactivated_users(self) -> None:
+        """
+        Tests whether the various values of the query parameter `deactivated` lead to the
+        expected result set.
+        """
+        users_url_v3 = self.url.replace("v2", "v3")
+
+        # Register an additional non admin user
+        user_id = self.register_user("user", "pass", admin=False)
+
+        # Deactivate that user, requesting erasure.
+        deactivate_account_handler = self.hs.get_deactivate_account_handler()
+        self.get_success(
+            deactivate_account_handler.deactivate_account(
+                user_id, erase_data=True, requester=create_requester(user_id)
+            )
+        )
+
+        # Query all users
+        channel = self.make_request(
+            "GET",
+            users_url_v3,
+            access_token=self.admin_user_tok,
+        )
+
+        self.assertEqual(200, channel.code, channel.result)
+        self.assertEqual(2, channel.json_body["total"])
+
+        # Query deactivated users
+        channel = self.make_request(
+            "GET",
+            f"{users_url_v3}?deactivated=true",
+            access_token=self.admin_user_tok,
+        )
+
+        self.assertEqual(200, channel.code, channel.result)
+        self.assertEqual(1, channel.json_body["total"])
+        self.assertEqual("@user:test", channel.json_body["users"][0]["name"])
+
+        # Query non-deactivated users
+        channel = self.make_request(
+            "GET",
+            f"{users_url_v3}?deactivated=false",
+            access_token=self.admin_user_tok,
+        )
+
+        self.assertEqual(200, channel.code, channel.result)
+        self.assertEqual(1, channel.json_body["total"])
+        self.assertEqual("@admin:test", channel.json_body["users"][0]["name"])
+
     @override_config(
         {
             "experimental_features": {

@@ -1130,7 +1180,7 @@
         # They should appear in the list users API, marked as not erased.
         channel = self.make_request(
             "GET",
-            self.url + "?deactivated=true",
+            f"{self.url}?deactivated=true",
             access_token=self.admin_user_tok,
         )
         users = {user["name"]: user for user in channel.json_body["users"]}

@@ -1194,7 +1244,7 @@
             dir: The direction of ordering to give the server
         """
 
-        url = self.url + "?deactivated=true&"
+        url = f"{self.url}?deactivated=true&"
         if order_by is not None:
             url += "order_by=%s&" % (order_by,)
         if dir is not None and dir in ("b", "f"):

@@ -177,7 +177,6 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase):
                 # rc_login dict here, we need to set this manually as well
                 "account": {"per_second": 10000, "burst_count": 10000},
             },
-            "experimental_features": {"msc4041_enabled": True},
         }
     )
     def test_POST_ratelimiting_per_address(self) -> None:

@@ -229,7 +228,6 @@
                 # rc_login dict here, we need to set this manually as well
                 "address": {"per_second": 10000, "burst_count": 10000},
             },
-            "experimental_features": {"msc4041_enabled": True},
         }
     )
     def test_POST_ratelimiting_per_account(self) -> None:

@@ -278,7 +276,6 @@
                 "address": {"per_second": 10000, "burst_count": 10000},
                 "failed_attempts": {"per_second": 0.17, "burst_count": 5},
             },
-            "experimental_features": {"msc4041_enabled": True},
         }
     )
     def test_POST_ratelimiting_per_account_failed_attempts(self) -> None: